// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"
/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
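/*
 * Worked example: the packed encoding round-trips through the accessors
 * above.
 *
 *	AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2) == 0x10021042
 *	AMD_MODEL_RANGE_FAMILY(0x10021042)        == 0x10
 *	AMD_MODEL_RANGE_START(0x10021042)         == 0x021 (model 0x2, stepping 0x1)
 *	AMD_MODEL_RANGE_END(0x10021042)           == 0x042 (model 0x4, stepping 0x2)
 *
 * cpu_has_amd_erratum() packs the running CPU the same way,
 * ms = (x86_model << 4) | x86_stepping, and range-checks that value.
 */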
static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static const int amd_zenbleed[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));

static const int amd_div0[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			/* Each OSVW_STATUS MSR holds 64 status bits. */
			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	int err;
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
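/*
 * The 0x9c5a203a value above is the "passcode" the K8 expects in %edi
 * before it grants access to its protected model-specific registers;
 * rdmsr_safe_regs()/wrmsr_safe_regs() take the whole GPR set so the value
 * reaches the hardware unchanged.
 */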
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *	(Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip stepping when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif
static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}
static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;

			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n", mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */
		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;

			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n", mbytes);
		}
		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}
static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* Is this call from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon.
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}
#ifdef CONFIG_NUMA
/*
 * Used to work around a broken NUMA config. See the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}
/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		c->cpu_die_id  = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf 0xB is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c, cpu);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->cpu_die_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}
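/*
 * Worked example: with c->x86_coreid_bits == 3, an initial APIC ID of 0x1a
 * yields cpu_core_id == (0x1a & 7) == 2 and phys_proc_id == (0x1a >> 3) == 3.
 */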
u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = get_llc_id(cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through the CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
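/*
 * Worked example: a CPU reporting NC == 7 (i.e. 8 cores) in CPUID
 * 0x80000008 ECX[7:0] but ApicIdCoreIdSize == 0 in ECX[15:12] makes the
 * loop above recompute bits == 3, since 1 << 3 == 8.
 */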
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}
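	/*
	 * Worked example: CPUID 0x80000005 EDX describes the L1 instruction
	 * cache, size in KB in bits [31:24] and associativity in bits
	 * [23:16]. Bulldozer's shared 64K, 2-way L1I gives
	 * upperbit == (64 << 10) / 2 == 0x8000, so va_align.mask == 0x7000
	 * and mmap()ed addresses get a per-boot random value in bits
	 * [14:12], avoiding I-cache aliasing conflicts between the two
	 * cores of a compute unit.
	 */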
	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}
	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
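	/*
	 * x86_amd_ls_cfg_base and x86_amd_ls_cfg_ssbd_mask are consumed by
	 * the speculation-control code (see bugs.c), which ORs the mask
	 * into LS_CFG when Speculative Store Bypass is to be disabled.
	 */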
	resctrl_cpu_detect(c);
}
static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *	      SEV and SEV_ES feature (set in scattered.c).
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
	}
}
static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		/* CPUID 0x8000001e EBX[15:8] is threads per core - 1. */
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}
static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}
static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}
static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);
static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	/* The MSR's upper 32 bits mirror CPUID Fn0000_0001_ECX, so bit 62 == ECX[30] (RDRAND). */
	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}
static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}
static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}
void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 *
	 * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
	/*
	 * Work around Erratum 1386. The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}
static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);

		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}
static bool cpu_has_zenbleed_microcode(void)
{
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
	case 0x60 ... 0x67: good_rev = 0x0860010b; break;
	case 0x68 ... 0x6f: good_rev = 0x08608105; break;
	case 0x70 ... 0x7f: good_rev = 0x08701032; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;

	default: return false;
	}

	if (boot_cpu_data.microcode < good_rev)
		return false;

	return true;
}
static void zenbleed_check(struct cpuinfo_x86 *c)
{
	if (!cpu_has_amd_erratum(c, amd_zenbleed))
		return;

	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!cpu_has_zenbleed_microcode()) {
		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}
static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = read_apic_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: init_spectral_chicken(c);
		   fallthrough;
	case 0x19: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have the APIC timer
	 * running in deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !cpu_has_amd_erratum(c, amd_erratum_1054))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
	 * order to be replicated onto them. Regardless, set it here again, if not set,
	 * to protect against any future refactoring/code reorganization which might
	 * miss setting this important bit.
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));

	zenbleed_check(c);

	if (cpu_has_amd_erratum(c, amd_div0)) {
		pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
		setup_force_cpu_bug(X86_BUG_DIV0);
	}
}
#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
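/*
 * For reference: CPUID 0x80000006 packs the L2 ITLB into the low 16 bits of
 * EAX (2M/4M pages) and EBX (4K pages) and the L2 DTLB into the high 16
 * bits, with 12-bit entry counts, while the L1 TLB leaf 0x80000005 uses
 * 8-bit counts; hence the 0xfff/0xff masks above.
 */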
static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};

void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
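/*
 * Usage sketch (illustrative, assuming a caller that owns DR0): the mask
 * MSR tells the CPU which address bits to ignore when matching the
 * corresponding breakpoint, so a watchpoint covering one whole page could
 * be set up as
 *
 *	set_debugreg(addr & PAGE_MASK, 0);
 *	amd_set_dr_addr_mask(PAGE_SIZE - 1, 0);
 *
 * The hw_breakpoint code derives such masks from the breakpoint length.
 */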
u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
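/*
 * Note: 166 and 255 mirror the CPPC "highest performance" values these
 * parts report; callers such as the ACPI cpufreq code use the result when
 * computing boost ratios.
 */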
static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zenbleed_check(c);
}

void amd_check_microcode(void)
{
	on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
bool cpu_has_ibpb_brtype_microcode(void)
{
	switch (boot_cpu_data.x86) {
	/* Zen1/2 IBPB flushes branch type predictions too. */
	case 0x17:
		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
	case 0x19:
		/* Poke the MSR bit on Zen3/4 to check its presence. */
		if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_SBPB);
			return true;
		}
		return false;
	default:
		return false;
	}
}
/*
 * Issue a DIV 0/1 insn to clear any division data from previous DIV
 * operations.
 */
void noinstr amd_clear_divider(void)
{
	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
		     :: "a" (0), "d" (0), "r" (1));
}
EXPORT_SYMBOL_GPL(amd_clear_divider);