#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");
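/*
 * Note: vide() is an empty assembly routine (a single "ret") that exists
 * only as a cheap indirect-call target for the K6 bug timing loop below.
 */
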
static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
	/*
	 * General Systems BIOSen alias the cpu frequency registers
	 * of the Elan at 0x000df000. Unfortunately, one of the Linux
	 * drivers subsequently pokes it, and changes the CPU speed.
	 * Workaround : Remove the unneeded alias.
	 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
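/*
 * A sketch of the mechanism, assuming the documented Elan behaviour:
 * writing the key value 0xCB to CBAR with the enable bit (bit 31)
 * clear turns the aliased register window off.
 */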
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}

static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);
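	/*
	 * num_physpages is in PAGE_SIZE units, so the shift converts it to
	 * megabytes: with 4 KB pages (PAGE_SHIFT == 12) this divides by
	 * 2^8 = 256 pages per MB, e.g. 131072 pages -> 512 MB.
	 */
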
	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n = K6_BUG_LOOP;
		void (*f_vide)(void) = vide;
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		/* Time K6_BUG_LOOP indirect calls through f_vide */
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2 - d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
			"system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
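			/*
			 * Old-style WHCR layout, as used below: bit 0 is
			 * the write-allocate enable flag and bits 7:1 hold
			 * the limit in 4 MB units, so e.g. 508 MB encodes
			 * as (508/4) << 1 | 1 = 0xFF.  (Field names per
			 * AMD's K6 documentation; only the encoding below
			 * is relied on here.)
			 */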
			unsigned long flags;

			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
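			/*
			 * New-style WHCR layout, as used below: bit 16 is
			 * the write-allocate enable flag and bits 31:22
			 * hold the limit in 4 MB units, hence
			 * (mbytes >> 2) << 22.  The 4092 MB clamp above
			 * keeps the 10-bit field (max 1023 * 4 MB) from
			 * overflowing.
			 */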
			unsigned long flags;

			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running an SMP kernel on a single non-MP
	 * approved Athlon.
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;	/* clear bit 15 */
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
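	/*
	 * Concretely, the check below compares the top twelve bits (mask
	 * 0xfff00000) against 0x200: a value like 0x60031223 would be
	 * rewritten to 0x20031223, keeping the low 20 bits intact.  (The
	 * example value is illustrative, not taken from AMD's note.)
	 */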
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}

/*
 * Fixup core topology information for
 *  (1) AMD multi-node processors
 *      Assumption: Number of cores in each internal node is the same.
 *  (2) AMD processors supporting compute units
 */
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

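		/*
		 * CPUID leaf 0x8000001e as decoded below: ECX[10:8] is the
		 * number of nodes per processor minus 1 and ECX[2:0] the
		 * node ID; EBX[9:8] is cores per compute unit minus 1 and
		 * EBX[7:0] the compute unit ID.  (Field names follow AMD's
		 * TOPOEXT documentation; only the bit positions are relied
		 * on here.)
		 */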
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
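/*
 * For example (values illustrative): with x86_coreid_bits == 1, an initial
 * APIC ID of 5 (0b101) splits into core id 1 (low bit) and socket id 2
 * (remaining high bits), exactly as computed below.
 */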
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
}

int amd_get_nb_id(int cpu)
{
	int id = 0;

	id = per_cpu(cpu_llc_id, cpu);

	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
	unsigned bits;
	unsigned ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;
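	/*
	 * ECX[15:12] of CPUID 0x80000008 is the ApicIdCoreIdSize field (per
	 * AMD's documentation): the number of APIC ID bits reserved for the
	 * core ID.  A value of 0 means the CPU predates the field, so it is
	 * recomputed below from the core count, e.g. 6 cores -> 3 bits.
	 */
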
	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
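	/*
	 * Device 24 (0x18), function 0 on bus 0 is the northbridge; bits 17
	 * and 18 of its config register 0x68 report/enable extended APIC ID
	 * support (bit names per the family 0x10 BKDG; only the two bit
	 * positions are relied on here).
	 */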
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;

		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/* We need to do the following only once */
	if (c != &boot_cpu_data)
		return;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
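		/*
		 * CPUID leaf 1 EAX packs the stepping in bits 3:0 and the
		 * base model/family above that, so for family 0xf the low
		 * sixteen bits read 0x0fMS: 0x0f48 is model 4, stepping 8,
		 * and the check below accepts revision C and later parts.
		 */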
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force this feature, but only K8
		 * revision D (model = 0x14) and later actually support it.
		 * (AMD Erratum #110, docId: 25759).
		 */
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			u64 val;

			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
				val &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, val);
			}
		}

	}

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/* Should distinguish models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86 == 0x15) &&
	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		u64 val;

		if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
			val |= 0x1E;
			checking_wrmsrl(0xc0011021, val);
		}
	}

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

	if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
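			/*
			 * tseg >> PMD_SHIFT is the index of the 2 MB
			 * (PMD-sized) page containing TSEG; the pfn limits
			 * are shifted by PMD_SHIFT - PAGE_SHIFT into the
			 * same units.  The condition below asks whether
			 * TSEG falls inside the currently direct-mapped
			 * range (low mapping, or high mapping above 4 GB).
			 */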
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here
	 * because this is always needed when GART is enabled, even in a
	 * kernel which has no MCE support built in.
	 */
	if (c->x86 == 0x10) {
		/*
		 * The BIOS should disable GartTlbWlk errors itself. If it
		 * doesn't, do it here as suggested by the BKDG.
		 *
		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
		 */
		u64 mask;
		int err;

		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
		if (err == 0) {
			mask |= (1 << 10);
			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
		}
	}
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
							unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
 * int[] in arch/x86/include/asm/processor.h.
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);

const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);

bool cpu_has_amd_erratum(const int *erratum)
{
	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	/*
	 * If called early enough that current_cpu_data hasn't been initialized
	 * yet, fall back to boot_cpu_data.
	 */
	if (cpu->x86 == 0)
		cpu = &boot_cpu_data;

	if (cpu->x86_vendor != X86_VENDOR_AMD)
		return false;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
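	/*
	 * ms packs model and stepping into one comparable value, e.g.
	 * model 0x41 stepping 0x2 becomes 0x412, which is exactly the
	 * start of the family 0xf range in amd_erratum_400 above
	 * (AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) spans 0x412-0xfff).
	 */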
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);