/*
 * P4 specific Machine Check Exception Reporting
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include <asm/therm_throt.h>

#include "mce.h"
/* as supported by the P4/Xeon family */
struct intel_mce_extended_msrs {
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u32 esi;
	u32 edi;
	u32 ebp;
	u32 esp;
	u32 eflags;
	u32 eip;
	/* u32 *reserved[]; */
};
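
/* Number of extended MSRs advertised in MCG_CAP[23:16]; stays zero
 * unless intel_p4_mcheck_init() finds the MCG_EXT_P capability bit. */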
static int mce_num_extended_msrs = 0;
#ifdef CONFIG_X86_MCE_P4THERMAL
static void unexpected_thermal_interrupt(struct pt_regs *regs)
{
	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
		smp_processor_id());
	add_taint(TAINT_MACHINE_CHECK);
}
/* P4/Xeon Thermal transition interrupt handler */
static void intel_thermal_interrupt(struct pt_regs *regs)
{
	__u64 msr_val;

	ack_APIC_irq();
	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
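	/* Per the Intel SDM, bit 0 of IA32_THERM_STATUS indicates whether
	 * the thermal monitor is currently active (throttling). */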
	therm_throt_process(msr_val & 0x1);
}
/* Thermal interrupt handler for this CPU setup */
static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;

fastcall void smp_thermal_interrupt(struct pt_regs *regs)
{
	irq_enter();
	vendor_thermal_interrupt(regs);
	irq_exit();
}
/* P4/Xeon Thermal regulation detect and init */
static void intel_init_thermal(struct cpuinfo_x86 *c)
{
	u32 l, h;
	unsigned int cpu = smp_processor_id();

	/* Thermal monitoring */
	if (!cpu_has(c, X86_FEATURE_ACPI))
		return;	/* -ENODEV */

	/* Clock modulation */
	if (!cpu_has(c, X86_FEATURE_ACC))
		return;	/* -ENODEV */
	/* First check if it's enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already. -zwanem
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
			cpu);
		return; /* -EBUSY */
	}
	/* Check whether a thermal vector is already installed
	 * (possibly temporarily masked). */
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already "
			"installed\n",
			cpu, (h & APIC_VECTOR_MASK));
		return; /* -EBUSY */
	}
	/* The temperature transition interrupt handler setup */
	h = THERMAL_APIC_VECTOR;		/* our delivery vector */
	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);	/* we'll mask till we're ready */
	apic_write_around(APIC_LVTTHMR, h);
	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
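	/* Per the Intel SDM, bits 0 and 1 of IA32_THERM_INTERRUPT enable
	 * the high- and low-temperature threshold interrupts, so we are
	 * notified of transitions in both directions. */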
	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
	/* ok we're good to go... */
	vendor_thermal_interrupt = intel_thermal_interrupt;
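
	/* IA32_MISC_ENABLE bit 3 is the automatic thermal control circuit
	 * (TM1) enable, per the Intel SDM. */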
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);

	l = apic_read(APIC_LVTTHMR);
	apic_write_around(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
	printk(KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}
#endif /* CONFIG_X86_MCE_P4THERMAL */
/* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
static inline int intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
{
	u32 h;

	if (mce_num_extended_msrs == 0)
		return 0;

	rdmsr(MSR_IA32_MCG_EAX, r->eax, h);
	rdmsr(MSR_IA32_MCG_EBX, r->ebx, h);
	rdmsr(MSR_IA32_MCG_ECX, r->ecx, h);
	rdmsr(MSR_IA32_MCG_EDX, r->edx, h);
	rdmsr(MSR_IA32_MCG_ESI, r->esi, h);
	rdmsr(MSR_IA32_MCG_EDI, r->edi, h);
	rdmsr(MSR_IA32_MCG_EBP, r->ebp, h);
	rdmsr(MSR_IA32_MCG_ESP, r->esp, h);
	rdmsr(MSR_IA32_MCG_EFLAGS, r->eflags, h);
	rdmsr(MSR_IA32_MCG_EIP, r->eip, h);

	/* can we rely on kmalloc to do a dynamic
	 * allocation for the reserved registers?
	 */
	return mce_num_extended_msrs;
}
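
/*
 * The machine check handler proper: report MCG status and, when the
 * extended MSRs are present, the saved register state, then walk every
 * bank with its valid bit set and panic if the error was fatal.
 */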
static fastcall void intel_machine_check(struct pt_regs *regs, long error_code)
{
	int recover = 1;
	u32 alow, ahigh, high, low;
	u32 mcgstl, mcgsth;
	int i;
	struct intel_mce_extended_msrs dbg;

	rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
	if (mcgstl & (1<<0))	/* Recoverable ? */
		recover = 0;

	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
		smp_processor_id(), mcgsth, mcgstl);

	if (intel_get_extended_msrs(&dbg)) {
		printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n",
			smp_processor_id(), dbg.eip, dbg.eflags);
		printk(KERN_DEBUG "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n",
			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx);
		printk(KERN_DEBUG "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
			dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
	}
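
	/* In the high half of MCi_STATUS as read below: bit 31 is VAL,
	 * bit 29 UC (uncorrected), bit 27 MISCV, bit 26 ADDRV and bit 25
	 * PCC (processor context corrupt), per the MCA register layout. */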
	for (i = 0; i < nr_mce_banks; i++) {
		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
		if (high & (1<<31)) {
			if (high & (1<<29))
				recover |= 1;
			if (high & (1<<25))
				recover |= 2;
			printk(KERN_EMERG "Bank %d: %08x%08x", i, high, low);
			high &= ~(1<<31);
			if (high & (1<<27)) {
				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
				printk("[%08x%08x]", ahigh, alow);
			}
			if (high & (1<<26)) {
				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
				printk(" at %08x%08x", ahigh, alow);
			}
			printk("\n");
		}
	}
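
	/* recover is a bitmask: 2 means some bank reported a corrupt
	 * processor context, 1 means the error is otherwise unrecoverable
	 * (an uncorrected error, or MCG_STATUS said we can't restart). */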
	if (recover & 2)
		panic("CPU context corrupt");
	if (recover & 1)
		panic("Unable to continue");

	printk(KERN_EMERG "Attempting to continue.\n");
	/*
	 * Do not clear the MSR_IA32_MCi_STATUS if the error is not
	 * recoverable/continuable. This will allow the BIOS to look at the
	 * MSRs for errors if the OS could not log the error.
	 */
	for (i = 0; i < nr_mce_banks; i++) {
		u32 msr;
		msr = MSR_IA32_MC0_STATUS+i*4;
		rdmsr(msr, low, high);
		if (high & (1<<31)) {
			/* Clear it */
			wrmsr(msr, 0UL, 0UL);
			/* Serialize */
			wmb();
			add_taint(TAINT_MACHINE_CHECK);
		}
	}
	mcgstl &= ~(1<<2);	/* clear MCIP so further MCEs can be taken */
	wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
}
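
/*
 * Set up P4 machine check reporting: install the handler, enable all
 * banks and CR4.MCE, then probe the extended-MSR and thermal features.
 */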
void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;

	machine_check_vector = intel_machine_check;
	wmb();

	printk(KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr(MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	for (i = 0; i < nr_mce_banks; i++) {
		wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4(X86_CR4_MCE);
	printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());
	/* Check for P4/Xeon extended MCE MSRs */
	rdmsr(MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<9)) {	/* MCG_EXT_P */
		mce_num_extended_msrs = (l >> 16) & 0xff;
		printk(KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
			" available\n",
			smp_processor_id(), mce_num_extended_msrs);

#ifdef CONFIG_X86_MCE_P4THERMAL
		/* Check for P4/Xeon Thermal monitor */
		intel_init_thermal(c);
#endif
	}
}