/*
 * Copyright (C) 2005 Intel Corporation
 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
 *
 *      Alex Chiang <achiang@hp.com>
 *      - Unified x86/ia64 implementations
 *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *      - Added _PDC for platforms with Intel CPUs
 */
#include <linux/export.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>

#include "internal.h"

#define PREFIX                  "ACPI: "
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

static int __init set_no_mwait(const struct dmi_system_id *id)
{
        printk(KERN_NOTICE PREFIX "%s detected - disabling mwait for CPU C-states\n",
               id->ident);
        boot_option_idle_override = IDLE_NOMWAIT;
        return 0;
}

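/*
 * DMI blacklist of machines on which MWAIT should not be used for C-states;
 * a match disables MWAIT via set_no_mwait() above.
 */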
static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
        {
                .callback = set_no_mwait,
                .ident = "Extensa 5220",
                .matches = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
                        DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
                        DMI_MATCH(DMI_BOARD_NAME, "Columbia"),
                },
        },
        {},
};

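/*
 * Match a MADT LOCAL_APIC entry against an ACPI processor ID and, on a
 * match, return the local APIC ID through @apic_id.  Returns 1 on a match,
 * 0 otherwise; disabled entries are skipped.
 */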
static int map_lapic_id(struct acpi_subtable_header *entry,
                 u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_apic *lapic =
                (struct acpi_madt_local_apic *)entry;

        if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (lapic->processor_id != acpi_id)
                return 0;

        *apic_id = lapic->id;
        return 1;
}

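/*
 * Match a MADT LOCAL_X2APIC entry.  x2APIC entries are only matched for
 * Device() processor declarations, by comparing _UID against @acpi_id.
 */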
static int map_x2apic_id(struct acpi_subtable_header *entry,
                         int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_x2apic *apic =
                (struct acpi_madt_local_x2apic *)entry;

        if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration && (apic->uid == acpi_id)) {
                *apic_id = apic->local_apic_id;
                return 1;
        }

        return 0;
}

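/*
 * Match a MADT LOCAL_SAPIC entry (ia64).  Device() declarations are matched
 * by _UID (which requires an entry long enough to carry the uid field),
 * Processor() declarations by processor_id.  The returned ID combines the
 * SAPIC id and eid fields.
 */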
static int map_lsapic_id(struct acpi_subtable_header *entry,
                int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_sapic *lsapic =
                (struct acpi_madt_local_sapic *)entry;

        if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration) {
                if ((entry->length < 16) || (lsapic->uid != acpi_id))
                        return 0;
        } else if (lsapic->processor_id != acpi_id)
                return 0;

        *apic_id = (lsapic->id << 8) | lsapic->eid;
        return 1;
}

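/*
 * Walk the MADT (fetched and cached on first use) and try each
 * LOCAL_APIC/LOCAL_X2APIC/LOCAL_SAPIC subtable until one maps @acpi_id to
 * an APIC ID.  Returns the APIC ID, or -1 if no entry matches.
 */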
static int map_madt_entry(int type, u32 acpi_id)
{
        unsigned long madt_end, entry;
        static struct acpi_table_madt *madt;
        static int read_madt;
        int apic_id = -1;

        if (!read_madt) {
                if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
                                        (struct acpi_table_header **)&madt)))
                        madt = NULL;
                read_madt++;
        }

        if (!madt)
                return apic_id;

        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;

        /* Parse all entries looking for a match. */
        entry += sizeof(struct acpi_table_madt);
        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                        if (map_lapic_id(header, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
                        if (map_x2apic_id(header, type, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (map_lsapic_id(header, type, acpi_id, &apic_id))
                                break;
                }
                entry += header->length;
        }
        return apic_id;
}

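/*
 * Evaluate the processor's _MAT method, which returns a single MADT-style
 * subtable, and map it to an APIC ID.  Returns -1 if _MAT is absent or the
 * entry does not match @acpi_id.
 */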
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
        int apic_id = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;

        if (!buffer.length || !buffer.pointer)
                goto exit;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(struct acpi_subtable_header)) {
                goto exit;
        }

        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, type, acpi_id, &apic_id);
        }

exit:
        kfree(buffer.pointer);
        return apic_id;
}

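/**
 * acpi_get_apicid - map an ACPI processor ID to its APIC ID
 * @handle: ACPI handle of the processor object
 * @type: 1 for a Device() declaration, 0 for a Processor() declaration
 * @acpi_id: the processor's ACPI ID (_UID or processor_id)
 *
 * Try the per-processor _MAT method first and fall back to scanning the
 * MADT.  Returns the APIC ID, or -1 if no mapping is found.
 */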
int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
{
        int apic_id;

        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);

        return apic_id;
}

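/**
 * acpi_map_cpuid - translate an APIC ID to a logical CPU number
 * @apic_id: APIC ID from acpi_get_apicid(), or -1 if none was found
 * @acpi_id: the processor's ACPI ID, used for the uniprocessor fallback
 *
 * Returns the logical CPU number, or -1 if the processor cannot be mapped.
 */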
int acpi_map_cpuid(int apic_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
        int i;
#endif

        if (apic_id == -1) {
                /*
                 * On a UP system there may be no _MAT method and no MADT,
                 * in which case apic_id is always -1 at this point.
                 *
                 * The BIOS may nevertheless define multiple CPU handles
                 * even for a UP system, for example:
                 *
                 * Scope (_PR)
                 * {
                 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
                 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
                 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
                 * Ignore apic_id and return 0 for the handle with ACPI ID 0
                 * when nr_cpu_ids is 1, which should be the case when no
                 * SMP tables were found.  Return -1 for every other handle.
                 */
                if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
                        return apic_id;
        }

#ifdef CONFIG_SMP
        for_each_possible_cpu(i) {
                if (cpu_physical_id(i) == apic_id)
                        return i;
        }
#else
        /* In a UP kernel, only processor 0 is valid. */
        if (apic_id == 0)
                return apic_id;
#endif
        return -1;
}

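/**
 * acpi_get_cpuid - map an ACPI processor object to a logical CPU number
 * @handle: ACPI handle of the processor object
 * @type: 1 for a Device() declaration, 0 for a Processor() declaration
 * @acpi_id: the processor's ACPI ID
 *
 * Convenience wrapper around acpi_get_apicid() and acpi_map_cpuid().
 */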
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
        int apic_id;

        apic_id = acpi_get_apicid(handle, type, acpi_id);

        return acpi_map_cpuid(apic_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);

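/*
 * Determine whether a processor object corresponds to a CPU that is
 * physically present: extract its ACPI ID (processor_id for Processor()
 * objects, _UID for Device() objects) and check that it maps to a valid
 * logical CPU.
 */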
static bool __init processor_physically_present(acpi_handle handle)
{
        int cpuid, type;
        u32 acpi_id;
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long tmp;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return false;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = object.processor.proc_id;
                break;
        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = tmp;
                break;
        default:
                return false;
        }

        type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
        cpuid = acpi_get_cpuid(handle, type, acpi_id);

        return cpuid != -1;
}

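/*
 * Fill in the 3-dword _PDC capability buffer: revision, number of
 * capability dwords (1), and the capability bits, then hand the buffer to
 * arch_acpi_set_pdc_bits() for arch-specific additions.
 */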
static void acpi_set_pdc_bits(u32 *buf)
{
        buf[0] = ACPI_PDC_REVISION_ID;
        buf[1] = 1;

        /* Enable coordination with firmware's _TSD info */
        buf[2] = ACPI_PDC_SMP_T_SWCOORD;

        /* Twiddle arch-specific bits needed for _PDC */
        arch_acpi_set_pdc_bits(buf);
}

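/*
 * Allocate the acpi_object_list/acpi_object/buffer triple used as the _PDC
 * argument and initialize the capability bits.  The caller owns all three
 * allocations and must free them (see acpi_processor_set_pdc()).
 */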
static struct acpi_object_list *acpi_processor_alloc_pdc(void)
{
        struct acpi_object_list *obj_list;
        union acpi_object *obj;
        u32 *buf;

        /* allocate and initialize pdc. It will be used later. */
        obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
        if (!obj_list) {
                printk(KERN_ERR "Memory allocation error\n");
                return NULL;
        }

        obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
        if (!obj) {
                printk(KERN_ERR "Memory allocation error\n");
                kfree(obj_list);
                return NULL;
        }

        buf = kmalloc(12, GFP_KERNEL);
        if (!buf) {
                printk(KERN_ERR "Memory allocation error\n");
                kfree(obj);
                kfree(obj_list);
                return NULL;
        }

        acpi_set_pdc_bits(buf);

        obj->type = ACPI_TYPE_BUFFER;
        obj->buffer.length = 12;
        obj->buffer.pointer = (u8 *) buf;
        obj_list->count = 1;
        obj_list->pointer = obj;

        return obj_list;
}

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
        acpi_status status = AE_OK;

        if (boot_option_idle_override == IDLE_NOMWAIT) {
                /*
                 * If MWAIT is disabled for CPU C-states, clear the C2/C3
                 * FFH access mode bit in the _PDC argument; the C1 FFH
                 * access mode bit is cleared as well.
                 */
                union acpi_object *obj;
                u32 *buffer = NULL;

                obj = pdc_in->pointer;
                buffer = (u32 *)(obj->buffer.pointer);
                buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
        }

        status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
        if (ACPI_FAILURE(status))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "Could not evaluate _PDC, using legacy perf. control.\n"));

        return status;
}

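/*
 * Build the _PDC argument for one processor, evaluate _PDC on it, and then
 * release the argument buffers.  This is a no-op on architectures that do
 * not implement _PDC.
 */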
void acpi_processor_set_pdc(acpi_handle handle)
{
        struct acpi_object_list *obj_list;

        if (!arch_has_acpi_pdc())
                return;

        obj_list = acpi_processor_alloc_pdc();
        if (!obj_list)
                return;

        acpi_processor_eval_pdc(handle, obj_list);

        kfree(obj_list->pointer->buffer.pointer);
        kfree(obj_list->pointer);
        kfree(obj_list);
}

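/*
 * Namespace-walk callback: evaluate _PDC for every processor object that
 * maps to a physically present CPU.
 */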
static acpi_status __init
early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
{
        if (!processor_physically_present(handle))
                return AE_OK;

        acpi_processor_set_pdc(handle);
        return AE_OK;
}

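/*
 * Evaluate _PDC early for every processor in the namespace, covering both
 * Processor() declarations and ACPI0007 processor Device() objects, after
 * first consulting the DMI blacklist that disables MWAIT.
 */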
void __init acpi_early_processor_set_pdc(void)
{
        /*
         * Check whether the system matches the DMI blacklist.  If so, OSPM
         * should not use MWAIT for CPU C-states.
         */
        dmi_check_system(processor_idle_dmi_table);

        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
        acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
}