// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_processor.c - ACPI processor enumeration support
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Copyright (C) 2013, Intel Corporation
 *                     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <acpi/processor.h>

#include <asm/cpu.h>

#include <xen/xen.h>

#include "internal.h"

DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);

/* Errata Handling */
struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);

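/*
 * Set the PIIX4-specific errata flags (manual throttling, BM-IDE status,
 * Type-F DMA) according to the chipset revision.
 */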
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
        u8 value1 = 0;
        u8 value2 = 0;

        if (!dev)
                return -EINVAL;

        /*
         * Note that 'dev' references the PIIX4 ACPI Controller.
         */

        switch (dev->revision) {
        case 0:
                dev_dbg(&dev->dev, "Found PIIX4 A-step\n");
                break;
        case 1:
                dev_dbg(&dev->dev, "Found PIIX4 B-step\n");
                break;
        case 2:
                dev_dbg(&dev->dev, "Found PIIX4E\n");
                break;
        case 3:
                dev_dbg(&dev->dev, "Found PIIX4M\n");
                break;
        default:
                dev_dbg(&dev->dev, "Found unknown PIIX4\n");
                break;
        }

        switch (dev->revision) {

        case 0:         /* PIIX4 A-step */
        case 1:         /* PIIX4 B-step */
                /*
                 * See specification changes #13 ("Manual Throttle Duty Cycle")
                 * and #14 ("Enabling and Disabling Manual Throttle"), plus
                 * erratum #5 ("STPCLK# Deassertion Time") from the January
                 * 2002 PIIX4 specification update.  Applies only to older
                 * PIIX4 models.
                 */
                errata.piix4.throttle = 1;
                fallthrough;

        case 2:         /* PIIX4E */
        case 3:         /* PIIX4M */
                /*
                 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
                 * Livelock") from the January 2002 PIIX4 specification update.
                 * Applies to all PIIX4 models.
                 */

                /*
                 * BM-IDE
                 * ------
                 * Find the PIIX4 IDE Controller and get the Bus Master IDE
                 * Status register address.  We'll use this later to read
                 * each IDE controller's DMA status to make sure we catch all
                 * DMA activity.
                 */
                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                                     PCI_DEVICE_ID_INTEL_82371AB,
                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
                if (dev) {
                        errata.piix4.bmisx = pci_resource_start(dev, 4);
                        pci_dev_put(dev);
                }

                /*
                 * Type-F DMA
                 * ----------
                 * Find the PIIX4 ISA Controller and read the Motherboard
                 * DMA controller's status to see if Type-F (Fast) DMA mode
                 * is enabled (bit 7) on either channel.  Note that we'll
                 * disable C3 support if this is enabled, as some legacy
                 * devices won't operate well if fast DMA is disabled.
                 */
                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                                     PCI_DEVICE_ID_INTEL_82371AB_0,
                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
                if (dev) {
                        pci_read_config_byte(dev, 0x76, &value1);
                        pci_read_config_byte(dev, 0x77, &value2);
                        if ((value1 & 0x80) || (value2 & 0x80))
                                errata.piix4.fdma = 1;
                        pci_dev_put(dev);
                }

                break;
        }

        if (errata.piix4.bmisx)
                dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n");
        if (errata.piix4.fdma)
                dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n");

        return 0;
}

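/* Check for chipsets whose errata affect processor power management; only the PIIX4 is handled. */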
static int acpi_processor_errata(void)
{
        int result = 0;
        struct pci_dev *dev = NULL;

        /*
         * PIIX4
         */
        dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                             PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
                             PCI_ANY_ID, NULL);
        if (dev) {
                result = acpi_processor_errata_piix4(dev);
                pci_dev_put(dev);
        }

        return result;
}

/* Create a platform device to represent a CPU frequency control mechanism. */
static void cpufreq_add_device(const char *name)
{
        struct platform_device *pdev;

        pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0);
        if (IS_ERR(pdev))
                pr_info("%s device creation failed: %ld\n", name, PTR_ERR(pdev));
}

#ifdef CONFIG_X86
/* Check presence of Processor Clocking Control by searching for \_SB.PCCH. */
static void __init acpi_pcc_cpufreq_init(void)
{
        acpi_status status;
        acpi_handle handle;

        status = acpi_get_handle(NULL, "\\_SB", &handle);
        if (ACPI_FAILURE(status))
                return;

        if (acpi_has_method(handle, "PCCH"))
                cpufreq_add_device("pcc-cpufreq");
}
#else
static void __init acpi_pcc_cpufreq_init(void) {}
#endif /* CONFIG_X86 */

/* Initialization */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int __weak acpi_map_cpu(acpi_handle handle,
                phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
        return -ENODEV;
}

int __weak acpi_unmap_cpu(int cpu)
{
        return -ENODEV;
}

int __weak arch_register_cpu(int cpu)
{
        return -ENODEV;
}

void __weak arch_unregister_cpu(int cpu) {}

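/*
 * Map a hot-added CPU to a logical id and register it with the arch code.
 * The bulk of the initialization is deferred until the CPU goes online for
 * the first time.
 */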
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
        unsigned long long sta;
        acpi_status status;
        int ret;

        if (invalid_phys_cpuid(pr->phys_id))
                return -ENODEV;

        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
                return -ENODEV;

        cpu_maps_update_begin();
        cpus_write_lock();

        ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
        if (ret)
                goto out;

        ret = arch_register_cpu(pr->id);
        if (ret) {
                acpi_unmap_cpu(pr->id);
                goto out;
        }

        /*
         * CPU got hot-added, but cpu_data is not initialized yet.  Set a flag
         * to delay cpu_idle/throttling initialization and do it when the CPU
         * gets online for the first time.
         */
        pr_info("CPU%d has been hot-added\n", pr->id);
        pr->flags.need_hotplug_init = 1;

out:
        cpus_write_unlock();
        cpu_maps_update_done();
        return ret;
}
#else
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
        return -ENODEV;
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

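/*
 * Gather the information needed to describe the processor: its ACPI id,
 * physical and logical CPU ids, P_BLK address and, if present, its slot
 * number (_SUN).
 */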
static int acpi_processor_get_info(struct acpi_device *device)
{
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        struct acpi_processor *pr = acpi_driver_data(device);
        int device_declaration = 0;
        acpi_status status = AE_OK;
        static int cpu0_initialized;
        unsigned long long value;

        acpi_processor_errata();

        /*
         * Check to see if we have bus mastering arbitration control.  This
         * is required for proper C3 usage (to maintain cache coherency).
         */
        if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
                pr->flags.bm_control = 1;
                dev_dbg(&device->dev, "Bus mastering arbitration control present\n");
        } else
                dev_dbg(&device->dev, "No bus mastering arbitration control\n");

        if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
                /* Declared with "Processor" statement; match ProcessorID */
                status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status)) {
                        dev_err(&device->dev,
                                "Failed to evaluate processor object (0x%x)\n",
                                status);
                        return -ENODEV;
                }

                pr->acpi_id = object.processor.proc_id;
        } else {
                /*
                 * Declared with "Device" statement; match _UID.
                 */
                status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
                                                NULL, &value);
                if (ACPI_FAILURE(status)) {
                        dev_err(&device->dev,
                                "Failed to evaluate processor _UID (0x%x)\n",
                                status);
                        return -ENODEV;
                }
                device_declaration = 1;
                pr->acpi_id = value;
        }

        if (acpi_duplicate_processor_id(pr->acpi_id)) {
                if (pr->acpi_id == 0xff)
                        dev_info_once(&device->dev,
                                "Entry not well-defined, consider updating BIOS\n");
                else
                        dev_err(&device->dev,
                                "Failed to get unique processor _UID (0x%x)\n",
                                pr->acpi_id);
                return -ENODEV;
        }

        pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
                                        pr->acpi_id);
        if (invalid_phys_cpuid(pr->phys_id))
                dev_dbg(&device->dev, "Failed to get CPU physical ID.\n");

        pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
        if (!cpu0_initialized) {
                cpu0_initialized = 1;
                /*
                 * Handle UP system running SMP kernel, with no CPU
                 * entry in MADT
                 */
                if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) &&
                    (num_online_cpus() == 1))
                        pr->id = 0;
                /*
                 * Check availability of Processor Performance Control by
                 * looking at the presence of the _PCT object under the first
                 * processor definition.
                 */
                if (acpi_has_method(pr->handle, "_PCT"))
                        cpufreq_add_device("acpi-cpufreq");
        }

        /*
         *  Extra Processor objects may be enumerated on MP systems with
         *  less than the max # of CPUs. They should be ignored if, and only
         *  if, they are not physically present.
         *
         *  NOTE: Even if the processor has a cpuid, it may not be present
         *  because cpuid <-> apicid mapping is persistent now.
         */
        if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
                int ret = acpi_processor_hotadd_init(pr);

                if (ret)
                        return ret;
        }

        /*
         * On some systems several processor devices share the same bus id
         * but live in different scopes, for example:
         * \_SB.SCK0.CPU0
         * \_SB.SCK1.CPU0
         * Rename the device bus id to the unique "CPU<ID>" format.
         */
        sprintf(acpi_device_bid(device), "CPU%X", pr->id);
        dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id);

        if (!object.processor.pblk_address)
                dev_dbg(&device->dev, "No PBLK (NULL address)\n");
        else if (object.processor.pblk_length != 6)
                dev_err(&device->dev, "Invalid PBLK length [%d]\n",
                            object.processor.pblk_length);
        else {
                pr->throttling.address = object.processor.pblk_address;
                pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
                pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

                pr->pblk = object.processor.pblk_address;
        }

        /*
         * If ACPI describes a slot number for this CPU, we can use it to
         * ensure we get the right value in the "physical id" field
         * of /proc/cpuinfo
         */
        status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
        if (ACPI_SUCCESS(status))
                arch_fix_phys_package_id(pr->id, value);

        return 0;
}

/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up by the processor driver's .probe().
 */
static DEFINE_PER_CPU(void *, processor_device_array);

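/*
 * Allocate and set up the struct acpi_processor for the device, record it in
 * the per-CPU arrays and bind it to the corresponding CPU device.
 */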
static int acpi_processor_add(struct acpi_device *device,
                                        const struct acpi_device_id *id)
{
        struct acpi_processor *pr;
        struct device *dev;
        int result = 0;

        pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
        if (!pr)
                return -ENOMEM;

        if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
                result = -ENOMEM;
                goto err_free_pr;
        }

        pr->handle = device->handle;
        strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
        device->driver_data = pr;

        result = acpi_processor_get_info(device);
        if (result) /* Processor is not physically present or unavailable */
                return 0;

        BUG_ON(pr->id >= nr_cpu_ids);

        /*
         * Buggy BIOS check.
         * ACPI id of processors can be reported wrongly by the BIOS.
         * Don't trust it blindly
         */
        if (per_cpu(processor_device_array, pr->id) != NULL &&
            per_cpu(processor_device_array, pr->id) != device) {
                dev_warn(&device->dev,
                        "BIOS reported wrong ACPI id %d for the processor\n",
                        pr->id);
                /* Give up, but do not abort the namespace scan. */
                goto err;
        }
        /*
         * processor_device_array is not cleared on errors to allow buggy BIOS
         * checks.
         */
        per_cpu(processor_device_array, pr->id) = device;
        per_cpu(processors, pr->id) = pr;

        dev = get_cpu_device(pr->id);
        if (!dev) {
                result = -ENODEV;
                goto err;
        }

        result = acpi_bind_one(dev, device);
        if (result)
                goto err;

        pr->dev = dev;

        /* Trigger the processor driver's .probe() if present. */
        if (device_attach(dev) >= 0)
                return 1;

        dev_err(dev, "Processor driver could not be attached\n");
        acpi_unbind_one(dev);

 err:
        free_cpumask_var(pr->throttling.shared_cpu_map);
        device->driver_data = NULL;
        per_cpu(processors, pr->id) = NULL;
 err_free_pr:
        kfree(pr);
        return result;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Removal */
static void acpi_processor_remove(struct acpi_device *device)
{
        struct acpi_processor *pr;

        if (!device || !acpi_driver_data(device))
                return;

        pr = acpi_driver_data(device);
        if (pr->id >= nr_cpu_ids)
                goto out;

        /*
         * The only reason why we ever get here is CPU hot-removal.  The CPU is
         * already offline and the ACPI device removal locking prevents it from
         * being put back online at this point.
         *
         * Unbind the driver from the processor device and detach it from the
         * ACPI companion object.
         */
        device_release_driver(pr->dev);
        acpi_unbind_one(pr->dev);

        /* Clean up. */
        per_cpu(processor_device_array, pr->id) = NULL;
        per_cpu(processors, pr->id) = NULL;

        cpu_maps_update_begin();
        cpus_write_lock();

        /* Remove the CPU. */
        arch_unregister_cpu(pr->id);
        acpi_unmap_cpu(pr->id);

        cpus_write_unlock();
        cpu_maps_update_done();

        try_offline_node(cpu_to_node(pr->id));

 out:
        free_cpumask_var(pr->throttling.shared_cpu_map);
        kfree(pr);
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
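/*
 * Check whether the handle corresponds to a CPU that is actually present.
 * Under a Xen dom0 the hypervisor is asked instead, since dom0 may see
 * fewer CPUs than the platform really has.
 */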
bool __init processor_physically_present(acpi_handle handle)
{
        int cpuid, type;
        u32 acpi_id;
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long tmp;
        union acpi_object object = {};
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return false;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = object.processor.proc_id;
                break;
        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
                                               NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = tmp;
                break;
        default:
                return false;
        }

        if (xen_initial_domain())
                /*
                 * When running as a Xen dom0 the number of processors Linux
                 * sees can be different from the real number of processors on
                 * the system, and we still need to execute _PDC or _OSC for
                 * all of them.
                 */
                return xen_processor_present(acpi_id);

        type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
        cpuid = acpi_get_cpuid(handle, type, acpi_id);

        return !invalid_logical_cpuid(cpuid);
}

/* vendor specific UUID indicating an Intel platform */
static u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";

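/*
 * Evaluate _OSC for one processor, advertising the capabilities filled in by
 * the arch code.
 */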
static acpi_status __init acpi_processor_osc(acpi_handle handle, u32 lvl,
                                             void *context, void **rv)
{
        u32 capbuf[2] = {};
        struct acpi_osc_context osc_context = {
                .uuid_str = sb_uuid_str,
                .rev = 1,
                .cap.length = 8,
                .cap.pointer = capbuf,
        };
        acpi_status status;

        if (!processor_physically_present(handle))
                return AE_OK;

        arch_acpi_set_proc_cap_bits(&capbuf[OSC_SUPPORT_DWORD]);

        status = acpi_run_osc(handle, &osc_context);
        if (ACPI_FAILURE(status))
                return status;

        kfree(osc_context.ret.pointer);

        return AE_OK;
}

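/* Evaluate _OSC for all processor objects and processor devices in the namespace. */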
static bool __init acpi_early_processor_osc(void)
{
        acpi_status status;

        acpi_proc_quirk_mwait_check();

        status = acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                                     ACPI_UINT32_MAX, acpi_processor_osc, NULL,
                                     NULL, NULL);
        if (ACPI_FAILURE(status))
                return false;

        status = acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_osc,
                                  NULL, NULL);
        if (ACPI_FAILURE(status))
                return false;

        return true;
}

void __init acpi_early_processor_control_setup(void)
{
        if (acpi_early_processor_osc()) {
                pr_info("_OSC evaluated successfully for all CPUs\n");
        } else {
                pr_info("_OSC evaluation for CPUs failed, trying _PDC\n");
                acpi_early_processor_set_pdc();
        }
}
#endif

/*
 * The following ACPI IDs are known to be suitable for representing as
 * processor devices.
 */
static const struct acpi_device_id processor_device_ids[] = {

        { ACPI_PROCESSOR_OBJECT_HID, },
        { ACPI_PROCESSOR_DEVICE_HID, },

        { }
};

static struct acpi_scan_handler processor_handler = {
        .ids = processor_device_ids,
        .attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        .detach = acpi_processor_remove,
#endif
        .hotplug = {
                .enabled = true,
        },
};

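/* Processor containers need no handling of their own; just claim them. */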
static int acpi_processor_container_attach(struct acpi_device *dev,
                                           const struct acpi_device_id *id)
{
        return 1;
}

static const struct acpi_device_id processor_container_ids[] = {
        { ACPI_PROCESSOR_CONTAINER_HID, },
        { }
};

static struct acpi_scan_handler processor_container_handler = {
        .ids = processor_container_ids,
        .attach = acpi_processor_container_attach,
};

/* The number of unique processor IDs */
static int nr_unique_ids __initdata;

/* The number of duplicate processor IDs */
static int nr_duplicate_ids;

/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
        [0 ... NR_CPUS - 1] = -1,
};

/* Used to store the duplicate processor IDs */
static int duplicate_processor_ids[] = {
        [0 ... NR_CPUS - 1] = -1,
};

static void __init processor_validated_ids_update(int proc_id)
{
        int i;

        if (nr_unique_ids == NR_CPUS || nr_duplicate_ids == NR_CPUS)
                return;

        /*
         * Firstly, compare the proc_id with duplicate IDs, if the proc_id is
         * already in the IDs, do nothing.
         */
        for (i = 0; i < nr_duplicate_ids; i++) {
                if (duplicate_processor_ids[i] == proc_id)
                        return;
        }

        /*
         * Secondly, compare the proc_id with unique IDs, if the proc_id is in
         * the IDs, put it in the duplicate IDs.
         */
        for (i = 0; i < nr_unique_ids; i++) {
                if (unique_processor_ids[i] == proc_id) {
                        duplicate_processor_ids[nr_duplicate_ids] = proc_id;
                        nr_duplicate_ids++;
                        return;
                }
        }

        /*
         * Lastly, the proc_id is a unique ID, put it in the unique IDs.
         */
        unique_processor_ids[nr_unique_ids] = proc_id;
        nr_unique_ids++;
}

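/*
 * Namespace walk callback: extract the ACPI processor id of each processor
 * object or device and record whether it is unique.
 */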
static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
                                                  u32 lvl,
                                                  void *context,
                                                  void **rv)
{
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long uid;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return status;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        goto err;
                uid = object.processor.proc_id;
                break;

        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
                if (ACPI_FAILURE(status))
                        goto err;
                break;
        default:
                goto err;
        }

        processor_validated_ids_update(uid);
        return AE_OK;

err:
        /* Exit on error, but don't abort the namespace walk */
        acpi_handle_info(handle, "Invalid processor object\n");
        return AE_OK;
}

static void __init acpi_processor_check_duplicates(void)
{
        /* check the correctness for all processors in ACPI namespace */
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                                                ACPI_UINT32_MAX,
                                                acpi_processor_ids_walk,
                                                NULL, NULL, NULL);
        acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
                                                NULL, NULL);
}

bool acpi_duplicate_processor_id(int proc_id)
{
        int i;

        /*
         * compare the proc_id with duplicate IDs, if the proc_id is already
         * in the duplicate IDs, return true, otherwise, return false.
         */
        for (i = 0; i < nr_duplicate_ids; i++) {
                if (duplicate_processor_ids[i] == proc_id)
                        return true;
        }
        return false;
}

void __init acpi_processor_init(void)
{
        acpi_processor_check_duplicates();
        acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
        acpi_scan_add_handler(&processor_container_handler);
        acpi_pcc_cpufreq_init();
}

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
/**
 * acpi_processor_claim_cst_control - Request _CST control from the platform.
 */
bool acpi_processor_claim_cst_control(void)
{
        static bool cst_control_claimed;
        acpi_status status;

        if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
                return true;

        status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
                                    acpi_gbl_FADT.cst_control, 8);
        if (ACPI_FAILURE(status)) {
                pr_warn("Failed to claim processor _CST control\n");
                return false;
        }

        cst_control_claimed = true;
        return true;
}
EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);

/**
 * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
 * @handle: ACPI handle of the processor object containing the _CST.
 * @cpu: The numeric ID of the target CPU.
 * @info: Object to write the C-states information into.
 *
 * Extract the C-state information for the given CPU from the output of the _CST
 * control method under the corresponding ACPI processor object (or processor
 * device object) and populate @info with it.
 *
 * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
 * acpi_processor_ffh_cstate_probe() to verify them and update the
 * cpu_cstate_entry data for @cpu.
 */
int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
                                struct acpi_processor_power *info)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;
        acpi_status status;
        u64 count;
        int last_index = 0;
        int i, ret = 0;

        status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                acpi_handle_debug(handle, "No _CST\n");
                return -ENODEV;
        }

        cst = buffer.pointer;

        /* There must be at least 2 elements. */
        if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
                acpi_handle_warn(handle, "Invalid _CST output\n");
                ret = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate the number of C-states. */
        if (count < 1 || count != cst->package.count - 1) {
                acpi_handle_warn(handle, "Inconsistent _CST data\n");
                ret = -EFAULT;
                goto end;
        }

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                /*
                 * If there is not enough space for all C-states, skip the
                 * excess ones and log a warning.
                 */
                if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
                        acpi_handle_warn(handle,
                                         "No room for more idle states (limit: %d)\n",
                                         ACPI_PROCESSOR_MAX_POWER - 1);
                        break;
                }

                memset(&cx, 0, sizeof(cx));

                element = &cst->package.elements[i];
                if (element->type != ACPI_TYPE_PACKAGE) {
                        acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
                                         i, element->type);
                        continue;
                }

                if (element->package.count != 4) {
                        acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
                                         i, element->package.count);
                        continue;
                }

                obj = &element->package.elements[0];

                if (obj->type != ACPI_TYPE_BUFFER) {
                        acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
                                         i, obj->type);
                        continue;
                }

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                obj = &element->package.elements[1];
                if (obj->type != ACPI_TYPE_INTEGER) {
                        acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n",
                                         i, obj->type);
                        continue;
                }

                cx.type = obj->integer.value;
                /*
                 * There are known cases in which the _CST output does not
                 * contain C1, so if the type of the first state found is not
                 * C1, leave an empty slot for C1 to be filled in later.
                 */
                if (i == 1 && cx.type != ACPI_STATE_C1)
                        last_index = 1;

                cx.address = reg->address;
                cx.index = last_index + 1;

                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
                        if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
                                /*
                                 * In the majority of cases _CST describes C1 as
                                 * a FIXED_HARDWARE C-state, but if the command
                                 * line forbids using MWAIT, use CSTATE_HALT for
                                 * C1 regardless.
                                 */
                                if (cx.type == ACPI_STATE_C1 &&
                                    boot_option_idle_override == IDLE_NOMWAIT) {
                                        cx.entry_method = ACPI_CSTATE_HALT;
                                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                                } else {
                                        cx.entry_method = ACPI_CSTATE_FFH;
                                }
                        } else if (cx.type == ACPI_STATE_C1) {
                                /*
                                 * In the special case of C1, FIXED_HARDWARE can
                                 * be handled by executing the HLT instruction.
                                 */
                                cx.entry_method = ACPI_CSTATE_HALT;
                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                        } else {
                                acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
                                                 i);
                                continue;
                        }
                } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                        cx.entry_method = ACPI_CSTATE_SYSTEMIO;
                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
                                 cx.address);
                } else {
                        acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
                                         i, reg->space_id);
                        continue;
                }

                if (cx.type == ACPI_STATE_C1)
                        cx.valid = 1;

                obj = &element->package.elements[2];
                if (obj->type != ACPI_TYPE_INTEGER) {
                        acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
                                         i, obj->type);
                        continue;
                }

                cx.latency = obj->integer.value;

                obj = &element->package.elements[3];
                if (obj->type != ACPI_TYPE_INTEGER) {
                        acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
                                         i, obj->type);
                        continue;
                }

                memcpy(&info->states[++last_index], &cx, sizeof(cx));
        }

        acpi_handle_info(handle, "Found %d idle states\n", last_index);

        info->count = last_index;

end:
        kfree(buffer.pointer);

        return ret;
}
EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */