// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 */
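
/*
 * Firmware sends a 0x80 notification to the Processor Aggregator
 * Device when it wants the OS to take some logical CPUs out of
 * service. The driver evaluates _PUR to learn how many CPUs to idle,
 * then runs that many high-priority "acpi_pad/%d" kernel threads, each
 * of which occupies one CPU and keeps it in a deep MWAIT C-state for
 * most of the time.
 */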

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <asm/mwait.h>
#include <xen/xen.h>	/* xen_initial_domain() */

#define ACPI_PROCESSOR_AGGREGATOR_CLASS	"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80

static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);

static unsigned long power_saving_mwait_eax;

static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
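
/*
 * Probe CPUID for the deepest MWAIT C-state/sub-state available and
 * cache the corresponding EAX hint. Also note whether the TSC would
 * stop in that state, so it can be marked unstable before first use.
 */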
static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);

#if defined(CONFIG_X86)
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			tsc_detected_unstable = 1;
		break;
	default:
		/* TSC could halt in idle */
		tsc_detected_unstable = 1;
	}
#endif
}

static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
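
/*
 * Move power-saving thread tsk_index to the least-loaded eligible CPU,
 * preferring CPUs whose HT siblings do not already run a pad thread,
 * and update the busy-CPU bookkeeping accordingly.
 */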
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long preferred_cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));

	free_cpumask_var(tmp);
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);

	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
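
/*
 * A power-saving thread migrates to a fresh CPU every round_robin_time
 * seconds and keeps its CPU in MWAIT for about (100 - idle_pct)% of
 * each second, napping for the remaining idle_pct% so other tasks are
 * not starved.
 */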
static int power_saving_thread(void *data)
{
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		unsigned long expire_time;

		/* round robin to cpus */
		expire_time = last_jiffies + round_robin_time * HZ;
		if (time_before(expire_time, jiffies)) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			local_irq_disable();

			perf_lopwr_cb(true);

			tick_broadcast_enable();
			tick_broadcast_enter();
			stop_critical_timings();

			mwait_idle_with_hints(power_saving_mwait_eax, 1);

			start_critical_timings();
			tick_broadcast_exit();

			perf_lopwr_cb(false);

			local_irq_enable();

			if (time_before(expire_time, jiffies)) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * The RT scheduler has a threshold for RT task runtime:
		 * when an RT task uses 95% of the CPU time, it is scheduled
		 * out for 5% of the time so other tasks are not starved.
		 * That mechanism only works when every CPU has an RT task
		 * running; if one CPU has no RT task, RT tasks from other
		 * CPUs borrow CPU time from it and end up using more than
		 * 95%. To make the anti-starvation logic work, take a nap
		 * here.
		 */
		if (unlikely(do_sleep))
			schedule_timeout_killable(HZ * idle_pct / 100);

		/*
		 * If an external event has set the need_resched flag, then
		 * we need to deal with it, or this loop will continue to
		 * spin without calling __mwait().
		 */
		if (unlikely(need_resched()))
			schedule();
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
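
/* Start one more power-saving thread; returns 0 on success or -errno. */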
static int create_power_saving_task(void)
{
	int rc;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
			(void *)(unsigned long)ps_tsk_num,
			"acpi_pad/%d", ps_tsk_num);

	if (IS_ERR(ps_tsks[ps_tsk_num])) {
		rc = PTR_ERR(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	} else {
		rc = 0;
		ps_tsk_num++;
	}

	return rc;
}

static void destroy_power_saving_task(void)
{
	if (ps_tsk_num > 0) {
		ps_tsk_num--;
		kthread_stop(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	}
}

static void set_power_saving_task_num(unsigned int num)
{
	if (num > ps_tsk_num) {
		while (ps_tsk_num < num) {
			if (create_power_saving_task())
				return;
		}
	} else if (num < ps_tsk_num) {
		while (ps_tsk_num > num)
			destroy_power_saving_task();
	}
}
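
/*
 * Idle the requested number of CPUs, clamped to the number currently
 * online. CPU hotplug is held off while the threads are adjusted.
 */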
static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
	cpus_read_lock();

	num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
	set_power_saving_task_num(num_cpus);

	cpus_read_unlock();
}

static uint32_t acpi_pad_idle_cpus_num(void)
{
	return ps_tsk_num;
}
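
/*
 * Sysfs interface, created on the PAD device (typically under
 * /sys/bus/acpi/devices/ACPI000C:00/): "idlecpus" idles the written
 * number of CPUs and reads back the mask of idled CPUs, "idlepct"
 * controls what fraction of each second a pad thread yields its CPU
 * back to the scheduler, and "rrtime" sets the round-robin interval
 * in seconds.
 */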
static ssize_t rrtime_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	round_robin_time = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t rrtime_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", round_robin_time);
}
static DEVICE_ATTR_RW(rrtime);

static ssize_t idlepct_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	idle_pct = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t idlepct_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", idle_pct);
}
static DEVICE_ATTR_RW(idlepct);

static ssize_t idlecpus_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(num);
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t idlecpus_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(false, buf,
				       to_cpumask(pad_busy_cpus_bits));
}

static DEVICE_ATTR_RW(idlecpus);

static int acpi_pad_add_sysfs(struct acpi_device *device)
{
	int result;

	result = device_create_file(&device->dev, &dev_attr_idlecpus);
	if (result)
		return -ENODEV;
	result = device_create_file(&device->dev, &dev_attr_idlepct);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		return -ENODEV;
	}
	result = device_create_file(&device->dev, &dev_attr_rrtime);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		device_remove_file(&device->dev, &dev_attr_idlepct);
		return -ENODEV;
	}
	return 0;
}

static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
	device_remove_file(&device->dev, &dev_attr_idlecpus);
	device_remove_file(&device->dev, &dev_attr_idlepct);
	device_remove_file(&device->dev, &dev_attr_rrtime);
}

/*
 * Query firmware how many CPUs should be idle.
 * Returns -1 on failure.
 */
static int acpi_pad_pur(acpi_handle handle)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package;
	int num = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return num;

	if (!buffer.length || !buffer.pointer)
		return num;

	package = buffer.pointer;

	if (package->type == ACPI_TYPE_PACKAGE &&
		package->package.count == 2 &&
		package->package.elements[0].integer.value == 1) /* rev 1 */
		num = package->package.elements[1].integer.value;

	kfree(buffer.pointer);
	return num;
}
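
/*
 * Handle the 0x80 notification: ask firmware via _PUR how many CPUs to
 * idle, apply the request, then report the number actually idled back
 * to firmware through _OST.
 */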
static void acpi_pad_handle_notify(acpi_handle handle)
{
	int num_cpus;
	uint32_t idle_cpus;
	struct acpi_buffer param = {
		.length = 4,
		.pointer = (void *)&idle_cpus,
	};

	mutex_lock(&isolated_cpus_lock);
	num_cpus = acpi_pad_pur(handle);
	if (num_cpus < 0) {
		mutex_unlock(&isolated_cpus_lock);
		return;
	}
	acpi_pad_idle_cpus(num_cpus);
	idle_cpus = acpi_pad_idle_cpus_num();
	acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
	mutex_unlock(&isolated_cpus_lock);
}

static void acpi_pad_notify(acpi_handle handle, u32 event,
	void *data)
{
	struct acpi_device *device = data;

	switch (event) {
	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
		acpi_pad_handle_notify(handle);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
			dev_name(&device->dev), event, 0);
		break;
	default:
		pr_warn("Unsupported event [0x%x]\n", event);
		break;
	}
}

static int acpi_pad_add(struct acpi_device *device)
{
	acpi_status status;

	strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

	if (acpi_pad_add_sysfs(device))
		return -ENODEV;

	status = acpi_install_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
	if (ACPI_FAILURE(status)) {
		acpi_pad_remove_sysfs(device);
		return -ENODEV;
	}

	return 0;
}

static void acpi_pad_remove(struct acpi_device *device)
{
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(0);
	mutex_unlock(&isolated_cpus_lock);

	acpi_remove_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
	acpi_pad_remove_sysfs(device);
}
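
/* The ACPI spec assigns HID "ACPI000C" to the Processor Aggregator Device. */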
static const struct acpi_device_id pad_device_ids[] = {
	{"ACPI000C", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct acpi_driver acpi_pad_driver = {
	.name = "processor_aggregator",
	.class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
	.ids = pad_device_ids,
	.ops = {
		.add = acpi_pad_add,
		.remove = acpi_pad_remove,
	},
};
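
/*
 * The driver is useless without a usable MWAIT hint, hence the -EINVAL
 * bail-out below. Xen Dom0 is skipped because the hypervisor side
 * handles PAD there (see the xen-acpi-pad driver).
 */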
static int __init acpi_pad_init(void)
{
	/* Xen ACPI PAD is used when running as Xen Dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	power_saving_mwait_init();
	if (power_saving_mwait_eax == 0)
		return -EINVAL;

	return acpi_bus_register_driver(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
	acpi_bus_unregister_driver(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li <shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");