/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#define SAMPLE_COUNT		3

#define BYT_RATIOS		0x66a

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
        return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
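/*
 * Worked example (added commentary, assuming FRAC_BITS is 8): values are
 * kept in 24.8 fixed point, so int_tofp(1) == 256 and fp_toint(256) == 1.
 * mul_fp() widens to 64 bit before shifting back down, e.g.
 * mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6), and
 * div_fp(int_tofp(1), int_tofp(4)) == 64, i.e. 0.25 in fixed point.
 */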
struct sample {
        int32_t core_pct_busy;
        u64 aperf;
        u64 mperf;
        int freq;
};

struct pstate_data {
        int current_pstate;
        int min_pstate;
        int max_pstate;
        int turbo_pstate;
};

struct _pid {
        int setpoint;
        int32_t integral;
        int32_t p_gain;
        int32_t i_gain;
        int32_t d_gain;
        int deadband;
        int32_t last_err;
};

struct cpudata {
        int cpu;

        char name[64];

        struct timer_list timer;

        struct pstate_data pstate;
        struct _pid pid;

        int min_pstate_count;

        u64 prev_aperf;
        u64 prev_mperf;
        int sample_ptr;
        struct sample samples[SAMPLE_COUNT];
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
        int sample_rate_ms;
        int deadband;
        int setpoint;
        int p_gain_pct;
        int d_gain_pct;
        int i_gain_pct;
};

struct pstate_funcs {
        int (*get_max)(void);
        int (*get_min)(void);
        int (*get_turbo)(void);
        void (*set)(int pstate);
};

struct cpu_defaults {
        struct pstate_adjust_policy pid_policy;
        struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static struct perf_limits limits = {
        .no_turbo = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
        .min_perf = 0,
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                        int deadband, int integral)
{
        pid->setpoint = setpoint;
        pid->deadband = deadband;
        pid->integral = int_tofp(integral);
        pid->last_err = setpoint - busy;
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
        pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
        pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
        pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
        signed int result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;

        fp_error = int_tofp(pid->setpoint) - busy;

        if (abs(fp_error) <= int_tofp(pid->deadband))
                return 0;

        pterm = mul_fp(pid->p_gain, fp_error);

        pid->integral += fp_error;

        /* limit the integral term */
        integral_limit = int_tofp(30);
        if (pid->integral > integral_limit)
                pid->integral = integral_limit;
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;

        dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
        pid->last_err = fp_error;

        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

        return (signed int)fp_toint(result);
}
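/*
 * Added commentary: pid_calc() is a conventional PID step in fixed point.
 * The error is "setpoint minus scaled busyness": inside the deadband the
 * controller requests no change, the integral term is clamped to +/-30 to
 * limit wind-up, and the result is converted back to a plain integer that
 * the caller treats as a number of P state steps.
 */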
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
        pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
        pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
        pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

        pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu])
                        intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
        }
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
        *(u32 *)data = val;
        intel_pstate_reset_all_pid();
        return 0;
}

static int pid_param_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
                        pid_param_set, "%llu\n");

struct pid_param {
        char *name;
        void *value;
};

static struct pid_param pid_files[] = {
        {"sample_rate_ms", &pid_params.sample_rate_ms},
        {"d_gain_pct", &pid_params.d_gain_pct},
        {"i_gain_pct", &pid_params.i_gain_pct},
        {"deadband", &pid_params.deadband},
        {"setpoint", &pid_params.setpoint},
        {"p_gain_pct", &pid_params.p_gain_pct},
        {NULL, NULL}
};

static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
        int i = 0;

        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
        while (pid_files[i].name) {
                debugfs_create_file(pid_files[i].name, 0660,
                                debugfs_parent, pid_files[i].value,
                                &fops_pid_param);
                i++;
        }
}

/************************** debugfs end ************************/
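/*
 * Usage sketch (illustrative): with debugfs mounted at /sys/kernel/debug,
 * the PID tunables appear under /sys/kernel/debug/pstate_snb/, e.g.
 *
 *      echo 97 > /sys/kernel/debug/pstate_snb/setpoint
 *
 * Every write goes through pid_param_set(), which also resets the per-CPU
 * PID state via intel_pstate_reset_all_pid().
 */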
/************************** sysfs begin ************************/
#define show_one(file_name, object)                                     \
        static ssize_t show_##file_name                                 \
        (struct kobject *kobj, struct attribute *attr, char *buf)       \
        {                                                               \
                return sprintf(buf, "%u\n", limits.object);             \
        }

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        limits.no_turbo = clamp_t(int, input, 0, 1);

        return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

        return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits.min_perf_pct = clamp_t(int, input, 0, 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

        return count;
}

show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);

static struct attribute *intel_pstate_attributes[] = {
        &no_turbo.attr,
        &max_perf_pct.attr,
        &min_perf_pct.attr,
        NULL
};

static struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
};
static struct kobject *intel_pstate_kobject;

static void intel_pstate_sysfs_expose_params(void)
{
        int rc;

        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
                                                &cpu_subsys.dev_root->kobj);
        BUG_ON(!intel_pstate_kobject);
        rc = sysfs_create_group(intel_pstate_kobject,
                                &intel_pstate_attr_group);
        BUG_ON(rc);
}

/************************** sysfs end ************************/
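/*
 * Usage sketch (illustrative): the attribute group above is created under
 * the cpu subsystem, i.e. /sys/devices/system/cpu/intel_pstate/, so the
 * runtime limits can be adjusted with e.g.
 *
 *      echo 75 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *      echo 1  > /sys/devices/system/cpu/intel_pstate/no_turbo
 *
 * Note that max_perf_pct is additionally capped by the cpufreq policy
 * maximum (max_policy_pct), see intel_pstate_set_policy() below.
 */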
static int byt_get_min_pstate(void)
{
        u64 value;

        rdmsrl(BYT_RATIOS, value);
        return value & 0xFF;
}

static int byt_get_max_pstate(void)
{
        u64 value;

        rdmsrl(BYT_RATIOS, value);
        return (value >> 16) & 0xFF;
}
static int core_get_min_pstate(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
}
static int core_get_turbo_pstate(void)
{
        u64 value;
        int nont, ret;

        rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = (value) & 255;
        if (ret <= nont)
                ret = nont;
        return ret;
}
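/*
 * Added commentary: bits 7:0 of MSR_NHM_TURBO_RATIO_LIMIT hold the maximum
 * turbo ratio with one core active; the value is clamped so the reported
 * turbo P state is never below the non-turbo maximum read from
 * MSR_PLATFORM_INFO.
 */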
static void core_set_pstate(int pstate)
{
        u64 val;

        val = pstate << 8;
        if (limits.no_turbo)
                val |= (u64)1 << 32;

        wrmsrl(MSR_IA32_PERF_CTL, val);
}
static struct cpu_defaults core_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 97,
                .p_gain_pct = 20,
                .d_gain_pct = 0,
                .i_gain_pct = 0,
        },
        .funcs = {
                .get_max = core_get_max_pstate,
                .get_min = core_get_min_pstate,
                .get_turbo = core_get_turbo_pstate,
                .set = core_set_pstate,
        },
};
static struct cpu_defaults byt_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 97,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
        },
        .funcs = {
                .get_max = byt_get_max_pstate,
                .get_min = byt_get_min_pstate,
                .get_turbo = byt_get_max_pstate,
                .set = core_set_pstate,
        },
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
        int max_perf = cpu->pstate.turbo_pstate;
        int max_perf_adj;
        int min_perf;

        if (limits.no_turbo)
                max_perf = cpu->pstate.max_pstate;

        max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
        *max = clamp_t(int, max_perf_adj,
                       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

        min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
        *min = clamp_t(int, min_perf,
                       cpu->pstate.min_pstate, max_perf);
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
        int max_perf, min_perf;

        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

        pstate = clamp_t(int, pstate, min_perf, max_perf);

        if (pstate == cpu->pstate.current_pstate)
                return;

        trace_cpu_frequency(pstate * 100000, cpu->cpu);

        cpu->pstate.current_pstate = pstate;

        pstate_funcs.set(pstate);
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
        int target;

        target = cpu->pstate.current_pstate + steps;
        intel_pstate_set_pstate(cpu, target);
}

static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
        int target;

        target = cpu->pstate.current_pstate - steps;
        intel_pstate_set_pstate(cpu, target);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
        sprintf(cpu->name, "Intel 2nd generation core");

        cpu->pstate.min_pstate = pstate_funcs.get_min();
        cpu->pstate.max_pstate = pstate_funcs.get_max();
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

        /*
         * Go to max P state so we don't slow down boot if we are built-in.
         * If we are a module we will take care of it during normal operation.
         */
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
{
        u64 core_pct;

        core_pct = div64_u64(int_tofp(sample->aperf * 100),
                             sample->mperf);
        sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);

        sample->core_pct_busy = core_pct;
}
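/*
 * Added commentary: APERF advances at the actual running frequency while
 * MPERF advances at the guaranteed (max non-turbo) frequency, so the delta
 * ratio aperf * 100 / mperf is the core's performance as a percentage of
 * the max P state.  freq is then derived from max_pstate (a ratio in units
 * of 100 MHz) scaled by that percentage, giving a value in kHz.
 */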
static inline void intel_pstate_sample(struct cpudata *cpu)
{
        u64 aperf, mperf;

        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
        cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
        cpu->samples[cpu->sample_ptr].aperf = aperf;
        cpu->samples[cpu->sample_ptr].mperf = mperf;
        cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
        cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;

        intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);

        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
        int sample_time, delay;

        sample_time = pid_params.sample_rate_ms;
        delay = msecs_to_jiffies(sample_time);
        mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
        int32_t core_busy, max_pstate, current_pstate;

        core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
}
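/*
 * Added commentary: core_pct_busy is measured relative to the maximum
 * non-turbo P state, so it is rescaled by max_pstate/current_pstate to
 * express how busy the core looks at the P state it is currently running
 * at; this scaled value is what the PID compares against its setpoint.
 */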
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
        int32_t busy_scaled;
        struct _pid *pid;
        signed int ctl = 0;
        int steps;

        pid = &cpu->pid;
        busy_scaled = intel_pstate_get_scaled_busy(cpu);

        ctl = pid_calc(pid, busy_scaled);

        steps = abs(ctl);

        if (ctl < 0)
                intel_pstate_pstate_increase(cpu, steps);
        else
                intel_pstate_pstate_decrease(cpu, steps);
}
static void intel_pstate_timer_func(unsigned long __data)
{
        struct cpudata *cpu = (struct cpudata *) __data;

        intel_pstate_sample(cpu);
        intel_pstate_adjust_busy_pstate(cpu);

        if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
                cpu->min_pstate_count++;
                if (!(cpu->min_pstate_count % 5)) {
                        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
                }
        } else
                cpu->min_pstate_count = 0;

        intel_pstate_set_sample_time(cpu);
}
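/*
 * Added commentary: this per-CPU deferrable timer is the driver's main
 * loop: take an APERF/MPERF sample, run the PID on the scaled busy value,
 * then re-arm for sample_rate_ms.  The min_pstate_count logic bumps a CPU
 * that has sat at the minimum P state for several consecutive samples back
 * to the maximum, presumably so the next sample is not taken from a
 * starved operating point.
 */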
#define ICPU(model, policy) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
                        (unsigned long)&policy }
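/*
 * Added commentary: the model numbers below are believed to correspond to
 * Sandy Bridge (0x2a/0x2d), Bay Trail Silvermont (0x37), Ivy Bridge
 * (0x3a/0x3e) and Haswell variants (0x3c/0x3f/0x45/0x46); all entries
 * require the APERF/MPERF feature flag.
 */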
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, core_params),
        ICPU(0x2d, core_params),
        ICPU(0x37, byt_params),
        ICPU(0x3a, core_params),
        ICPU(0x3c, core_params),
        ICPU(0x3e, core_params),
        ICPU(0x3f, core_params),
        ICPU(0x45, core_params),
        ICPU(0x46, core_params),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static int intel_pstate_init_cpu(unsigned int cpunum)
{
        struct cpudata *cpu;
        const struct x86_cpu_id *id;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
        if (!all_cpu_data[cpunum])
                return -ENOMEM;

        cpu = all_cpu_data[cpunum];

        cpu->cpu = cpunum;
        intel_pstate_get_cpu_pstates(cpu);
        if (!cpu->pstate.current_pstate) {
                all_cpu_data[cpunum] = NULL;
                kfree(cpu);
                return -ENODATA;
        }

        init_timer_deferrable(&cpu->timer);
        cpu->timer.function = intel_pstate_timer_func;
        cpu->timer.data = (unsigned long)cpu;
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
        intel_pstate_sample(cpu);
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

        add_timer_on(&cpu->timer, cpunum);

        pr_info("Intel pstate controlling: cpu %d\n", cpunum);

        return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
        struct sample *sample;
        struct cpudata *cpu;

        cpu = all_cpu_data[cpu_num];
        if (!cpu)
                return 0;
        sample = &cpu->samples[cpu->sample_ptr];
        return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        cpu = all_cpu_data[policy->cpu];

        if (!policy->cpuinfo.max_freq)
                return -ENODEV;

        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits.min_perf_pct = 100;
                limits.min_perf = int_tofp(1);
                limits.max_perf_pct = 100;
                limits.max_perf = int_tofp(1);
                return 0;
        }

        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

        limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
        limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

        return 0;
}
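/*
 * Worked example (illustrative numbers): with cpuinfo.max_freq of
 * 3200000 kHz, a policy of min = 1600000 and max = 2400000 kHz becomes
 * min_perf_pct = 50 and max_policy_pct = 75.  The effective max_perf_pct
 * is the smaller of max_policy_pct and whatever was written to sysfs
 * (max_sysfs_pct); both limits are converted to fixed point for the
 * clamping done in intel_pstate_get_min_max().
 */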
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_cpu_limits(policy);

        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
            (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
                return -EINVAL;

        return 0;
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
        int cpu = policy->cpu;

        del_timer(&all_cpu_data[cpu]->timer);
        kfree(all_cpu_data[cpu]);
        all_cpu_data[cpu] = NULL;
        return 0;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;
        int rc;

        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
                return rc;

        cpu = all_cpu_data[policy->cpu];

        if (!limits.no_turbo &&
            limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        policy->min = cpu->pstate.min_pstate * 100000;
        policy->max = cpu->pstate.turbo_pstate * 100000;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
        policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);

        return 0;
}
static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .name           = "intel_pstate",
};
static int __initdata no_load;
static int intel_pstate_msrs_not_valid(void)
{
        /* Check that all the msr's we are using are valid. */
        u64 aperf, mperf, tmp;

        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);

        if (!pstate_funcs.get_max() ||
            !pstate_funcs.get_min() ||
            !pstate_funcs.get_turbo())
                return -ENODEV;

        rdmsrl(MSR_IA32_APERF, tmp);
        if (!(tmp - aperf))
                return -ENODEV;

        rdmsrl(MSR_IA32_MPERF, tmp);
        if (!(tmp - mperf))
                return -ENODEV;

        return 0;
}
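/*
 * Added commentary: the MSRs are treated as usable only if the P state
 * callbacks return non-zero ratios and the APERF/MPERF counters have
 * advanced between the two reads above, i.e. they are actually implemented
 * and counting on this part.
 */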
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
        pid_params.sample_rate_ms = policy->sample_rate_ms;
        pid_params.p_gain_pct = policy->p_gain_pct;
        pid_params.i_gain_pct = policy->i_gain_pct;
        pid_params.d_gain_pct = policy->d_gain_pct;
        pid_params.deadband = policy->deadband;
        pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
        pstate_funcs.get_max = funcs->get_max;
        pstate_funcs.get_min = funcs->get_min;
        pstate_funcs.get_turbo = funcs->get_turbo;
        pstate_funcs.set = funcs->set;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
        int i;

        for_each_possible_cpu(i) {
                acpi_status status;
                union acpi_object *pss;
                struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_processor *pr = per_cpu(processors, i);

                if (!pr)
                        continue;

                status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
                if (ACPI_FAILURE(status))
                        continue;

                pss = buffer.pointer;
                if (pss && pss->type == ACPI_TYPE_PACKAGE) {
                        kfree(pss);
                        return false;
                }

                kfree(pss);
        }

        return true;
}
struct hw_vendor_info {
        u16  valid;
        char oem_id[ACPI_OEM_ID_SIZE];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
        {1, "HP    ", "ProLiant"},
        {0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
        struct acpi_table_header hdr;
        struct hw_vendor_info *v_info;

        if (acpi_disabled
            || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
                return false;

        for (v_info = vendor_info; v_info->valid; v_info++) {
                if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
                    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
                    && intel_pstate_no_acpi_pss())
                        return true;
        }

        return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */
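/*
 * Added commentary: the vendor table above is used to skip loading on
 * platforms (currently HP ProLiant firmware) that expose their own power
 * management interface instead of ACPI _PSS objects; on such systems the
 * firmware, not this driver, is expected to manage P states.
 */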
static int __init intel_pstate_init(void)
{
        int cpu, rc = 0;
        const struct x86_cpu_id *id;
        struct cpu_defaults *cpu_info;

        if (no_load)
                return -ENODEV;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        /*
         * The Intel pstate driver will be ignored if the platform
         * firmware has its own power management modes.
         */
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;

        cpu_info = (struct cpu_defaults *)id->driver_data;

        copy_pid_params(&cpu_info->pid_policy);
        copy_cpu_funcs(&cpu_info->funcs);

        if (intel_pstate_msrs_not_valid())
                return -ENODEV;

        pr_info("Intel P-state driver initializing.\n");

        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;

        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
                goto out;

        intel_pstate_debug_expose_params();
        intel_pstate_sysfs_expose_params();

        return rc;
out:
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        del_timer_sync(&all_cpu_data[cpu]->timer);
                        kfree(all_cpu_data[cpu]);
                }
        }

        put_online_cpus();
        vfree(all_cpu_data);
        return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "disable"))
                no_load = 1;
        return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");