2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/notifier.h>
24 #include <linux/cpufreq.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/device.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/completion.h>
32 #include <linux/mutex.h>
33 #include <linux/syscore_ops.h>
35 #include <trace/events/power.h>
38 * The "cpufreq driver" - the arch- or hardware-dependent low
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
/*
 * Core driver state.
 *
 * cpufreq_driver      - the single registered low-level driver (or NULL).
 * cpufreq_cpu_data    - per-CPU pointer to the policy managing that CPU.
 * cpufreq_cpu_governor (hotplug only) - name of the governor a CPU was
 *                       using before it was unplugged, so it can be
 *                       restored on re-plug.
 * cpufreq_driver_lock - spinlock protecting cpufreq_driver and the
 *                       cpufreq_cpu_data array (per the comment above).
 *
 * NOTE(review): this excerpt appears to be missing interleaved lines
 * (e.g. the #endif for CONFIG_HOTPLUG_CPU is not visible) - verify
 * against the upstream file.
 */
42 static struct cpufreq_driver *cpufreq_driver;
43 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
44 #ifdef CONFIG_HOTPLUG_CPU
45 /* This one keeps track of the previously set governor of a removed CPU */
46 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 static DEFINE_SPINLOCK(cpufreq_driver_lock);
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
/*
 * cpufreq_policy_cpu maps a CPU to the "policy CPU" that owns the rwsem
 * for its policy; -1 means no policy is attached to that CPU.
 */
67 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
68 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
/*
 * Macro generators for the per-policy lock helpers.
 *
 * Each expansion of lock_policy_rwsem(mode, cpu) defines
 * lock_policy_rwsem_<mode>(int cpu): it resolves @cpu to its owning
 * policy CPU via cpufreq_policy_cpu and down_<mode>()s the matching
 * cpu_policy_rwsem entry.  BUG_ON(policy_cpu == -1) catches calls on a
 * CPU that has no policy attached.  unlock_policy_rwsem() generates the
 * symmetric up_<mode>() helpers.
 *
 * NOTE(review): the braces and trailing return of the macro bodies are
 * not visible in this excerpt - verify the exact expansion upstream.
 */
70 #define lock_policy_rwsem(mode, cpu) \
71 static int lock_policy_rwsem_##mode(int cpu) \
73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
80 lock_policy_rwsem(read, cpu);
81 lock_policy_rwsem(write, cpu);
83 #define unlock_policy_rwsem(mode, cpu) \
84 static void unlock_policy_rwsem_##mode(int cpu) \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
91 unlock_policy_rwsem(read, cpu);
92 unlock_policy_rwsem(write, cpu);
94 /* internal prototypes */
95 static int __cpufreq_governor(struct cpufreq_policy *policy,
97 static unsigned int __cpufreq_get(unsigned int cpu);
98 static void handle_update(struct work_struct *work);
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
/*
 * Notifier infrastructure: a blocking chain for policy changes and an
 * SRCU chain for frequency transitions.  The SRCU head needs runtime
 * initialization, done by the pure_initcall below; the _called flag lets
 * cpufreq_register_notifier() WARN if a notifier is registered before
 * that initcall has run.
 */
107 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
108 static struct srcu_notifier_head cpufreq_transition_notifier_list;
110 static bool init_cpufreq_transition_notifier_list_called;
111 static int __init init_cpufreq_transition_notifier_list(void)
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
114 init_cpufreq_transition_notifier_list_called = true;
117 pure_initcall(init_cpufreq_transition_notifier_list);
/*
 * Global "cpufreq disabled" switch.  cpufreq_disabled() presumably
 * returns 'off'; disable_cpufreq() presumably sets it - their bodies
 * are not visible in this excerpt (TODO confirm upstream).
 */
119 static int off __read_mostly;
120 static int cpufreq_disabled(void)
124 void disable_cpufreq(void)
/* Registered governors and the mutex serializing list access. */
128 static LIST_HEAD(cpufreq_governor_list);
129 static DEFINE_MUTEX(cpufreq_governor_mutex);
/*
 * Reference-counted policy lookup.
 *
 * __cpufreq_cpu_get() - under cpufreq_driver_lock, validate @cpu, take a
 * module reference on the driver, and return the CPU's policy.  When
 * @sysfs is false it additionally takes a kobject reference on the
 * policy; sysfs callers skip that because the kobject is already pinned
 * by the open sysfs file.  Error paths drop the module ref and unlock.
 *
 * NOTE(review): several lines (error labels, NULL returns, closing
 * braces) are missing from this excerpt.
 */
131 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
133 struct cpufreq_policy *data;
136 if (cpu >= nr_cpu_ids)
139 /* get the cpufreq driver */
140 spin_lock_irqsave(&cpufreq_driver_lock, flags);
145 if (!try_module_get(cpufreq_driver->owner))
150 data = per_cpu(cpufreq_cpu_data, cpu);
153 goto err_out_put_module;
155 if (!sysfs && !kobject_get(&data->kobj))
156 goto err_out_put_module;
158 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
162 module_put(cpufreq_driver->owner);
164 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* Public wrapper: NULL when cpufreq is globally disabled. */
169 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
171 if (cpufreq_disabled())
174 return __cpufreq_cpu_get(cpu, false);
176 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/* sysfs-path variant: no kobject_get (see __cpufreq_cpu_get). */
178 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
180 return __cpufreq_cpu_get(cpu, true);
/*
 * Release counterpart: drops the kobject ref (non-sysfs callers only)
 * and the driver module ref taken by __cpufreq_cpu_get().
 */
183 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
186 kobject_put(&data->kobj);
187 module_put(cpufreq_driver->owner);
190 void cpufreq_cpu_put(struct cpufreq_policy *data)
192 if (cpufreq_disabled())
195 __cpufreq_cpu_put(data, false);
197 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
199 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
201 __cpufreq_cpu_put(data, true);
204 /*********************************************************************
205 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
206 *********************************************************************/
209 * adjust_jiffies - adjust the system "loops_per_jiffy"
211 * This function alters the system "loops_per_jiffy" for the clock
212 * speed change. Note that loops_per_jiffy cannot be updated on SMP
213 * systems as each CPU might be scaled differently. So, use the arch
214 * per-CPU loops_per_jiffy value wherever possible.
/*
 * adjust_jiffies() - rescale loops_per_jiffy after a frequency change.
 *
 * On the first call it latches the current loops_per_jiffy and frequency
 * as the reference pair; on POSTCHANGE (when old != new) and on
 * RESUME/SUSPENDCHANGE it rescales loops_per_jiffy from that reference
 * via cpufreq_scale().  Drivers that set CPUFREQ_CONST_LOOPS opt out
 * entirely (early return implied by the flag test).  The inline stub at
 * the end is presumably the !SMP-relevant no-op variant selected by a
 * #ifdef that is not visible in this excerpt - TODO confirm.
 */
217 static unsigned long l_p_j_ref;
218 static unsigned int l_p_j_ref_freq;
220 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
222 if (ci->flags & CPUFREQ_CONST_LOOPS)
225 if (!l_p_j_ref_freq) {
226 l_p_j_ref = loops_per_jiffy;
227 l_p_j_ref_freq = ci->old;
228 pr_debug("saving %lu as reference value for loops_per_jiffy; "
229 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
231 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
232 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
233 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
235 pr_debug("scaling loops_per_jiffy to %lu "
236 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
240 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
248 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
249 * on frequency transition.
251 * This function calls the transition notifiers and the "adjust_jiffies"
252 * function. It is called twice on all CPU frequency changes that have
/*
 * cpufreq_notify_transition() - run transition notifiers and fix up
 * core bookkeeping around a frequency change.
 *
 * Called twice per transition: with CPUFREQ_PRECHANGE before the switch
 * and CPUFREQ_POSTCHANGE after.  PRECHANGE corrects freqs->old if the
 * driver's idea of the old frequency disagrees with policy->cur (unless
 * CPUFREQ_CONST_LOOPS is set), then fires the SRCU chain and
 * adjust_jiffies().  POSTCHANGE re-adjusts jiffies, emits tracepoints,
 * fires the chain, and commits freqs->new into policy->cur.
 * Must be called with interrupts enabled (BUG_ON(irqs_disabled())).
 *
 * NOTE(review): the switch(state) statement and several braces are not
 * visible in this excerpt; the case labels below imply it.
 */
255 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
257 struct cpufreq_policy *policy;
260 BUG_ON(irqs_disabled());
262 if (cpufreq_disabled())
265 freqs->flags = cpufreq_driver->flags;
266 pr_debug("notification %u of frequency transition to %u kHz\n",
269 spin_lock_irqsave(&cpufreq_driver_lock, flags);
270 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
271 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
275 case CPUFREQ_PRECHANGE:
276 /* detect if the driver reported a value as "old frequency"
277 * which is not equal to what the cpufreq core thinks is
280 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
281 if ((policy) && (policy->cpu == freqs->cpu) &&
282 (policy->cur) && (policy->cur != freqs->old)) {
283 pr_debug("Warning: CPU frequency is"
284 " %u, cpufreq assumed %u kHz.\n",
285 freqs->old, policy->cur);
286 freqs->old = policy->cur;
289 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
290 CPUFREQ_PRECHANGE, freqs);
291 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
294 case CPUFREQ_POSTCHANGE:
295 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
296 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
297 (unsigned long)freqs->cpu);
298 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
299 trace_cpu_frequency(freqs->new, freqs->cpu);
300 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
301 CPUFREQ_POSTCHANGE, freqs);
302 if (likely(policy) && likely(policy->cpu == freqs->cpu))
303 policy->cur = freqs->new;
307 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
311 /*********************************************************************
313 *********************************************************************/
/*
 * __find_governor() - look up a registered governor by name
 * (case-insensitive, bounded by CPUFREQ_NAME_LEN).  Caller must hold
 * cpufreq_governor_mutex.  Returns the entry, or presumably NULL when
 * no match - the return lines are not visible in this excerpt.
 */
315 static struct cpufreq_governor *__find_governor(const char *str_governor)
317 struct cpufreq_governor *t;
319 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
320 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
/*
 * cpufreq_parse_governor() - translate a governor name into either a
 * static policy (*policy, for ->setpolicy drivers: "performance" /
 * "powersave") or a governor object (*governor, for ->target drivers).
 * If the governor is not yet registered it drops the mutex, tries
 * request_module("cpufreq_<name>") to autoload it, and looks again.
 */
329 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
330 struct cpufreq_governor **governor)
337 if (cpufreq_driver->setpolicy) {
338 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
339 *policy = CPUFREQ_POLICY_PERFORMANCE;
341 } else if (!strnicmp(str_governor, "powersave",
343 *policy = CPUFREQ_POLICY_POWERSAVE;
346 } else if (cpufreq_driver->target) {
347 struct cpufreq_governor *t;
349 mutex_lock(&cpufreq_governor_mutex);
351 t = __find_governor(str_governor);
356 mutex_unlock(&cpufreq_governor_mutex);
357 ret = request_module("cpufreq_%s", str_governor);
358 mutex_lock(&cpufreq_governor_mutex);
361 t = __find_governor(str_governor);
369 mutex_unlock(&cpufreq_governor_mutex);
377 * cpufreq_per_cpu_attr_read() / show_##file_name() -
378 * print out cpufreq information
380 * Write out information from cpufreq_driver->policy[cpu]; object must be
/*
 * show_one()/store_one() - macro generators for the simple sysfs
 * attribute accessors.
 *
 * show_##file_name prints the named policy member with "%u\n".
 * store_##file_name snapshots the current policy into a local copy,
 * parses one unsigned int from @buf into new_policy.object, applies it
 * through __cpufreq_set_policy() (so limits get re-validated), and
 * records the value in user_policy so later limit changes don't clobber
 * the user's request.  Returns @count on success, a negative errno
 * otherwise.
 */
384 #define show_one(file_name, object) \
385 static ssize_t show_##file_name \
386 (struct cpufreq_policy *policy, char *buf) \
388 return sprintf(buf, "%u\n", policy->object); \
391 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
392 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
393 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
394 show_one(scaling_min_freq, min);
395 show_one(scaling_max_freq, max);
396 show_one(scaling_cur_freq, cur);
/* Forward declaration: defined later in the file. */
398 static int __cpufreq_set_policy(struct cpufreq_policy *data,
399 struct cpufreq_policy *policy);
402 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
404 #define store_one(file_name, object) \
405 static ssize_t store_##file_name \
406 (struct cpufreq_policy *policy, const char *buf, size_t count) \
409 struct cpufreq_policy new_policy; \
411 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
415 ret = sscanf(buf, "%u", &new_policy.object); \
419 ret = __cpufreq_set_policy(policy, &new_policy); \
420 policy->user_policy.object = policy->object; \
422 return ret ? ret : count; \
425 store_one(scaling_min_freq, min);
426 store_one(scaling_max_freq, max);
429 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
/*
 * sysfs show/store handlers for the per-policy cpufreq directory.
 * All take the policy already resolved and the rwsem already held by
 * the generic show()/store() dispatchers further down.
 */
/* cpuinfo_cur_freq: frequency as read from hardware via __cpufreq_get();
 * prints "<unknown>" when the driver reports nothing. */
431 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
434 unsigned int cur_freq = __cpufreq_get(policy->cpu);
436 return sprintf(buf, "<unknown>");
437 return sprintf(buf, "%u\n", cur_freq);
/* scaling_governor: static policy name for setpolicy drivers, otherwise
 * the active governor's name. */
444 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
446 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
447 return sprintf(buf, "powersave\n");
448 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
449 return sprintf(buf, "performance\n");
450 else if (policy->governor)
451 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
452 policy->governor->name);
/* store_scaling_governor: parse the governor name and apply it via
 * __cpufreq_set_policy(), then mirror the result into user_policy. */
460 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
461 const char *buf, size_t count)
464 char str_governor[16];
465 struct cpufreq_policy new_policy;
467 ret = cpufreq_get_policy(&new_policy, policy->cpu);
471 ret = sscanf(buf, "%15s", str_governor);
475 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
476 &new_policy.governor))
479 /* Do not use cpufreq_set_policy here or the user_policy.max
480 will be wrongly overridden */
481 ret = __cpufreq_set_policy(policy, &new_policy);
483 policy->user_policy.policy = policy->policy;
484 policy->user_policy.governor = policy->governor;
/* scaling_driver: name of the registered low-level driver. */
495 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
497 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
/* scaling_available_governors: space-separated list; for setpolicy-only
 * drivers the two static policies, otherwise every registered governor,
 * bounded so the output stays within one page. */
503 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
507 struct cpufreq_governor *t;
509 if (!cpufreq_driver->target) {
510 i += sprintf(buf, "performance powersave");
514 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
515 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
516 - (CPUFREQ_NAME_LEN + 2)))
518 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
521 i += sprintf(&buf[i], "\n");
/* show_cpus: render a cpumask as a space-separated list of CPU ids,
 * page-bounded; shared by related_cpus and affected_cpus below. */
525 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
530 for_each_cpu(cpu, mask) {
532 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
533 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
534 if (i >= (PAGE_SIZE - 5))
537 i += sprintf(&buf[i], "\n");
542 * show_related_cpus - show the CPUs affected by each transition even if
543 * hw coordination is in use
545 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
547 return show_cpus(policy->related_cpus, buf);
551 * show_affected_cpus - show the CPUs affected by each transition
553 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
555 return show_cpus(policy->cpus, buf);
/* scaling_setspeed: forwarded to the governor's store_setspeed hook
 * (only meaningful for userspace-style governors). */
558 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
559 const char *buf, size_t count)
561 unsigned int freq = 0;
564 if (!policy->governor || !policy->governor->store_setspeed)
567 ret = sscanf(buf, "%u", &freq);
571 policy->governor->store_setspeed(policy, freq);
576 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
578 if (!policy->governor || !policy->governor->show_setspeed)
579 return sprintf(buf, "<unsupported>\n");
581 return policy->governor->show_setspeed(policy, buf);
/* bios_limit: HW/BIOS-imposed frequency cap from the driver hook;
 * falls back to cpuinfo.max_freq when the hook fails or is absent. */
587 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
591 if (cpufreq_driver->bios_limit) {
592 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
594 return sprintf(buf, "%u\n", limit);
596 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
/*
 * Attribute objects wiring the handlers above into sysfs.
 * cpuinfo_cur_freq is root-readable only (0400) because reading it may
 * touch hardware via the driver's ->get hook.
 */
599 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
600 cpufreq_freq_attr_ro(cpuinfo_min_freq);
601 cpufreq_freq_attr_ro(cpuinfo_max_freq);
602 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
603 cpufreq_freq_attr_ro(scaling_available_governors);
604 cpufreq_freq_attr_ro(scaling_driver);
605 cpufreq_freq_attr_ro(scaling_cur_freq);
606 cpufreq_freq_attr_ro(bios_limit);
607 cpufreq_freq_attr_ro(related_cpus);
608 cpufreq_freq_attr_ro(affected_cpus);
609 cpufreq_freq_attr_rw(scaling_min_freq);
610 cpufreq_freq_attr_rw(scaling_max_freq);
611 cpufreq_freq_attr_rw(scaling_governor);
612 cpufreq_freq_attr_rw(scaling_setspeed);
/*
 * Attributes created unconditionally for every policy kobject; the
 * conditional ones (cpuinfo_cur_freq, scaling_cur_freq, bios_limit) are
 * added in cpufreq_add_dev_interface() based on driver capabilities.
 */
614 static struct attribute *default_attrs[] = {
615 &cpuinfo_min_freq.attr,
616 &cpuinfo_max_freq.attr,
617 &cpuinfo_transition_latency.attr,
618 &scaling_min_freq.attr,
619 &scaling_max_freq.attr,
622 &scaling_governor.attr,
623 &scaling_driver.attr,
624 &scaling_available_governors.attr,
625 &scaling_setspeed.attr,
/* Kobject under which drivers can publish global attributes. */
629 struct kobject *cpufreq_global_kobject;
630 EXPORT_SYMBOL(cpufreq_global_kobject);
/* container_of helpers used by the generic show()/store() below. */
632 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
633 #define to_attr(a) container_of(a, struct freq_attr, attr)
/*
 * Generic sysfs dispatchers for policy attributes.
 *
 * show()/store() re-acquire a sysfs-path reference on the policy, take
 * the policy rwsem in read/write mode respectively, call the attribute's
 * handler, then unwind in reverse order.  The error/return lines are
 * not visible in this excerpt - verify upstream.
 */
635 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
637 struct cpufreq_policy *policy = to_policy(kobj);
638 struct freq_attr *fattr = to_attr(attr);
639 ssize_t ret = -EINVAL;
640 policy = cpufreq_cpu_get_sysfs(policy->cpu);
644 if (lock_policy_rwsem_read(policy->cpu) < 0)
648 ret = fattr->show(policy, buf);
652 unlock_policy_rwsem_read(policy->cpu);
654 cpufreq_cpu_put_sysfs(policy);
659 static ssize_t store(struct kobject *kobj, struct attribute *attr,
660 const char *buf, size_t count)
662 struct cpufreq_policy *policy = to_policy(kobj);
663 struct freq_attr *fattr = to_attr(attr);
664 ssize_t ret = -EINVAL;
665 policy = cpufreq_cpu_get_sysfs(policy->cpu);
669 if (lock_policy_rwsem_write(policy->cpu) < 0)
673 ret = fattr->store(policy, buf, count);
677 unlock_policy_rwsem_write(policy->cpu);
679 cpufreq_cpu_put_sysfs(policy);
/*
 * Kobject release: signals kobj_unregister so teardown paths blocked in
 * wait_for_completion() know the last reference is gone.
 */
684 static void cpufreq_sysfs_release(struct kobject *kobj)
686 struct cpufreq_policy *policy = to_policy(kobj);
687 pr_debug("last reference is dropped\n");
688 complete(&policy->kobj_unregister);
691 static const struct sysfs_ops sysfs_ops = {
696 static struct kobj_type ktype_cpufreq = {
697 .sysfs_ops = &sysfs_ops,
698 .default_attrs = default_attrs,
699 .release = cpufreq_sysfs_release,
702 /* symlink affected CPUs */
/*
 * cpufreq_add_dev_symlink() - for every sibling CPU in policy->cpus,
 * take a reference on the shared policy and create a "cpufreq" symlink
 * in the sibling's device directory pointing at the policy kobject.
 * NOTE(review): the loop's skip conditions (e.g. skipping @cpu itself)
 * and error unwinding are not visible in this excerpt.
 */
703 static int cpufreq_add_dev_symlink(unsigned int cpu,
704 struct cpufreq_policy *policy)
709 for_each_cpu(j, policy->cpus) {
710 struct cpufreq_policy *managed_policy;
711 struct device *cpu_dev;
716 pr_debug("CPU %u already managed, adding link\n", j);
717 managed_policy = cpufreq_cpu_get(cpu);
718 cpu_dev = get_cpu_device(j);
719 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
722 cpufreq_cpu_put(managed_policy);
/*
 * cpufreq_add_dev_interface() - publish a new policy.
 *
 * Registers the policy kobject under the CPU device, creates the
 * driver-specific and capability-dependent attribute files, points all
 * covered CPUs' cpufreq_cpu_data/cpufreq_policy_cpu at this policy
 * (under cpufreq_driver_lock), adds sibling symlinks, and finally
 * applies the default policy via __cpufreq_set_policy().  governor is
 * NULLed first so the set_policy path runs the full governor start
 * sequence.  On failure it calls the driver's ->exit and waits for the
 * kobject to be released.
 */
729 static int cpufreq_add_dev_interface(unsigned int cpu,
730 struct cpufreq_policy *policy,
733 struct cpufreq_policy new_policy;
734 struct freq_attr **drv_attr;
739 /* prepare interface data */
740 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
741 &dev->kobj, "cpufreq");
745 /* set up files for this cpu device */
746 drv_attr = cpufreq_driver->attr;
747 while ((drv_attr) && (*drv_attr)) {
748 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
750 goto err_out_kobj_put;
753 if (cpufreq_driver->get) {
754 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
756 goto err_out_kobj_put;
758 if (cpufreq_driver->target) {
759 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
761 goto err_out_kobj_put;
763 if (cpufreq_driver->bios_limit) {
764 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
766 goto err_out_kobj_put;
769 spin_lock_irqsave(&cpufreq_driver_lock, flags);
770 for_each_cpu(j, policy->cpus) {
771 per_cpu(cpufreq_cpu_data, j) = policy;
772 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
774 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
776 ret = cpufreq_add_dev_symlink(cpu, policy);
778 goto err_out_kobj_put;
780 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
781 /* assure that the starting sequence is run in __cpufreq_set_policy */
782 policy->governor = NULL;
784 /* set default policy */
785 ret = __cpufreq_set_policy(policy, &new_policy);
786 policy->user_policy.policy = policy->policy;
787 policy->user_policy.governor = policy->governor;
790 pr_debug("setting policy failed\n");
791 if (cpufreq_driver->exit)
792 cpufreq_driver->exit(policy);
797 kobject_put(&policy->kobj);
798 wait_for_completion(&policy->kobj_unregister);
802 #ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu() - attach a hotplugged CPU to the existing
 * policy of @sibling instead of creating a new one.
 *
 * Stops the governor, then under the sibling's rwsem (write) and the
 * driver spinlock adds @cpu to policy->cpus and points its per-CPU
 * bookkeeping at the policy; restarts the governor (GOV_START +
 * GOV_LIMITS) and creates the "cpufreq" symlink for the new CPU's
 * device.  The reference from cpufreq_cpu_get(sibling) is kept for the
 * added CPU; the trailing cpufreq_cpu_put() is presumably an error-path
 * release - the surrounding lines are not visible, verify upstream.
 */
803 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
806 struct cpufreq_policy *policy;
810 policy = cpufreq_cpu_get(sibling);
813 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
815 lock_policy_rwsem_write(sibling);
817 spin_lock_irqsave(&cpufreq_driver_lock, flags);
819 cpumask_set_cpu(cpu, policy->cpus);
820 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
821 per_cpu(cpufreq_cpu_data, cpu) = policy;
822 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
824 unlock_policy_rwsem_write(sibling);
826 __cpufreq_governor(policy, CPUFREQ_GOV_START);
827 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
829 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
831 cpufreq_cpu_put(policy);
840 * cpufreq_add_dev - add a CPU device
842 * Adds the cpufreq interface for a CPU device.
844 * The Oracle says: try running cpufreq registration/unregistration concurrently
845 * with with cpu hotplugging and all hell will break loose. Tried to clean this
846 * mess up, but more thorough testing is needed. - Mathieu
/*
 * cpufreq_add_dev() - subsys_interface add hook: bring up cpufreq for a
 * CPU device.
 *
 * Flow: skip offline CPUs; if a policy already covers this CPU just
 * drop the extra ref and return.  On HOTPLUG_CPU kernels, if an online
 * sibling's policy lists this CPU in related_cpus, delegate to
 * cpufreq_add_policy_cpu().  Otherwise allocate a fresh policy, let the
 * driver's ->init fill in limits, restrict policy->cpus to online CPUs,
 * fire the CPUFREQ_START policy notifier, restore any governor saved at
 * unplug time, and publish everything through
 * cpufreq_add_dev_interface().  Error labels unwind per-CPU pointers,
 * the kobject, cpumasks, and the driver module reference in reverse
 * order.
 *
 * NOTE(review): returns, some gotos and the #endif lines are missing
 * from this excerpt.
 */
848 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
850 unsigned int j, cpu = dev->id;
852 struct cpufreq_policy *policy;
854 #ifdef CONFIG_HOTPLUG_CPU
855 struct cpufreq_governor *gov;
859 if (cpu_is_offline(cpu))
862 pr_debug("adding CPU %u\n", cpu);
865 /* check whether a different CPU already registered this
866 * CPU because it is in the same boat. */
867 policy = cpufreq_cpu_get(cpu);
868 if (unlikely(policy)) {
869 cpufreq_cpu_put(policy);
873 #ifdef CONFIG_HOTPLUG_CPU
874 /* Check if this cpu was hot-unplugged earlier and has siblings */
875 spin_lock_irqsave(&cpufreq_driver_lock, flags);
876 for_each_online_cpu(sibling) {
877 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
878 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
879 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
880 return cpufreq_add_policy_cpu(cpu, sibling, dev);
883 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
887 if (!try_module_get(cpufreq_driver->owner)) {
892 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
896 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
897 goto err_free_policy;
899 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
900 goto err_free_cpumask;
903 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
904 cpumask_copy(policy->cpus, cpumask_of(cpu));
906 /* Initially set CPU itself as the policy_cpu */
907 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
909 init_completion(&policy->kobj_unregister);
910 INIT_WORK(&policy->update, handle_update);
912 /* call driver. From then on the cpufreq must be able
913 * to accept all calls to ->verify and ->setpolicy for this CPU
915 ret = cpufreq_driver->init(policy);
917 pr_debug("initialization failed\n");
918 goto err_set_policy_cpu;
921 /* related cpus should atleast have policy->cpus */
922 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
925 * affected cpus must always be the one, which are online. We aren't
926 * managing offline cpus here.
928 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
930 policy->user_policy.min = policy->min;
931 policy->user_policy.max = policy->max;
933 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
934 CPUFREQ_START, policy);
936 #ifdef CONFIG_HOTPLUG_CPU
937 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
939 policy->governor = gov;
940 pr_debug("Restoring governor %s for cpu %d\n",
941 policy->governor->name, cpu);
945 ret = cpufreq_add_dev_interface(cpu, policy, dev);
947 goto err_out_unregister;
949 kobject_uevent(&policy->kobj, KOBJ_ADD);
950 module_put(cpufreq_driver->owner);
951 pr_debug("initialization complete\n");
/* Error unwinding, in reverse order of setup above. */
956 spin_lock_irqsave(&cpufreq_driver_lock, flags);
957 for_each_cpu(j, policy->cpus)
958 per_cpu(cpufreq_cpu_data, j) = NULL;
959 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
961 kobject_put(&policy->kobj);
962 wait_for_completion(&policy->kobj_unregister);
965 per_cpu(cpufreq_policy_cpu, cpu) = -1;
966 free_cpumask_var(policy->related_cpus);
968 free_cpumask_var(policy->cpus);
972 module_put(cpufreq_driver->owner);
/*
 * update_policy_cpu() - transfer policy ownership to @cpu.
 *
 * Records the previous owner in policy->last_cpu, repoints every covered
 * CPU's cpufreq_policy_cpu at the new owner, lets the frequency-table
 * code refresh its per-CPU state, and notifies policy listeners with
 * CPUFREQ_UPDATE_POLICY_CPU.  (The assignment of policy->cpu itself is
 * not visible in this excerpt - presumably between lines 981 and 984.)
 */
977 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
981 policy->last_cpu = policy->cpu;
984 for_each_cpu(j, policy->cpus)
985 per_cpu(cpufreq_policy_cpu, j) = cpu;
987 #ifdef CONFIG_CPU_FREQ_TABLE
988 cpufreq_frequency_table_update_policy_cpu(policy);
990 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
991 CPUFREQ_UPDATE_POLICY_CPU, policy);
995 * __cpufreq_remove_dev - remove a CPU device
997 * Removes the cpufreq interface for a CPU device.
998 * Caller should already have policy_rwsem in write mode for this CPU.
999 * This routine frees the rwsem before returning.
/*
 * __cpufreq_remove_dev() - tear down cpufreq state for a departing CPU.
 *
 * Clears the CPU's cpufreq_cpu_data slot, stops the governor, saves the
 * governor name for later restore (hotplug), and removes the CPU from
 * policy->cpus.  Three outcomes:
 *  - the CPU was not the policy owner: just drop its symlink;
 *  - siblings remain and this was the owner: move the policy kobject to
 *    the first remaining sibling and retarget the policy
 *    (update_policy_cpu), rolling back on kobject_move failure;
 *  - it was the last CPU: wait for the kobject refcount to drain, call
 *    the driver's ->exit, and free the cpumasks/policy.
 * Governors are restarted (GOV_START/GOV_LIMITS) when the policy
 * survives.  Per the header comment, the rwsem handling is intricate:
 * lock/unlock pairs bracket each mask/bookkeeping mutation.
 *
 * NOTE(review): braces, returns and some error paths are missing from
 * this excerpt; do not refactor from this view.
 */
1001 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1003 unsigned int cpu = dev->id, ret, cpus;
1004 unsigned long flags;
1005 struct cpufreq_policy *data;
1006 struct kobject *kobj;
1007 struct completion *cmp;
1008 struct device *cpu_dev;
1010 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1012 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1014 data = per_cpu(cpufreq_cpu_data, cpu);
1015 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1017 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1020 pr_debug("%s: No cpu_data found\n", __func__);
1024 if (cpufreq_driver->target)
1025 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1027 #ifdef CONFIG_HOTPLUG_CPU
1028 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1032 WARN_ON(lock_policy_rwsem_write(cpu));
1033 cpus = cpumask_weight(data->cpus);
1034 cpumask_clear_cpu(cpu, data->cpus);
1035 unlock_policy_rwsem_write(cpu);
1037 if (cpu != data->cpu) {
1038 sysfs_remove_link(&dev->kobj, "cpufreq");
1039 } else if (cpus > 1) {
1040 /* first sibling now owns the new sysfs dir */
1041 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1042 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1043 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1045 pr_err("%s: Failed to move kobj: %d", __func__, ret);
/* kobject_move failed: re-add the CPU and restore its slot. */
1047 WARN_ON(lock_policy_rwsem_write(cpu));
1048 cpumask_set_cpu(cpu, data->cpus);
1050 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1051 per_cpu(cpufreq_cpu_data, cpu) = data;
1052 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1054 unlock_policy_rwsem_write(cpu);
1056 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1061 WARN_ON(lock_policy_rwsem_write(cpu));
1062 update_policy_cpu(data, cpu_dev->id);
1063 unlock_policy_rwsem_write(cpu);
1064 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1065 __func__, cpu_dev->id, cpu);
1068 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1069 cpufreq_cpu_put(data);
1071 /* If cpu is last user of policy, free policy */
1073 lock_policy_rwsem_read(cpu);
1075 cmp = &data->kobj_unregister;
1076 unlock_policy_rwsem_read(cpu);
1079 /* we need to make sure that the underlying kobj is actually
1080 * not referenced anymore by anybody before we proceed with
1083 pr_debug("waiting for dropping of refcount\n");
1084 wait_for_completion(cmp);
1085 pr_debug("wait complete\n");
1087 if (cpufreq_driver->exit)
1088 cpufreq_driver->exit(data);
1090 free_cpumask_var(data->related_cpus);
1091 free_cpumask_var(data->cpus);
1093 } else if (cpufreq_driver->target) {
1094 __cpufreq_governor(data, CPUFREQ_GOV_START);
1095 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1098 per_cpu(cpufreq_policy_cpu, cpu) = -1;
/*
 * cpufreq_remove_dev() - subsys_interface remove hook; skips CPUs that
 * are already offline and otherwise delegates to __cpufreq_remove_dev().
 */
1103 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1105 unsigned int cpu = dev->id;
1108 if (cpu_is_offline(cpu))
1111 retval = __cpufreq_remove_dev(dev, sif);
/*
 * handle_update() - deferred work callback (policy->update) that
 * re-validates the policy for its CPU via cpufreq_update_policy().
 * Scheduled from contexts that cannot call it directly (e.g. the
 * out-of-sync path below, resume).
 */
1116 static void handle_update(struct work_struct *work)
1118 struct cpufreq_policy *policy =
1119 container_of(work, struct cpufreq_policy, update);
1120 unsigned int cpu = policy->cpu;
1121 pr_debug("handle_update for cpu %u called\n", cpu);
1122 cpufreq_update_policy(cpu);
1126 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1128 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1129 * @new_freq: CPU frequency the CPU actually runs at
1131 * We adjust to current frequency first, and need to clean up later.
1132 * So either call to cpufreq_update_policy() or schedule handle_update()).
/*
 * Fakes a full PRECHANGE/POSTCHANGE transition from old_freq to
 * new_freq so notifiers and policy->cur catch up with reality.
 */
1134 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1135 unsigned int new_freq)
1137 struct cpufreq_freqs freqs;
1139 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1140 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1143 freqs.old = old_freq;
1144 freqs.new = new_freq;
1145 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1146 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1151 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1154 * This is the last known freq, without actually getting it from the driver.
1155 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
/*
 * Frequency query helpers.
 *
 * cpufreq_quick_get() - last known frequency (policy->cur) without
 * touching hardware, except for setpolicy drivers with a ->get hook
 * where the hardware value is authoritative.  Returns 0 when no policy.
 */
1157 unsigned int cpufreq_quick_get(unsigned int cpu)
1159 struct cpufreq_policy *policy;
1160 unsigned int ret_freq = 0;
1162 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1163 return cpufreq_driver->get(cpu);
1165 policy = cpufreq_cpu_get(cpu);
1167 ret_freq = policy->cur;
1168 cpufreq_cpu_put(policy);
1173 EXPORT_SYMBOL(cpufreq_quick_get);
1176 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1179 * Just return the max possible frequency for a given CPU.
1181 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1183 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1184 unsigned int ret_freq = 0;
1187 ret_freq = policy->max;
1188 cpufreq_cpu_put(policy);
1193 EXPORT_SYMBOL(cpufreq_quick_get_max);
/*
 * __cpufreq_get() - read the frequency from the driver; if it disagrees
 * with policy->cur (and CONST_LOOPS is not set), run the out-of-sync
 * fixup now and schedule handle_update() for the full policy refresh.
 * Caller holds the policy rwsem (see cpufreq_get below).
 */
1196 static unsigned int __cpufreq_get(unsigned int cpu)
1198 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1199 unsigned int ret_freq = 0;
1201 if (!cpufreq_driver->get)
1204 ret_freq = cpufreq_driver->get(cpu);
1206 if (ret_freq && policy->cur &&
1207 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1208 /* verify no discrepancy between actual and
1209 saved value exists */
1210 if (unlikely(ret_freq != policy->cur)) {
1211 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1212 schedule_work(&policy->update);
/*
 * cpufreq_get() - public wrapper: takes a policy reference and the read
 * rwsem around __cpufreq_get().
 */
1220 * cpufreq_get - get the current CPU frequency (in kHz)
1223 * Get the CPU current (static) CPU frequency
1225 unsigned int cpufreq_get(unsigned int cpu)
1227 unsigned int ret_freq = 0;
1228 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1233 if (unlikely(lock_policy_rwsem_read(cpu)))
1236 ret_freq = __cpufreq_get(cpu);
1238 unlock_policy_rwsem_read(cpu);
1241 cpufreq_cpu_put(policy);
1245 EXPORT_SYMBOL(cpufreq_get);
/*
 * Hooks cpufreq into the cpu subsystem: cpufreq_add_dev /
 * cpufreq_remove_dev run for each CPU device as it appears/disappears.
 */
1247 static struct subsys_interface cpufreq_interface = {
1249 .subsys = &cpu_subsys,
1250 .add_dev = cpufreq_add_dev,
1251 .remove_dev = cpufreq_remove_dev,
1256 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1258 * This function is only executed for the boot processor. The other CPUs
1259 * have been put offline by means of CPU hotplug.
1261 static int cpufreq_bp_suspend(void)
1265 int cpu = smp_processor_id();
1266 struct cpufreq_policy *cpu_policy;
1268 pr_debug("suspending cpu %u\n", cpu);
1270 /* If there's no policy for the boot CPU, we have nothing to do. */
1271 cpu_policy = cpufreq_cpu_get(cpu);
1275 if (cpufreq_driver->suspend) {
1276 ret = cpufreq_driver->suspend(cpu_policy);
1278 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1279 "step on CPU %u\n", cpu_policy->cpu);
1282 cpufreq_cpu_put(cpu_policy);
1287 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1289 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1290 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1291 * restored. It will verify that the current freq is in sync with
1292 * what we believe it to be. This is a bit later than when it
1293 * should be, but nonethteless it's better than calling
1294 * cpufreq_driver->get() here which might re-enable interrupts...
1296 * This function is only executed for the boot CPU. The other CPUs have not
1297 * been turned on yet.
1299 static void cpufreq_bp_resume(void)
1303 int cpu = smp_processor_id();
1304 struct cpufreq_policy *cpu_policy;
1306 pr_debug("resuming cpu %u\n", cpu);
1308 /* If there's no policy for the boot CPU, we have nothing to do. */
/* Takes a policy reference; released by cpufreq_cpu_put() below. */
1309 cpu_policy = cpufreq_cpu_get(cpu);
/* Let the low-level driver re-initialize its hardware state first. */
1313 if (cpufreq_driver->resume) {
1314 ret = cpufreq_driver->resume(cpu_policy);
/* NOTE(review): presumably guarded by "if (ret)" on an elided line. */
1316 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1317 "step on CPU %u\n", cpu_policy->cpu);
/* Defer the policy re-check to process context (see step 2 above). */
1322 schedule_work(&cpu_policy->update);
1325 cpufreq_cpu_put(cpu_policy);
/* Hooks run with interrupts disabled on the last/first CPU during system
 * suspend/resume; registered from cpufreq_core_init(). */
1328 static struct syscore_ops cpufreq_syscore_ops = {
1329 .suspend = cpufreq_bp_suspend,
1330 .resume = cpufreq_bp_resume,
1334 * cpufreq_get_current_driver - return current driver's name
1336 * Return the name string of the currently loaded cpufreq driver
1339 const char *cpufreq_get_current_driver(void)
1342 return cpufreq_driver->name;
1346 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1348 /*********************************************************************
1349 * NOTIFIER LISTS INTERFACE *
1350 *********************************************************************/
1353 * cpufreq_register_notifier - register a driver with cpufreq
1354 * @nb: notifier function to register
1355 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1357 * Add a driver to one of two lists: either a list of drivers that
1358 * are notified about clock rate changes (once before and once after
1359 * the transition), or a list of drivers that are notified about
1360 * changes in cpufreq policy.
1362 * This function may sleep, and has the same return conditions as
1363 * blocking_notifier_chain_register.
1365 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1369 if (cpufreq_disabled())
/* The transition list must have been initialized before anyone can
 * register on it; catch ordering bugs loudly. */
1372 WARN_ON(!init_cpufreq_transition_notifier_list_called);
/* Transition notifiers live on an SRCU chain, policy notifiers on a
 * plain blocking chain — pick the list the caller asked for. */
1375 case CPUFREQ_TRANSITION_NOTIFIER:
1376 ret = srcu_notifier_chain_register(
1377 &cpufreq_transition_notifier_list, nb);
1379 case CPUFREQ_POLICY_NOTIFIER:
1380 ret = blocking_notifier_chain_register(
1381 &cpufreq_policy_notifier_list, nb);
1389 EXPORT_SYMBOL(cpufreq_register_notifier);
1393 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1394 * @nb: notifier block to be unregistered
1395 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1397 * Remove a driver from the CPU frequency notifier list.
1399 * This function may sleep, and has the same return conditions as
1400 * blocking_notifier_chain_unregister.
1402 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1406 if (cpufreq_disabled())
/* Mirror of cpufreq_register_notifier(): unhook from the matching chain. */
1410 case CPUFREQ_TRANSITION_NOTIFIER:
1411 ret = srcu_notifier_chain_unregister(
1412 &cpufreq_transition_notifier_list, nb);
1414 case CPUFREQ_POLICY_NOTIFIER:
1415 ret = blocking_notifier_chain_unregister(
1416 &cpufreq_policy_notifier_list, nb);
1424 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1427 /*********************************************************************
1429 *********************************************************************/
/* Ask the low-level driver to change @policy's CPU to @target_freq (kHz),
 * using @relation (CPUFREQ_RELATION_L/H) to resolve to a supported step.
 * Caller must hold the policy rwsem in write mode (see the locked wrapper
 * cpufreq_driver_target() below). Returns the driver's result, or -EINVAL
 * if the driver has no ->target callback. */
1432 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1433 unsigned int target_freq,
1434 unsigned int relation)
1436 int retval = -EINVAL;
1437 unsigned int old_target_freq = old_target_freq is kept only for the debug print below
1439 if (cpufreq_disabled())
1442 /* Make sure that target_freq is within supported range */
1443 if (target_freq > policy->max)
1444 target_freq = policy->max;
1445 if (target_freq < policy->min)
1446 target_freq = policy->min;
1448 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1449 policy->cpu, target_freq, relation, old_target_freq);
/* Already there after clamping — nothing to do. */
1451 if (target_freq == policy->cur)
1454 if (cpufreq_driver->target)
1455 retval = cpufreq_driver->target(policy, target_freq, relation);
1459 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/* Locked wrapper around __cpufreq_driver_target(): takes a reference on the
 * policy and the per-CPU policy rwsem in write mode before delegating, so
 * external callers get proper serialization against hotplug/policy updates. */
1461 int cpufreq_driver_target(struct cpufreq_policy *policy,
1462 unsigned int target_freq,
1463 unsigned int relation)
/* Re-acquire the policy by CPU to take a reference (dropped below). */
1467 policy = cpufreq_cpu_get(policy->cpu);
1471 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1474 ret = __cpufreq_driver_target(policy, target_freq, relation);
1476 unlock_policy_rwsem_write(policy->cpu);
1479 cpufreq_cpu_put(policy);
1483 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/* Query the driver's average-frequency estimate for @cpu, if the driver
 * provides a ->getavg hook; otherwise does nothing. Takes and releases a
 * reference on the policy around the driver call. */
1485 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1489 if (cpufreq_disabled())
1492 if (!cpufreq_driver->getavg)
/* Re-resolve the policy to pin a reference for the call below. */
1495 policy = cpufreq_cpu_get(policy->cpu);
1499 ret = cpufreq_driver->getavg(policy, cpu);
1501 cpufreq_cpu_put(policy);
1504 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/* Dispatch @event (GOV_START/GOV_STOP/GOV_LIMITS/...) to @policy's governor,
 * falling back to the performance governor (when built in) if the hardware's
 * transition latency exceeds what the governor can tolerate. Manages the
 * governor module reference count across START/STOP.
1507 * when "event" is CPUFREQ_GOV_LIMITS
 */
1510 static int __cpufreq_governor(struct cpufreq_policy *policy,
1515 /* Only must be defined when default governor is known to have latency
1516 restrictions, like e.g. conservative or ondemand.
1517 That this is the case is already ensured in Kconfig
1519 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1520 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1522 struct cpufreq_governor *gov = NULL;
/* Governor declares a max transition latency and the HW is slower than
 * that: swap in the fallback governor (if any) with a loud warning. */
1525 if (policy->governor->max_transition_latency &&
1526 policy->cpuinfo.transition_latency >
1527 policy->governor->max_transition_latency) {
1531 printk(KERN_WARNING "%s governor failed, too long"
1532 " transition latency of HW, fallback"
1533 " to %s governor\n",
1534 policy->governor->name,
1536 policy->governor = gov;
/* Hold the governor module while it may be running for this policy. */
1540 if (!try_module_get(policy->governor->owner))
1543 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1544 policy->cpu, event);
1545 ret = policy->governor->governor(policy, event);
/* Track how many policies currently run this governor. */
1547 if (event == CPUFREQ_GOV_START)
1548 policy->governor->initialized++;
1549 else if (event == CPUFREQ_GOV_STOP)
1550 policy->governor->initialized--;
1552 /* we keep one module reference alive for
1553 each CPU governed by this CPU */
/* Drop the temporary ref unless a successful START keeps it; a
 * successful STOP drops the ref the matching START retained. */
1554 if ((event != CPUFREQ_GOV_START) || ret)
1555 module_put(policy->governor->owner);
1556 if ((event == CPUFREQ_GOV_STOP) && !ret)
1557 module_put(policy->governor->owner);
/* Register @governor with the core: add it to cpufreq_governor_list unless
 * one with the same name already exists. Serialized by
 * cpufreq_governor_mutex. */
1563 int cpufreq_register_governor(struct cpufreq_governor *governor)
1570 if (cpufreq_disabled())
1573 mutex_lock(&cpufreq_governor_mutex);
/* No policy runs this governor yet. */
1575 governor->initialized = 0;
/* Reject duplicate names; lookup and insertion are atomic under the mutex. */
1577 if (__find_governor(governor->name) == NULL) {
1579 list_add(&governor->governor_list, &cpufreq_governor_list);
1582 mutex_unlock(&cpufreq_governor_mutex);
1585 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/* Remove @governor from the core's governor list. Also scrubs the per-CPU
 * "last governor" records of offline CPUs so a later hotplug-add does not
 * try to restore a governor that no longer exists. */
1588 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1590 #ifdef CONFIG_HOTPLUG_CPU
1597 if (cpufreq_disabled())
1600 #ifdef CONFIG_HOTPLUG_CPU
1601 for_each_present_cpu(cpu) {
/* Online CPUs keep their live policy; only stale offline records matter. */
1602 if (cpu_online(cpu))
1604 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1605 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1609 mutex_lock(&cpufreq_governor_mutex);
1610 list_del(&governor->governor_list);
1611 mutex_unlock(&cpufreq_governor_mutex);
1614 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1618 /*********************************************************************
1619 * POLICY INTERFACE *
1620 *********************************************************************/
1623 * cpufreq_get_policy - get the current cpufreq_policy
1624 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1627 * Reads the current cpufreq policy.
 *
 * Copies @cpu's live policy into the caller-supplied buffer; the caller
 * gets a snapshot, not a reference.
1629 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1631 struct cpufreq_policy *cpu_policy;
/* Pin the policy while we copy it out. */
1635 cpu_policy = cpufreq_cpu_get(cpu);
1639 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1641 cpufreq_cpu_put(cpu_policy);
1644 EXPORT_SYMBOL(cpufreq_get_policy);
/* Apply the new limits/governor in @policy to the live policy @data:
 * verify against driver constraints, let policy notifiers adjust, commit
 * the new min/max, then either call the driver's ->setpolicy or perform a
 * governor stop/start switch. Caller is expected to hold the policy rwsem
 * in write mode (see cpufreq_update_policy()).
1648 * data : current policy.
1649 * policy : policy to be set.
 */
1651 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1652 struct cpufreq_policy *policy)
1656 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1657 policy->min, policy->max);
/* The request carries user limits; hardware capabilities always come
 * from the live policy. */
1659 memcpy(&policy->cpuinfo, &data->cpuinfo,
1660 sizeof(struct cpufreq_cpuinfo));
/* Reject ranges that do not even overlap the current limits. */
1662 if (policy->min > data->max || policy->max < data->min) {
1667 /* verify the cpu speed can be set within this limit */
1668 ret = cpufreq_driver->verify(policy);
1672 /* adjust if necessary - all reasons */
1673 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1674 CPUFREQ_ADJUST, policy);
1676 /* adjust if necessary - hardware incompatibility*/
1677 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1678 CPUFREQ_INCOMPATIBLE, policy);
1680 /* verify the cpu speed can be set within this limit,
1681 which might be different to the first one */
1682 ret = cpufreq_driver->verify(policy);
1686 /* notification of the new policy */
1687 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1688 CPUFREQ_NOTIFY, policy);
/* Commit the (possibly notifier-adjusted) limits to the live policy. */
1690 data->min = policy->min;
1691 data->max = policy->max;
1693 pr_debug("new min and max freqs are %u - %u kHz\n",
1694 data->min, data->max);
/* Drivers with ->setpolicy manage frequency themselves; everything
 * else goes through a governor. */
1696 if (cpufreq_driver->setpolicy) {
1697 data->policy = policy->policy;
1698 pr_debug("setting range\n");
1699 ret = cpufreq_driver->setpolicy(policy);
1701 if (policy->governor != data->governor) {
1702 /* save old, working values */
1703 struct cpufreq_governor *old_gov = data->governor;
1705 pr_debug("governor switch\n");
1707 /* end old governor */
1709 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1711 /* start new governor */
1712 data->governor = policy->governor;
1713 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1714 /* new governor failed, so re-start old one */
1715 pr_debug("starting governor %s failed\n",
1716 data->governor->name);
1718 data->governor = old_gov;
1719 __cpufreq_governor(data,
1725 /* might be a policy change, too, so fall through */
1727 pr_debug("governor: change or update limits\n");
1728 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1736 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1737 * @cpu: CPU which shall be re-evaluated
1739 * Useful for policy notifiers which have different necessities
1740 * at different times.
 *
 * Rebuilds a policy request from the saved user_policy values, re-reads
 * the hardware frequency (the BIOS may have changed it behind our back)
 * and re-applies the policy via __cpufreq_set_policy().
1742 int cpufreq_update_policy(unsigned int cpu)
/* Pin the live policy; reference dropped at the end. */
1744 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1745 struct cpufreq_policy policy;
/* Write lock serializes against hotplug and concurrent policy updates. */
1753 if (unlikely(lock_policy_rwsem_write(cpu))) {
1758 pr_debug("updating policy for CPU %u\n", cpu);
/* Start from the live policy, then restore the user's saved wishes. */
1759 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1760 policy.min = data->user_policy.min;
1761 policy.max = data->user_policy.max;
1762 policy.policy = data->user_policy.policy;
1763 policy.governor = data->user_policy.governor;
1765 /* BIOS might change freq behind our back
1766 -> ask driver for current freq and notify governors about a change */
1767 if (cpufreq_driver->get) {
1768 policy.cur = cpufreq_driver->get(cpu);
1770 pr_debug("Driver did not initialize current freq");
1771 data->cur = policy.cur;
/* Hardware disagrees with our bookkeeping: resync loops-per-jiffy
 * and notify listeners of the surprise transition. */
1773 if (data->cur != policy.cur)
1774 cpufreq_out_of_sync(cpu, data->cur,
1779 ret = __cpufreq_set_policy(data, &policy);
1781 unlock_policy_rwsem_write(cpu);
1784 cpufreq_cpu_put(data);
1788 EXPORT_SYMBOL(cpufreq_update_policy);
/* CPU hotplug notifier: mirror cpufreq state onto hotplug events —
 * create the interface on online, tear it down before a CPU goes away,
 * and restore it if the offline attempt fails. */
1790 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1791 unsigned long action, void *hcpu)
1793 unsigned int cpu = (unsigned long)hcpu;
1796 dev = get_cpu_device(cpu);
1800 case CPU_ONLINE_FROZEN:
1801 cpufreq_add_dev(dev, NULL);
1803 case CPU_DOWN_PREPARE:
1804 case CPU_DOWN_PREPARE_FROZEN:
1805 __cpufreq_remove_dev(dev, NULL);
/* The CPU did not actually go offline — re-add what DOWN_PREPARE removed. */
1807 case CPU_DOWN_FAILED:
1808 case CPU_DOWN_FAILED_FROZEN:
1809 cpufreq_add_dev(dev, NULL);
/* Registered/unregistered alongside the cpufreq driver (see
 * cpufreq_register_driver() / cpufreq_unregister_driver()). */
1816 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1817 .notifier_call = cpufreq_cpu_callback,
1820 /*********************************************************************
1821 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1822 *********************************************************************/
1825 * cpufreq_register_driver - register a CPU Frequency driver
1826 * @driver_data: A struct cpufreq_driver containing the values#
1827 * submitted by the CPU Frequency driver.
1829 * Registers a CPU Frequency driver to this core code. This code
1830 * returns zero on success, -EBUSY when another driver got here first
1831 * (and isn't unregistered in the meantime).
1834 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1836 unsigned long flags;
1839 if (cpufreq_disabled())
/* A driver must be able to verify limits, init a policy, and either set
 * a policy itself or service frequency targets. */
1842 if (!driver_data || !driver_data->verify || !driver_data->init ||
1843 ((!driver_data->setpolicy) && (!driver_data->target)))
1846 pr_debug("trying to register driver %s\n", driver_data->name);
/* ->setpolicy drivers change frequency without notifying the core, so
 * loops-per-jiffy must be treated as constant. */
1848 if (driver_data->setpolicy)
1849 driver_data->flags |= CPUFREQ_CONST_LOOPS;
/* Claim the global driver slot atomically; only one driver at a time. */
1851 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1852 if (cpufreq_driver) {
1853 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1856 cpufreq_driver = driver_data;
1857 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* This triggers ->add_dev (cpufreq_add_dev) for every existing CPU. */
1859 ret = subsys_interface_register(&cpufreq_interface);
1861 goto err_null_driver;
/* Non-sticky drivers are dropped again if they managed no CPU at all. */
1863 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1867 /* check for at least one working CPU */
1868 for (i = 0; i < nr_cpu_ids; i++)
1869 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1874 /* if all ->init() calls failed, unregister */
1876 pr_debug("no CPU initialized for driver %s\n",
1882 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1883 pr_debug("driver %s up and running\n", driver_data->name);
/* Error unwind: undo the subsys registration, then release the slot. */
1887 subsys_interface_unregister(&cpufreq_interface);
1889 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1890 cpufreq_driver = NULL;
1891 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1894 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1898 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1900 * Unregister the current CPUFreq driver. Only call this if you have
1901 * the right to do so, i.e. if you have succeeded in initialising before!
1902 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1903 * currently not initialised.
1905 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1907 unsigned long flags;
/* Only the driver that registered may unregister. */
1909 if (!cpufreq_driver || (driver != cpufreq_driver))
1912 pr_debug("unregistering driver %s\n", driver->name);
/* Reverse order of registration: subsys interface (tears down per-CPU
 * state via ->remove_dev), then the hotplug notifier. */
1914 subsys_interface_unregister(&cpufreq_interface);
1915 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
/* Release the global driver slot under the same lock that claimed it. */
1917 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1918 cpufreq_driver = NULL;
1919 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1923 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/* Core bring-up: initialize the per-CPU policy bookkeeping and rwsems,
 * create the global /sys/devices/system/cpu/cpufreq kobject, and hook
 * into system suspend/resume. Runs at core_initcall time, well before
 * any cpufreq driver can register. */
1925 static int __init cpufreq_core_init(void)
1929 if (cpufreq_disabled())
1932 for_each_possible_cpu(cpu) {
/* -1 marks "no managing CPU assigned yet" for this CPU's policy. */
1933 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1934 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1937 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
/* Cannot proceed without the sysfs anchor — fail hard at boot. */
1938 BUG_ON(!cpufreq_global_kobject);
1939 register_syscore_ops(&cpufreq_syscore_ops);
1943 core_initcall(cpufreq_core_init);