1 /*
2  *  linux/drivers/cpufreq/cpufreq.c
3  *
4  *  Copyright (C) 2001 Russell King
5  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6  *
7  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8  *      Added handling for CPU hotplug
9  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10  *      Fix handling for CPU hotplug -- affected CPUs
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <asm/cputime.h>
21 #include <linux/kernel.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/notifier.h>
26 #include <linux/cpufreq.h>
27 #include <linux/delay.h>
28 #include <linux/interrupt.h>
29 #include <linux/spinlock.h>
30 #include <linux/tick.h>
31 #include <linux/device.h>
32 #include <linux/slab.h>
33 #include <linux/cpu.h>
34 #include <linux/completion.h>
35 #include <linux/mutex.h>
36 #include <linux/syscore_ops.h>
37 #include <linux/suspend.h>
39
40 #include <trace/events/power.h>
41
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its locking. The
 * cpufreq_driver_lock rwlock also protects the cpufreq_cpu_data array.
 */
47 static struct cpufreq_driver *cpufreq_driver;
48 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
49 #ifdef CONFIG_HOTPLUG_CPU
50 /* This one keeps track of the previously set governor of a removed CPU */
51 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
52 #endif
53 static DEFINE_RWLOCK(cpufreq_driver_lock);
54 static DEFINE_MUTEX(cpufreq_governor_lock);
55
56 /* Flag to suspend/resume CPUFreq governors */
57 static bool cpufreq_suspended;
58
59 static inline bool has_target(void)
60 {
61         return cpufreq_driver->target;
62 }
63
64 /*
65  * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
66  * all cpufreq/hotplug/workqueue/etc related lock issues.
67  *
68  * The rules for this semaphore:
69  * - Any routine that wants to read from the policy structure will
70  *   do a down_read on this semaphore.
71  * - Any routine that will write to the policy structure and/or may take away
72  *   the policy altogether (eg. CPU hotplug), will hold this lock in write
73  *   mode before doing so.
74  *
75  * Additional rules:
76  * - Governor routines that can be called in cpufreq hotplug path should not
77  *   take this sem as top level hotplug notifier handler takes this.
78  * - Lock should not be held across
79  *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
80  */
81 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
82 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
83
84 #define lock_policy_rwsem(mode, cpu)                                    \
85 static int lock_policy_rwsem_##mode(int cpu)                            \
86 {                                                                       \
87         int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
88         BUG_ON(policy_cpu == -1);                                       \
89         down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
90                                                                         \
91         return 0;                                                       \
92 }
93
94 lock_policy_rwsem(read, cpu);
95 lock_policy_rwsem(write, cpu);
96
97 #define unlock_policy_rwsem(mode, cpu)                                  \
98 static void unlock_policy_rwsem_##mode(int cpu)                         \
99 {                                                                       \
100         int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
101         BUG_ON(policy_cpu == -1);                                       \
102         up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
103 }
104
105 unlock_policy_rwsem(read, cpu);
106 unlock_policy_rwsem(write, cpu);
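/*
 * Illustrative sketch (not compiled code): lock_policy_rwsem(read, cpu)
 * above expands to roughly the following helper, which resolves a CPU to
 * its policy-owning CPU before taking that CPU's rwsem:
 *
 *	static int lock_policy_rwsem_read(int cpu)
 *	{
 *		int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 *		BUG_ON(policy_cpu == -1);
 *		down_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 *		return 0;
 *	}
 *
 * unlock_policy_rwsem_read() is the matching up_read() counterpart.
 */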
107
108 /* internal prototypes */
109 static int __cpufreq_governor(struct cpufreq_policy *policy,
110                 unsigned int event);
111 static unsigned int __cpufreq_get(unsigned int cpu);
112 static void handle_update(struct work_struct *work);
113
114 /**
115  * Two notifier lists: the "policy" list is involved in the
116  * validation process for a new CPU frequency policy; the
117  * "transition" list for kernel code that needs to handle
118  * changes to devices when the CPU clock speed changes.
119  * The mutex locks both lists.
120  */
121 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
122 static struct srcu_notifier_head cpufreq_transition_notifier_list;
123
124 static bool init_cpufreq_transition_notifier_list_called;
125 static int __init init_cpufreq_transition_notifier_list(void)
126 {
127         srcu_init_notifier_head(&cpufreq_transition_notifier_list);
128         init_cpufreq_transition_notifier_list_called = true;
129         return 0;
130 }
131 pure_initcall(init_cpufreq_transition_notifier_list);
132
133 static int off __read_mostly;
134 static int cpufreq_disabled(void)
135 {
136         return off;
137 }
138 void disable_cpufreq(void)
139 {
140         off = 1;
141 }
142 static LIST_HEAD(cpufreq_governor_list);
143 static DEFINE_MUTEX(cpufreq_governor_mutex);
144
145 bool have_governor_per_policy(void)
146 {
147         return cpufreq_driver->have_governor_per_policy;
148 }
149 EXPORT_SYMBOL_GPL(have_governor_per_policy);
150
151 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
152 {
153         if (have_governor_per_policy())
154                 return &policy->kobj;
155         else
156                 return cpufreq_global_kobject;
157 }
158 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
159
160 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
161 {
162         u64 idle_time;
163         u64 cur_wall_time;
164         u64 busy_time;
165
166         cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
167
168         busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
169         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
170         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
171         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
172         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
173         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
174
175         idle_time = cur_wall_time - busy_time;
176         if (wall)
177                 *wall = cputime_to_usecs(cur_wall_time);
178
179         return cputime_to_usecs(idle_time);
180 }
181
182 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
183 {
184         u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
185
186         if (idle_time == -1ULL)
187                 return get_cpu_idle_time_jiffy(cpu, wall);
188         else if (!io_busy)
189                 idle_time += get_cpu_iowait_time_us(cpu, wall);
190
191         return idle_time;
192 }
193 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
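/*
 * Usage sketch (illustrative; this is how governors such as ondemand
 * consume the helper above):
 *
 *	u64 wall, idle;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *
 * With io_busy == 0, iowait is treated as idle time (it is added to the
 * returned total); with io_busy == 1, iowait counts as busy time. Both
 * the return value and *wall are in microseconds.
 */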
194
195 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
196 {
197         struct cpufreq_policy *data;
198         unsigned long flags;
199
200         if (cpu >= nr_cpu_ids)
201                 goto err_out;
202
203         /* get the cpufreq driver */
204         read_lock_irqsave(&cpufreq_driver_lock, flags);
205
206         if (!cpufreq_driver)
207                 goto err_out_unlock;
208
209         if (!try_module_get(cpufreq_driver->owner))
210                 goto err_out_unlock;
211
212
213         /* get the CPU */
214         data = per_cpu(cpufreq_cpu_data, cpu);
215
216         if (!data)
217                 goto err_out_put_module;
218
219         if (!sysfs && !kobject_get(&data->kobj))
220                 goto err_out_put_module;
221
222         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
223         return data;
224
225 err_out_put_module:
226         module_put(cpufreq_driver->owner);
227 err_out_unlock:
228         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
229 err_out:
230         return NULL;
231 }
232
233 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
234 {
235         if (cpufreq_disabled())
236                 return NULL;
237
238         return __cpufreq_cpu_get(cpu, false);
239 }
240 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
241
242 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
243 {
244         return __cpufreq_cpu_get(cpu, true);
245 }
246
247 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
248 {
249         if (!sysfs)
250                 kobject_put(&data->kobj);
251         module_put(cpufreq_driver->owner);
252 }
253
254 void cpufreq_cpu_put(struct cpufreq_policy *data)
255 {
256         if (cpufreq_disabled())
257                 return;
258
259         __cpufreq_cpu_put(data, false);
260 }
261 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
262
263 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
264 {
265         __cpufreq_cpu_put(data, true);
266 }
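/*
 * Typical usage of the get/put pair above (illustrative sketch):
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		... read policy->min, policy->max, policy->cur ...
 *		cpufreq_cpu_put(policy);
 *	}
 *
 * cpufreq_cpu_get() takes a reference on both the policy kobject and the
 * driver module, so every successful get must be paired with a put.
 */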
267
268 /*********************************************************************
269  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
270  *********************************************************************/
271
272 /**
273  * adjust_jiffies - adjust the system "loops_per_jiffy"
274  *
275  * This function alters the system "loops_per_jiffy" for the clock
276  * speed change. Note that loops_per_jiffy cannot be updated on SMP
277  * systems as each CPU might be scaled differently. So, use the arch
278  * per-CPU loops_per_jiffy value wherever possible.
279  */
280 #ifndef CONFIG_SMP
281 static unsigned long l_p_j_ref;
282 static unsigned int  l_p_j_ref_freq;
283
284 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
285 {
286         if (ci->flags & CPUFREQ_CONST_LOOPS)
287                 return;
288
289         if (!l_p_j_ref_freq) {
290                 l_p_j_ref = loops_per_jiffy;
291                 l_p_j_ref_freq = ci->old;
292                 pr_debug("saving %lu as reference value for loops_per_jiffy; "
293                         "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
294         }
295         if ((val == CPUFREQ_POSTCHANGE  && ci->old != ci->new) ||
296             (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
297                 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
298                                                                 ci->new);
299                 pr_debug("scaling loops_per_jiffy to %lu "
300                         "for frequency %u kHz\n", loops_per_jiffy, ci->new);
301         }
302 }
303 #else
304 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
305 {
306         return;
307 }
308 #endif
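/*
 * Worked example for adjust_jiffies() (illustrative numbers): if
 * l_p_j_ref = 4997120 was saved at l_p_j_ref_freq = 1000000 kHz, a
 * POSTCHANGE to 500000 kHz rescales loops_per_jiffy via cpufreq_scale()
 * to 4997120 * 500000 / 1000000 = 2498560, keeping udelay() roughly
 * calibrated on UP systems.
 */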
309
310
311 void __cpufreq_notify_transition(struct cpufreq_policy *policy,
312                 struct cpufreq_freqs *freqs, unsigned int state)
313 {
314         BUG_ON(irqs_disabled());
315
316         if (cpufreq_disabled())
317                 return;
318
319         freqs->flags = cpufreq_driver->flags;
320         pr_debug("notification %u of frequency transition to %u kHz\n",
321                 state, freqs->new);
322
323         switch (state) {
324
325         case CPUFREQ_PRECHANGE:
326                 /* detect if the driver reported a value as "old frequency"
327                  * which is not equal to what the cpufreq core thinks is
328                  * "old frequency".
329                  */
330                 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
331                         if ((policy) && (policy->cpu == freqs->cpu) &&
332                             (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u kHz, "
					"cpufreq assumed %u kHz\n",
					freqs->old, policy->cur);
336                                 freqs->old = policy->cur;
337                         }
338                 }
339                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
340                                 CPUFREQ_PRECHANGE, freqs);
341                 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
342                 break;
343
344         case CPUFREQ_POSTCHANGE:
345                 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
346                 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
347                         (unsigned long)freqs->cpu);
348                 trace_cpu_frequency(freqs->new, freqs->cpu);
349                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
350                                 CPUFREQ_POSTCHANGE, freqs);
351                 if (likely(policy) && likely(policy->cpu == freqs->cpu))
352                         policy->cur = freqs->new;
353                 break;
354         }
355 }
356 /**
357  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
358  * on frequency transition.
359  *
360  * This function calls the transition notifiers and the "adjust_jiffies"
361  * function. It is called twice on all CPU frequency changes that have
362  * external effects.
363  */
364 void cpufreq_notify_transition(struct cpufreq_policy *policy,
365                 struct cpufreq_freqs *freqs, unsigned int state)
366 {
367         for_each_cpu(freqs->cpu, policy->cpus)
368                 __cpufreq_notify_transition(policy, freqs, state);
369 }
370 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
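/*
 * Sketch of a transition notifier as driven by the function above
 * (illustrative; my_transition_cb and my_nb are made-up names):
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u: %u -> %u kHz\n",
 *				freqs->cpu, freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 * Registered via cpufreq_register_notifier(&my_nb,
 * CPUFREQ_TRANSITION_NOTIFIER), the callback runs once before
 * (PRECHANGE) and once after (POSTCHANGE) each transition, for every
 * CPU in policy->cpus.
 */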
371
372
373
374 /*********************************************************************
375  *                          SYSFS INTERFACE                          *
376  *********************************************************************/
377
378 static struct cpufreq_governor *__find_governor(const char *str_governor)
379 {
380         struct cpufreq_governor *t;
381
382         list_for_each_entry(t, &cpufreq_governor_list, governor_list)
383                 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
384                         return t;
385
386         return NULL;
387 }
388
389 /**
390  * cpufreq_parse_governor - parse a governor string
391  */
392 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
393                                 struct cpufreq_governor **governor)
394 {
395         int err = -EINVAL;
396
397         if (!cpufreq_driver)
398                 goto out;
399
400         if (cpufreq_driver->setpolicy) {
401                 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
402                         *policy = CPUFREQ_POLICY_PERFORMANCE;
403                         err = 0;
404                 } else if (!strnicmp(str_governor, "powersave",
405                                                 CPUFREQ_NAME_LEN)) {
406                         *policy = CPUFREQ_POLICY_POWERSAVE;
407                         err = 0;
408                 }
409         } else if (cpufreq_driver->target) {
410                 struct cpufreq_governor *t;
411
412                 mutex_lock(&cpufreq_governor_mutex);
413
414                 t = __find_governor(str_governor);
415
416                 if (t == NULL) {
417                         int ret;
418
419                         mutex_unlock(&cpufreq_governor_mutex);
420                         ret = request_module("cpufreq_%s", str_governor);
421                         mutex_lock(&cpufreq_governor_mutex);
422
423                         if (ret == 0)
424                                 t = __find_governor(str_governor);
425                 }
426
427                 if (t != NULL) {
428                         *governor = t;
429                         err = 0;
430                 }
431
432                 mutex_unlock(&cpufreq_governor_mutex);
433         }
434 out:
435         return err;
436 }
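/*
 * Illustrative call (this is what store_scaling_governor() below ends up
 * doing): parsing "ondemand" on a ->target driver first searches
 * cpufreq_governor_list and, on a miss, tries
 * request_module("cpufreq_ondemand") before retrying the lookup:
 *
 *	unsigned int pol;
 *	struct cpufreq_governor *gov = NULL;
 *
 *	if (!cpufreq_parse_governor("ondemand", &pol, &gov) && gov)
 *		... gov now points at the ondemand governor ...
 */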
437
438
439 /**
440  * cpufreq_per_cpu_attr_read() / show_##file_name() -
441  * print out cpufreq information
442  *
 * Write out information from the given cpufreq policy; object must be
 * "unsigned int".
445  */
446
447 #define show_one(file_name, object)                     \
448 static ssize_t show_##file_name                         \
449 (struct cpufreq_policy *policy, char *buf)              \
450 {                                                       \
451         return sprintf(buf, "%u\n", policy->object);    \
452 }
453
454 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
455 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
456 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
457 show_one(scaling_min_freq, min);
458 show_one(scaling_max_freq, max);
459 show_one(scaling_cur_freq, cur);
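/*
 * For reference, show_one(scaling_min_freq, min) above expands to
 * roughly:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 *
 * store_one() below generates the matching sysfs write handlers.
 */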
460
461 static int __cpufreq_set_policy(struct cpufreq_policy *data,
462                                 struct cpufreq_policy *policy);
463
464 /**
465  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
466  */
467 #define store_one(file_name, object)                    \
468 static ssize_t store_##file_name                                        \
469 (struct cpufreq_policy *policy, const char *buf, size_t count)          \
470 {                                                                       \
471         unsigned int ret;                                               \
472         struct cpufreq_policy new_policy;                               \
473                                                                         \
474         ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
475         if (ret)                                                        \
476                 return -EINVAL;                                         \
477                                                                         \
478         ret = sscanf(buf, "%u", &new_policy.object);                    \
479         if (ret != 1)                                                   \
480                 return -EINVAL;                                         \
481                                                                         \
482         ret = __cpufreq_set_policy(policy, &new_policy);                \
483         policy->user_policy.object = policy->object;                    \
484                                                                         \
485         return ret ? ret : count;                                       \
486 }
487
488 store_one(scaling_min_freq, min);
489 store_one(scaling_max_freq, max);
490
491 /**
492  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
493  */
494 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
495                                         char *buf)
496 {
497         unsigned int cur_freq = __cpufreq_get(policy->cpu);
498         if (!cur_freq)
499                 return sprintf(buf, "<unknown>");
500         return sprintf(buf, "%u\n", cur_freq);
501 }
502
503
504 /**
505  * show_scaling_governor - show the current policy for the specified CPU
506  */
507 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
508 {
509         if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
510                 return sprintf(buf, "powersave\n");
511         else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
512                 return sprintf(buf, "performance\n");
513         else if (policy->governor)
514                 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
515                                 policy->governor->name);
516         return -EINVAL;
517 }
518
519
520 /**
521  * store_scaling_governor - store policy for the specified CPU
522  */
523 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
524                                         const char *buf, size_t count)
525 {
526         unsigned int ret;
527         char    str_governor[16];
528         struct cpufreq_policy new_policy;
529
530         ret = cpufreq_get_policy(&new_policy, policy->cpu);
531         if (ret)
532                 return ret;
533
534         ret = sscanf(buf, "%15s", str_governor);
535         if (ret != 1)
536                 return -EINVAL;
537
538         if (cpufreq_parse_governor(str_governor, &new_policy.policy,
539                                                 &new_policy.governor))
540                 return -EINVAL;
541
542         /* Do not use cpufreq_set_policy here or the user_policy.max
543            will be wrongly overridden */
544         ret = __cpufreq_set_policy(policy, &new_policy);
545
546         policy->user_policy.policy = policy->policy;
547         policy->user_policy.governor = policy->governor;
548
549         if (ret)
550                 return ret;
551         else
552                 return count;
553 }
554
555 /**
556  * show_scaling_driver - show the cpufreq driver currently loaded
557  */
558 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
559 {
560         return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
561 }
562
563 /**
564  * show_scaling_available_governors - show the available CPUfreq governors
565  */
566 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
567                                                 char *buf)
568 {
569         ssize_t i = 0;
570         struct cpufreq_governor *t;
571
572         if (!cpufreq_driver->target) {
573                 i += sprintf(buf, "performance powersave");
574                 goto out;
575         }
576
577         list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
578                 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
579                     - (CPUFREQ_NAME_LEN + 2)))
580                         goto out;
581                 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
582         }
583 out:
584         i += sprintf(&buf[i], "\n");
585         return i;
586 }
587
588 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
589 {
590         ssize_t i = 0;
591         unsigned int cpu;
592
593         for_each_cpu(cpu, mask) {
594                 if (i)
595                         i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
596                 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
597                 if (i >= (PAGE_SIZE - 5))
598                         break;
599         }
600         i += sprintf(&buf[i], "\n");
601         return i;
602 }
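/*
 * Example output (illustrative): for a mask containing CPUs 0-3 the
 * buffer ends up as "0 1 2 3\n"; output is truncated as it approaches
 * PAGE_SIZE.
 */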
603
604 /**
605  * show_related_cpus - show the CPUs affected by each transition even if
606  * hw coordination is in use
607  */
608 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
609 {
610         return show_cpus(policy->related_cpus, buf);
611 }
612
613 /**
614  * show_affected_cpus - show the CPUs affected by each transition
615  */
616 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
617 {
618         return show_cpus(policy->cpus, buf);
619 }
620
621 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
622                                         const char *buf, size_t count)
623 {
624         unsigned int freq = 0;
625         unsigned int ret;
626
627         if (!policy->governor || !policy->governor->store_setspeed)
628                 return -EINVAL;
629
630         ret = sscanf(buf, "%u", &freq);
631         if (ret != 1)
632                 return -EINVAL;
633
634         policy->governor->store_setspeed(policy, freq);
635
636         return count;
637 }
638
639 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
640 {
641         if (!policy->governor || !policy->governor->show_setspeed)
642                 return sprintf(buf, "<unsupported>\n");
643
644         return policy->governor->show_setspeed(policy, buf);
645 }
646
647 /**
648  * show_bios_limit - show the current cpufreq HW/BIOS limitation
649  */
650 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
651 {
652         unsigned int limit;
653         int ret;
654         if (cpufreq_driver->bios_limit) {
655                 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
656                 if (!ret)
657                         return sprintf(buf, "%u\n", limit);
658         }
659         return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
660 }
661
662 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
663 cpufreq_freq_attr_ro(cpuinfo_min_freq);
664 cpufreq_freq_attr_ro(cpuinfo_max_freq);
665 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
666 cpufreq_freq_attr_ro(scaling_available_governors);
667 cpufreq_freq_attr_ro(scaling_driver);
668 cpufreq_freq_attr_ro(scaling_cur_freq);
669 cpufreq_freq_attr_ro(bios_limit);
670 cpufreq_freq_attr_ro(related_cpus);
671 cpufreq_freq_attr_ro(affected_cpus);
672 cpufreq_freq_attr_rw(scaling_min_freq);
673 cpufreq_freq_attr_rw(scaling_max_freq);
674 cpufreq_freq_attr_rw(scaling_governor);
675 cpufreq_freq_attr_rw(scaling_setspeed);
676
677 static struct attribute *default_attrs[] = {
678         &cpuinfo_min_freq.attr,
679         &cpuinfo_max_freq.attr,
680         &cpuinfo_transition_latency.attr,
681         &scaling_min_freq.attr,
682         &scaling_max_freq.attr,
683         &affected_cpus.attr,
684         &related_cpus.attr,
685         &scaling_governor.attr,
686         &scaling_driver.attr,
687         &scaling_available_governors.attr,
688         &scaling_setspeed.attr,
689         NULL
690 };
691
692 struct kobject *cpufreq_global_kobject;
693 EXPORT_SYMBOL(cpufreq_global_kobject);
694
695 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
696 #define to_attr(a) container_of(a, struct freq_attr, attr)
697
698 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
699 {
700         struct cpufreq_policy *policy = to_policy(kobj);
701         struct freq_attr *fattr = to_attr(attr);
702         ssize_t ret = -EINVAL;
703         policy = cpufreq_cpu_get_sysfs(policy->cpu);
704         if (!policy)
705                 goto no_policy;
706
707         if (lock_policy_rwsem_read(policy->cpu) < 0)
708                 goto fail;
709
710         if (fattr->show)
711                 ret = fattr->show(policy, buf);
712         else
713                 ret = -EIO;
714
715         unlock_policy_rwsem_read(policy->cpu);
716 fail:
717         cpufreq_cpu_put_sysfs(policy);
718 no_policy:
719         return ret;
720 }
721
722 static ssize_t store(struct kobject *kobj, struct attribute *attr,
723                      const char *buf, size_t count)
724 {
725         struct cpufreq_policy *policy = to_policy(kobj);
726         struct freq_attr *fattr = to_attr(attr);
727         ssize_t ret = -EINVAL;
728         policy = cpufreq_cpu_get_sysfs(policy->cpu);
729         if (!policy)
730                 goto no_policy;
731
732         if (lock_policy_rwsem_write(policy->cpu) < 0)
733                 goto fail;
734
735         if (fattr->store)
736                 ret = fattr->store(policy, buf, count);
737         else
738                 ret = -EIO;
739
740         unlock_policy_rwsem_write(policy->cpu);
741 fail:
742         cpufreq_cpu_put_sysfs(policy);
743 no_policy:
744         return ret;
745 }
746
747 static void cpufreq_sysfs_release(struct kobject *kobj)
748 {
749         struct cpufreq_policy *policy = to_policy(kobj);
750         pr_debug("last reference is dropped\n");
751         complete(&policy->kobj_unregister);
752 }
753
754 static const struct sysfs_ops sysfs_ops = {
755         .show   = show,
756         .store  = store,
757 };
758
759 static struct kobj_type ktype_cpufreq = {
760         .sysfs_ops      = &sysfs_ops,
761         .default_attrs  = default_attrs,
762         .release        = cpufreq_sysfs_release,
763 };
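/*
 * The kobject type above backs the per-policy sysfs directory, e.g.
 * (paths shown for cpu0):
 *
 *	/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *	/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
 *	/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
 *
 * show()/store() above dispatch reads and writes of these files to the
 * individual show_*()/store_*() handlers via struct freq_attr.
 */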
764
765 /* symlink affected CPUs */
766 static int cpufreq_add_dev_symlink(unsigned int cpu,
767                                    struct cpufreq_policy *policy)
768 {
769         unsigned int j;
770         int ret = 0;
771
772         for_each_cpu(j, policy->cpus) {
773                 struct cpufreq_policy *managed_policy;
774                 struct device *cpu_dev;
775
776                 if (j == cpu)
777                         continue;
778
779                 pr_debug("CPU %u already managed, adding link\n", j);
780                 managed_policy = cpufreq_cpu_get(cpu);
781                 cpu_dev = get_cpu_device(j);
782                 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
783                                         "cpufreq");
784                 if (ret) {
785                         cpufreq_cpu_put(managed_policy);
786                         return ret;
787                 }
788         }
789         return ret;
790 }
791
792 static int cpufreq_add_dev_interface(unsigned int cpu,
793                                      struct cpufreq_policy *policy,
794                                      struct device *dev)
795 {
796         struct cpufreq_policy new_policy;
797         struct freq_attr **drv_attr;
798         unsigned long flags;
799         int ret = 0;
800         unsigned int j;
801
802         /* prepare interface data */
803         ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
804                                    &dev->kobj, "cpufreq");
805         if (ret)
806                 return ret;
807
808         /* set up files for this cpu device */
809         drv_attr = cpufreq_driver->attr;
810         while ((drv_attr) && (*drv_attr)) {
811                 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
812                 if (ret)
813                         goto err_out_kobj_put;
814                 drv_attr++;
815         }
816         if (cpufreq_driver->get) {
817                 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
818                 if (ret)
819                         goto err_out_kobj_put;
820         }
821         if (cpufreq_driver->target) {
822                 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
823                 if (ret)
824                         goto err_out_kobj_put;
825         }
826         if (cpufreq_driver->bios_limit) {
827                 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
828                 if (ret)
829                         goto err_out_kobj_put;
830         }
831
832         write_lock_irqsave(&cpufreq_driver_lock, flags);
833         for_each_cpu(j, policy->cpus) {
834                 per_cpu(cpufreq_cpu_data, j) = policy;
835                 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
836         }
837         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
838
839         ret = cpufreq_add_dev_symlink(cpu, policy);
840         if (ret)
841                 goto err_out_kobj_put;
842
843         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
844         /* assure that the starting sequence is run in __cpufreq_set_policy */
845         policy->governor = NULL;
846
847         /* set default policy */
848         ret = __cpufreq_set_policy(policy, &new_policy);
849         policy->user_policy.policy = policy->policy;
850         policy->user_policy.governor = policy->governor;
851
852         if (ret) {
853                 pr_debug("setting policy failed\n");
854                 if (cpufreq_driver->exit)
855                         cpufreq_driver->exit(policy);
856         }
857         return ret;
858
859 err_out_kobj_put:
860         kobject_put(&policy->kobj);
861         wait_for_completion(&policy->kobj_unregister);
862         return ret;
863 }
864
865 #ifdef CONFIG_HOTPLUG_CPU
866 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
867                                   struct device *dev)
868 {
869         struct cpufreq_policy *policy;
870         int ret = 0, has_target = !!cpufreq_driver->target;
871         unsigned long flags;
872
873         policy = cpufreq_cpu_get(sibling);
874         WARN_ON(!policy);
875
876         if (has_target)
877                 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
878
879         lock_policy_rwsem_write(sibling);
880
881         write_lock_irqsave(&cpufreq_driver_lock, flags);
882
883         cpumask_set_cpu(cpu, policy->cpus);
884         per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
885         per_cpu(cpufreq_cpu_data, cpu) = policy;
886         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
887
888         unlock_policy_rwsem_write(sibling);
889
890         if (has_target) {
891                 __cpufreq_governor(policy, CPUFREQ_GOV_START);
892                 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
893         }
894
895         ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
896         if (ret) {
897                 cpufreq_cpu_put(policy);
898                 return ret;
899         }
900
901         return 0;
902 }
903 #endif
904
905 /**
906  * cpufreq_add_dev - add a CPU device
907  *
908  * Adds the cpufreq interface for a CPU device.
909  *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose.
 * Tried to clean this mess up, but more thorough testing is needed.
 * - Mathieu
913  */
914 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
915 {
916         unsigned int j, cpu = dev->id;
917         int ret = -ENOMEM;
918         struct cpufreq_policy *policy;
919         unsigned long flags;
920 #ifdef CONFIG_HOTPLUG_CPU
921         struct cpufreq_governor *gov;
922         int sibling;
923 #endif
924
925         if (cpu_is_offline(cpu))
926                 return 0;
927
928
929 #ifdef CONFIG_SMP
930         /* check whether a different CPU already registered this
931          * CPU because it is in the same boat. */
932         policy = cpufreq_cpu_get(cpu);
933         if (unlikely(policy)) {
934                 cpufreq_cpu_put(policy);
935                 return 0;
936         }
937
938 #ifdef CONFIG_HOTPLUG_CPU
939         /* Check if this cpu was hot-unplugged earlier and has siblings */
940         read_lock_irqsave(&cpufreq_driver_lock, flags);
941         for_each_online_cpu(sibling) {
942                 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
943                 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
944                         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
945                         return cpufreq_add_policy_cpu(cpu, sibling, dev);
946                 }
947         }
948         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
949 #endif
950 #endif
951
952         if (!try_module_get(cpufreq_driver->owner)) {
953                 ret = -EINVAL;
954                 goto module_out;
955         }
956
957         policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
958         if (!policy)
959                 goto nomem_out;
960
961         if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
962                 goto err_free_policy;
963
964         if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
965                 goto err_free_cpumask;
966
967         policy->cpu = cpu;
968         policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
969         cpumask_copy(policy->cpus, cpumask_of(cpu));
970
971         /* Initially set CPU itself as the policy_cpu */
972         per_cpu(cpufreq_policy_cpu, cpu) = cpu;
973
974         init_completion(&policy->kobj_unregister);
975         INIT_WORK(&policy->update, handle_update);
976
977         /* call driver. From then on the cpufreq must be able
978          * to accept all calls to ->verify and ->setpolicy for this CPU
979          */
980         ret = cpufreq_driver->init(policy);
981         if (ret) {
982                 pr_debug("initialization failed\n");
983                 goto err_set_policy_cpu;
984         }
985
	/* related cpus should at least contain policy->cpus */
987         cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
988
	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
993         cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
994
995         policy->user_policy.min = policy->min;
996         policy->user_policy.max = policy->max;
997
998         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
999                                      CPUFREQ_START, policy);
1000
1001 #ifdef CONFIG_HOTPLUG_CPU
1002         gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1003         if (gov) {
1004                 policy->governor = gov;
1005                 pr_debug("Restoring governor %s for cpu %d\n",
1006                        policy->governor->name, cpu);
1007         }
1008 #endif
1009
1010         ret = cpufreq_add_dev_interface(cpu, policy, dev);
1011         if (ret)
1012                 goto err_out_unregister;
1013
1014         kobject_uevent(&policy->kobj, KOBJ_ADD);
1015         module_put(cpufreq_driver->owner);
1016         pr_debug("initialization complete\n");
1017
1018         return 0;
1019
1020 err_out_unregister:
1021         write_lock_irqsave(&cpufreq_driver_lock, flags);
1022         for_each_cpu(j, policy->cpus)
1023                 per_cpu(cpufreq_cpu_data, j) = NULL;
1024         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1025
1026         kobject_put(&policy->kobj);
1027         wait_for_completion(&policy->kobj_unregister);
1028
1029 err_set_policy_cpu:
1030         per_cpu(cpufreq_policy_cpu, cpu) = -1;
1031         free_cpumask_var(policy->related_cpus);
1032 err_free_cpumask:
1033         free_cpumask_var(policy->cpus);
1034 err_free_policy:
1035         kfree(policy);
1036 nomem_out:
1037         module_put(cpufreq_driver->owner);
1038 module_out:
1039         return ret;
1040 }
1041
1042 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1043 {
1044         int j;
1045
1046         policy->last_cpu = policy->cpu;
1047         policy->cpu = cpu;
1048
1049         for_each_cpu(j, policy->cpus)
1050                 per_cpu(cpufreq_policy_cpu, j) = cpu;
1051
1052 #ifdef CONFIG_CPU_FREQ_TABLE
1053         cpufreq_frequency_table_update_policy_cpu(policy);
1054 #endif
1055         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1056                         CPUFREQ_UPDATE_POLICY_CPU, policy);
1057 }
1058
1059 /**
1060  * __cpufreq_remove_dev - remove a CPU device
1061  *
1062  * Removes the cpufreq interface for a CPU device.
1063  * Caller should already have policy_rwsem in write mode for this CPU.
1064  * This routine frees the rwsem before returning.
1065  */
1066 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1067 {
1068         unsigned int cpu = dev->id, ret, cpus;
1069         unsigned long flags;
1070         struct cpufreq_policy *data;
1071         struct kobject *kobj;
1072         struct completion *cmp;
1073         struct device *cpu_dev;
1074
1075         pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1076
1077         write_lock_irqsave(&cpufreq_driver_lock, flags);
1078
1079         data = per_cpu(cpufreq_cpu_data, cpu);
1080         per_cpu(cpufreq_cpu_data, cpu) = NULL;
1081
1082         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1083
1084         if (!data) {
1085                 pr_debug("%s: No cpu_data found\n", __func__);
1086                 return -EINVAL;
1087         }
1088
1089         if (cpufreq_driver->target)
1090                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1091
1092 #ifdef CONFIG_HOTPLUG_CPU
1093         if (!cpufreq_driver->setpolicy)
1094                 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1095                         data->governor->name, CPUFREQ_NAME_LEN);
1096 #endif
1097
1098         WARN_ON(lock_policy_rwsem_write(cpu));
1099         cpus = cpumask_weight(data->cpus);
1100
1101         if (cpus > 1)
1102                 cpumask_clear_cpu(cpu, data->cpus);
1103         unlock_policy_rwsem_write(cpu);
1104
1105         if (cpu != data->cpu) {
1106                 sysfs_remove_link(&dev->kobj, "cpufreq");
1107         } else if (cpus > 1) {
1108                 /* first sibling now owns the new sysfs dir */
1109                 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1110                 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1111                 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1112                 if (ret) {
1113                         pr_err("%s: Failed to move kobj: %d", __func__, ret);
1114
1115                         WARN_ON(lock_policy_rwsem_write(cpu));
1116                         cpumask_set_cpu(cpu, data->cpus);
1117
1118                         write_lock_irqsave(&cpufreq_driver_lock, flags);
1119                         per_cpu(cpufreq_cpu_data, cpu) = data;
1120                         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1121
1122                         unlock_policy_rwsem_write(cpu);
1123
1124                         ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1125                                         "cpufreq");
1126                         return -EINVAL;
1127                 }
1128
1129                 WARN_ON(lock_policy_rwsem_write(cpu));
1130                 update_policy_cpu(data, cpu_dev->id);
1131                 unlock_policy_rwsem_write(cpu);
1132                 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1133                                 __func__, cpu_dev->id, cpu);
1134         }
1135
1136         /* If cpu is last user of policy, free policy */
1137         if (cpus == 1) {
1138                 if (cpufreq_driver->target)
1139                         __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1140
1141                 lock_policy_rwsem_read(cpu);
1142                 kobj = &data->kobj;
1143                 cmp = &data->kobj_unregister;
1144                 unlock_policy_rwsem_read(cpu);
1145                 kobject_put(kobj);
1146
1147                 /* we need to make sure that the underlying kobj is actually
1148                  * not referenced anymore by anybody before we proceed with
1149                  * unloading.
1150                  */
1151                 pr_debug("waiting for dropping of refcount\n");
1152                 wait_for_completion(cmp);
1153                 pr_debug("wait complete\n");
1154
1155                 if (cpufreq_driver->exit)
1156                         cpufreq_driver->exit(data);
1157
1158                 free_cpumask_var(data->related_cpus);
1159                 free_cpumask_var(data->cpus);
		pr_debug("policy %d is freed\n", data->cpu);
1161                 kfree(data);
1162         } else {
		pr_debug("%s: removing link, cpu: %d policy[%d] %p\n",
			 __func__, cpu, data->cpu, data);
1164                 cpufreq_cpu_put(data);
1165                 if (cpufreq_driver->target) {
1166                         __cpufreq_governor(data, CPUFREQ_GOV_START);
1167                         __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1168                 }
1169         }
1170
1171         per_cpu(cpufreq_policy_cpu, cpu) = -1;
1172         return 0;
1173 }
1174
1175
1176 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1177 {
1178         unsigned int cpu = dev->id;
1179         int retval;
1180
1181         if (cpu_is_offline(cpu))
1182                 return 0;
1183
1184         retval = __cpufreq_remove_dev(dev, sif);
1185         return retval;
1186 }
1187
1188
1189 static void handle_update(struct work_struct *work)
1190 {
1191         struct cpufreq_policy *policy =
1192                 container_of(work, struct cpufreq_policy, update);
1193         unsigned int cpu = policy->cpu;
1194         pr_debug("handle_update for cpu %u called\n", cpu);
1195         cpufreq_update_policy(cpu);
1196 }
1197
1198 /**
 *	cpufreq_out_of_sync - if actual and saved CPU frequencies differ, we're in deep trouble
1200  *      @cpu: cpu number
1201  *      @old_freq: CPU frequency the kernel thinks the CPU runs at
1202  *      @new_freq: CPU frequency the CPU actually runs at
1203  *
 *	We adjust to the current frequency first, and need to clean up later.
 *	So either call cpufreq_update_policy() or schedule handle_update().
1206  */
1207 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1208                                 unsigned int new_freq)
1209 {
1210         struct cpufreq_policy *policy;
1211         struct cpufreq_freqs freqs;
1212         unsigned long flags;
1213
1214
	pr_debug("Warning: CPU frequency out of sync: cpufreq core thinks "
	       "it is %u kHz, hardware reports %u kHz\n", old_freq, new_freq);
1217
1218         freqs.old = old_freq;
1219         freqs.new = new_freq;
1220
1221         read_lock_irqsave(&cpufreq_driver_lock, flags);
1222         policy = per_cpu(cpufreq_cpu_data, cpu);
1223         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1224
1225         cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1226         cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1227 }
1228
1229
1230 /**
1231  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1232  * @cpu: CPU number
1233  *
1234  * This is the last known freq, without actually getting it from the driver.
1235  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1236  */
1237 unsigned int cpufreq_quick_get(unsigned int cpu)
1238 {
1239         struct cpufreq_policy *policy;
1240         unsigned int ret_freq = 0;
1241
1242         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1243                 return cpufreq_driver->get(cpu);
1244
1245         policy = cpufreq_cpu_get(cpu);
1246         if (policy) {
1247                 ret_freq = policy->cur;
1248                 cpufreq_cpu_put(policy);
1249         }
1250
1251         return ret_freq;
1252 }
1253 EXPORT_SYMBOL(cpufreq_quick_get);
1254
1255 /**
1256  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1257  * @cpu: CPU number
1258  *
1259  * Just return the max possible frequency for a given CPU.
1260  */
1261 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1262 {
1263         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1264         unsigned int ret_freq = 0;
1265
1266         if (policy) {
1267                 ret_freq = policy->max;
1268                 cpufreq_cpu_put(policy);
1269         }
1270
1271         return ret_freq;
1272 }
1273 EXPORT_SYMBOL(cpufreq_quick_get_max);
1274
1275
1276 static unsigned int __cpufreq_get(unsigned int cpu)
1277 {
1278         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1279         unsigned int ret_freq = 0;
1280
1281         if (!cpufreq_driver->get)
1282                 return ret_freq;
1283
1284         ret_freq = cpufreq_driver->get(cpu);
1285
1286         if (ret_freq && policy->cur &&
1287                 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/*
		 * verify no discrepancy between actual and saved value
		 * exists
		 */
1290                 if (unlikely(ret_freq != policy->cur)) {
1291                         cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1292                         schedule_work(&policy->update);
1293                 }
1294         }
1295
1296         return ret_freq;
1297 }
1298
1299 /**
1300  * cpufreq_get - get the current CPU frequency (in kHz)
1301  * @cpu: CPU number
1302  *
 * Get the current CPU frequency, as reported by the hardware/driver.
1304  */
1305 unsigned int cpufreq_get(unsigned int cpu)
1306 {
1307         unsigned int ret_freq = 0;
1308         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1309
1310         if (!policy)
1311                 goto out;
1312
1313         if (unlikely(lock_policy_rwsem_read(cpu)))
1314                 goto out_policy;
1315
1316         ret_freq = __cpufreq_get(cpu);
1317
1318         unlock_policy_rwsem_read(cpu);
1319
1320 out_policy:
1321         cpufreq_cpu_put(policy);
1322 out:
1323         return ret_freq;
1324 }
1325 EXPORT_SYMBOL(cpufreq_get);
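/*
 * Usage sketch: unlike cpufreq_quick_get(), cpufreq_get() queries the
 * driver, so it can detect and correct an out-of-sync policy->cur:
 *
 *	unsigned int khz = cpufreq_get(0);
 *
 * which returns the current frequency of CPU0 in kHz, or 0 on failure.
 */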
1326
1327 static struct subsys_interface cpufreq_interface = {
1328         .name           = "cpufreq",
1329         .subsys         = &cpu_subsys,
1330         .add_dev        = cpufreq_add_dev,
1331         .remove_dev     = cpufreq_remove_dev,
1332 };
1333
1334
1335 /**
1336  * cpufreq_suspend() - Suspend CPUFreq governors
1337  *
 * Called during system-wide Suspend/Hibernate cycles to suspend governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle: the devices they use for changing frequency (e.g. i2c, regulators)
 * are themselves suspended shortly afterwards.
1342  */
1343 void cpufreq_suspend(void)
1344 {
1345         struct cpufreq_policy *policy;
1346
1347 #if defined(CONFIG_MACH_SP9838AEA_5MOD) || defined(CONFIG_MACH_SP9838AEA_4CORE) || defined(CONFIG_MACH_SP9838AEA_8CORE_LIGHT_SLEEP) || defined(CONFIG_MACH_SS_SHARKLT8)
1348         return;
1349 #endif
1350         if (!cpufreq_driver)
1351                 return;
1352
1353         if (!has_target())
1354                 return;
1355
1356         pr_debug("%s: Suspending Governors\n", __func__);
1357
	policy = cpufreq_cpu_get(0);
	if (!policy) {
		pr_err("Policy is NULL\n");
		return;
	}
1363
1364         if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1365                 pr_err("%s: Failed to stop governor for policy: %p\n",
1366                         __func__, policy);
1367         else if (cpufreq_driver->suspend
1368             && cpufreq_driver->suspend(policy))
1369                 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1370                         policy);
1371
	cpufreq_suspended = true;

	/* drop the reference taken by cpufreq_cpu_get(0) above */
	cpufreq_cpu_put(policy);
1373 }
1374
1375 /**
1376  * cpufreq_resume() - Resume CPUFreq governors
1377  *
 * Called during system-wide Suspend/Hibernate cycles to resume governors
 * that were suspended with cpufreq_suspend().
1380  */
1381 void cpufreq_resume(void)
1382 {
1383         struct cpufreq_policy *policy;
1384
1385 #if defined(CONFIG_MACH_SP9838AEA_5MOD) || defined(CONFIG_MACH_SP9838AEA_4CORE) || defined(CONFIG_MACH_SP9838AEA_8CORE_LIGHT_SLEEP) || defined(CONFIG_MACH_SS_SHARKLT8)
1386         return;
1387 #endif
1388
1389         if (!cpufreq_driver)
1390                 return;
1391
1392         if (!has_target())
1393                 return;
1394
1395         pr_debug("%s: Resuming Governors\n", __func__);
1396
1397         cpufreq_suspended = false;
1398
1399         policy = cpufreq_cpu_get(0);
1400         if (!policy) {
1401                 pr_err("Policy is NULL\n");
1402                 return;
1403         }
1404         if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1405             || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1406                 pr_err("%s: Failed to start governor for policy: %p\n",
1407                         __func__, policy);
1408         else if (cpufreq_driver->resume
1409             && cpufreq_driver->resume(policy))
1410                 pr_err("%s: Failed to resume driver: %p\n", __func__,
1411                         policy);
1412
	schedule_work(&policy->update);

	/* drop the reference taken by cpufreq_cpu_get(0) above */
	cpufreq_cpu_put(policy);
1414 }
1415
1416 /**
1417  *      cpufreq_get_current_driver - return current driver's name
1418  *
1419  *      Return the name string of the currently loaded cpufreq driver
1420  *      or NULL, if none.
1421  */
1422 const char *cpufreq_get_current_driver(void)
1423 {
1424         if (cpufreq_driver)
1425                 return cpufreq_driver->name;
1426
1427         return NULL;
1428 }
1429 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1430
1431 /*********************************************************************
1432  *                     NOTIFIER LISTS INTERFACE                      *
1433  *********************************************************************/
1434
1435 /**
1436  *      cpufreq_register_notifier - register a driver with cpufreq
1437  *      @nb: notifier function to register
1438  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1439  *
1440  *      Add a driver to one of two lists: either a list of drivers that
1441  *      are notified about clock rate changes (once before and once after
1442  *      the transition), or a list of drivers that are notified about
1443  *      changes in cpufreq policy.
1444  *
1445  *      This function may sleep, and has the same return conditions as
1446  *      blocking_notifier_chain_register.
1447  */
1448 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1449 {
1450         int ret;
1451
1452         if (cpufreq_disabled())
1453                 return -EINVAL;
1454
1455         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1456
1457         switch (list) {
1458         case CPUFREQ_TRANSITION_NOTIFIER:
1459                 ret = srcu_notifier_chain_register(
1460                                 &cpufreq_transition_notifier_list, nb);
1461                 break;
1462         case CPUFREQ_POLICY_NOTIFIER:
1463                 ret = blocking_notifier_chain_register(
1464                                 &cpufreq_policy_notifier_list, nb);
1465                 break;
1466         default:
1467                 ret = -EINVAL;
1468         }
1469
1470         return ret;
1471 }
1472 EXPORT_SYMBOL(cpufreq_register_notifier);
1473
1474
1475 /**
1476  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1477  *      @nb: notifier block to be unregistered
1478  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1479  *
1480  *      Remove a driver from the CPU frequency notifier list.
1481  *
1482  *      This function may sleep, and has the same return conditions as
1483  *      blocking_notifier_chain_unregister.
1484  */
1485 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1486 {
1487         int ret;
1488
1489         if (cpufreq_disabled())
1490                 return -EINVAL;
1491
1492         switch (list) {
1493         case CPUFREQ_TRANSITION_NOTIFIER:
1494                 ret = srcu_notifier_chain_unregister(
1495                                 &cpufreq_transition_notifier_list, nb);
1496                 break;
1497         case CPUFREQ_POLICY_NOTIFIER:
1498                 ret = blocking_notifier_chain_unregister(
1499                                 &cpufreq_policy_notifier_list, nb);
1500                 break;
1501         default:
1502                 ret = -EINVAL;
1503         }
1504
1505         return ret;
1506 }
1507 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1508
1509
1510 /*********************************************************************
1511  *                              GOVERNORS                            *
1512  *********************************************************************/
1513
1514
1515 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1516                             unsigned int target_freq,
1517                             unsigned int relation)
1518 {
1519         int retval = -EINVAL;
1520         unsigned int old_target_freq = target_freq;
1521
1522         if (cpufreq_disabled())
1523                 return -ENODEV;
1524
1525         /* Make sure that target_freq is within supported range */
1526         if (target_freq > policy->max)
1527                 target_freq = policy->max;
1528         if (target_freq < policy->min)
1529                 target_freq = policy->min;
1530
1531         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1532                         policy->cpu, target_freq, relation, old_target_freq);
1533
1534         if (target_freq == policy->cur)
1535                 return 0;
1536
1537         if (cpufreq_driver->target)
1538                 retval = cpufreq_driver->target(policy, target_freq, relation);
1539
1540         return retval;
1541 }
1542 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
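/*
 * Relation semantics, with a worked example (illustrative frequency
 * table of 200000/400000/800000 kHz and target_freq = 300000):
 *
 *	CPUFREQ_RELATION_L selects 400000 kHz (lowest at or above target)
 *	CPUFREQ_RELATION_H selects 200000 kHz (highest at or below target)
 *
 * both subject to the policy->min/policy->max clamping done above.
 */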
1543
1544 int cpufreq_driver_target(struct cpufreq_policy *policy,
1545                           unsigned int target_freq,
1546                           unsigned int relation)
1547 {
1548         int ret = -EINVAL;
1549
1550         policy = cpufreq_cpu_get(policy->cpu);
1551         if (!policy)
1552                 goto no_policy;
1553
1554         if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1555                 goto fail;
1556
1557         ret = __cpufreq_driver_target(policy, target_freq, relation);
1558
1559         unlock_policy_rwsem_write(policy->cpu);
1560
1561 fail:
1562         cpufreq_cpu_put(policy);
1563 no_policy:
1564         return ret;
1565 }
1566 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
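
/*
 * Usage sketch (hypothetical frequency): asking for roughly 1 GHz.
 * CPUFREQ_RELATION_L selects the lowest frequency at or above the request,
 * CPUFREQ_RELATION_H the highest at or below it; the core first clamps the
 * request to [policy->min, policy->max] as shown above.
 */
#if 0
ret = cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_L);
#endif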

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	if (cpufreq_disabled())
		return ret;

	if (!cpufreq_driver->getavg)
		return 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
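
/*
 * Usage sketch: sampling governors can query the measured average frequency
 * of a CPU this way; 0 comes back when the driver cannot measure it.
 */
#if 0
/* inside a governor's sampling path: */
int avg = __cpufreq_driver_getavg(policy, policy->cpu);
#endif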

/*
 * __cpufreq_governor - forward a governor event (CPUFREQ_GOV_START,
 * CPUFREQ_GOV_STOP, CPUFREQ_GOV_LIMITS, ...) to the governor of @policy.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/*
	 * The fallback governor only needs to be defined when the default
	 * governor is known to have latency restrictions (e.g. conservative
	 * or ondemand); Kconfig already ensures that this is the case.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed: HW transition latency too long, falling back to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
	    (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
		mutex_unlock(&cpufreq_governor_lock);
		/* drop the reference taken above */
		module_put(policy->governor->owner);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/*
	 * Keep one module reference alive while the governor runs on this
	 * policy: the reference taken above is kept only after a successful
	 * GOV_START and dropped again on a successful GOV_STOP.
	 */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
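
/*
 * Illustrative sketch (hypothetical names): a minimal governor modelled on
 * cpufreq_gov_performance - it reacts only to GOV_START and GOV_LIMITS and
 * pins the policy to its maximum.
 */
#if 0
static int example_gov_cb(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_gov_cb,
	.owner		= THIS_MODULE,
};

/* registered via cpufreq_register_governor(&example_governor) */
#endif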


void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	/* Forget the saved governor of any offline CPU that was using it */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);


/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU whose policy shall be read
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
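
/*
 * Usage sketch: take a consistent snapshot of CPU0's policy. The reference
 * taken internally is dropped again before this returns, so the copy may be
 * kept around freely.
 */
#if 0
struct cpufreq_policy snapshot;

if (!cpufreq_get_policy(&snapshot, 0))
	pr_info("cpu0: %u - %u kHz\n", snapshot.min, snapshot.max);
#endif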


/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify that the cpu speed can be set within these limits */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify that the cpu speed can be set within these limits,
	   which might be different from the first ones */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
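
/*
 * Illustrative sketch (hypothetical limit): a CPUFREQ_POLICY_NOTIFIER client
 * participating in the CPUFREQ_ADJUST round above by narrowing the allowed
 * range before the second ->verify() pass.
 */
#if 0
static int example_policy_cb(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	/* never allow more than 1.2 GHz (made-up constraint) */
	if (val == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, 1200000);
	return NOTIFY_OK;
}
#endif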

/**
 *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
 *	@cpu: CPU which shall be re-evaluated
 *
 *	Useful for policy notifiers which have different necessities
 *	at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq\n");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
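
/*
 * Usage sketch: after an external constraint changes (for instance the
 * CPUFREQ_ADJUST notifier sketched earlier), re-run the policy evaluation
 * so the new limit takes effect:
 *
 *	cpufreq_update_policy(0);
 */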

static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*
 * cpufreq_thermal_limit - cap the maximum frequency of a CPU cluster
 * @cluster: cluster index (this SoC groups four CPUs per cluster)
 * @max_freq: new maximum frequency in kHz
 */
int cpufreq_thermal_limit(int cluster, int max_freq)
{
	int ret;
	struct cpufreq_policy *policy, new_policy;
	int cpu = cluster * 4;		/* first CPU of the cluster */

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -EINVAL;

	if (policy->max == max_freq) {
		ret = 0;
		goto out;
	}

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(lock_policy_rwsem_write(policy->cpu))) {
		ret = -EINVAL;
		goto out;
	}

	new_policy.max = max_freq;
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.max = policy->max;

	unlock_policy_rwsem_write(policy->cpu);

out:
	/* drop the reference taken by cpufreq_cpu_get() on every path */
	cpufreq_cpu_put(policy);
	return ret;
}
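
/*
 * Usage sketch (made-up frequencies): a thermal driver capping cluster 0
 * (CPUs 0-3 on this SoC) while hot, then lifting the cap again:
 *
 *	cpufreq_thermal_limit(0, 768000);	   (cap at 768 MHz)
 *	cpufreq_thermal_limit(0, full_max_khz);    (restore; value is the
 *						    caller's saved maximum)
 */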

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
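
/*
 * Illustrative sketch (hypothetical callbacks): the skeleton of a target()
 * style driver as checked for above - ->verify and ->init are mandatory,
 * plus at least one of ->target or ->setpolicy.
 */
#if 0
static struct cpufreq_driver example_driver = {
	.name	= "example",
	.init	= example_cpu_init,	/* fill policy->cpuinfo and ->cur */
	.verify	= example_verify,	/* clamp the policy to HW limits */
	.target	= example_target,	/* program the PLL/dividers */
	.get	= example_get,		/* read back the current rate */
};

static int __init example_init(void)
{
	return cpufreq_register_driver(&example_driver);
}
#endif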


/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver that was registered earlier
 *
 * Unregister the current CPUFreq driver. Only call this if the driver
 * was registered successfully before. Returns zero if successful, and
 * -EINVAL if the cpufreq_driver is currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
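
/*
 * Continuing the sketch above: the matching teardown simply hands the same
 * struct back at module exit.
 */
#if 0
static void __exit example_exit(void)
{
	cpufreq_unregister_driver(&example_driver);
}
#endif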

static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	return 0;
}
core_initcall(cpufreq_core_init);