/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include "cpufreq_governor.h"

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        spinlock_t target_freq_lock; /* protects target_freq */
        unsigned int target_freq;
        unsigned int floor_freq;
        unsigned int max_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
        DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
        int usage_count;
        /* Hi speed to bump to from lo speed when load bursts (default max) */
        unsigned int hispeed_freq;
        /* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
        /* Target load. Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
        /*
         * The minimum amount of time to spend at a frequency before we can ramp
         * down.
         */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
        /*
         * The sample rate of the timer used to increase frequency
         */
        unsigned long timer_rate;
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
         */
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
        /* Non-zero means indefinite speed boost active */
        int boost_val;
        /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
        bool boosted;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
        bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;
#define GOVERNOR_BOOT_TIME      (50*HZ)
static unsigned long boot_done;

static struct attribute_group *get_sysfs_attr(void);

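/*
 * Re-arm the per-CPU sampling timer and take a fresh idle-time snapshot so
 * the next sample window starts now.  All callers in this file hold
 * pcpu->enable_sem for read, which keeps the governor from being stopped
 * underneath us.
 */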
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned long expires;
        unsigned long flags;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(smp_processor_id(),
                                  &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);

        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller must hold the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
        struct cpufreq_interactive_tunables *tunables, int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        unsigned long expires = jiffies +
                usecs_to_jiffies(tunables->timer_rate);
        unsigned long flags;

        pcpu->cpu_timer.expires = expires;
        add_timer_on(&pcpu->cpu_timer, cpu);
        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                pcpu->cpu_slack_timer.expires = expires;
                add_timer_on(&pcpu->cpu_slack_timer, cpu);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

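/*
 * above_hispeed_delay is stored as a flat array of alternating
 * delay:frequency pairs: { delay0, freq1, delay1, freq2, delay2, ... }.
 * Walk the frequency thresholds (odd indices) until one exceeds @freq and
 * return the delay (even index) that precedes it; hence the i += 2 stride.
 */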
static unsigned int freq_to_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
                        freq >= tunables->above_hispeed_delay[i+1]; i += 2)
                ;

        ret = tunables->above_hispeed_delay[i];
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

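/* Same alternating value:frequency pair layout as above_hispeed_delay. */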
static unsigned int freq_to_targetload(
        struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads - 1 &&
                    freq >= tunables->target_loads[i+1]; i += 2)
                ;

        ret = tunables->target_loads[i];
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
                unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(pcpu->policy->governor_data, freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                if (cpufreq_frequency_table_target(
                            pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                            CPUFREQ_RELATION_L, &index))
                        break;
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmax - 1, CPUFREQ_RELATION_H,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmin + 1, CPUFREQ_RELATION_L,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

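/*
 * Charge the CPU's active (non-idle) time since the last sample to
 * cputime_speedadj, weighted by the frequency it was running at.  Dividing
 * that sum by the elapsed time later yields an average load expressed in
 * frequency units.  Callers hold pcpu->load_lock.
 */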
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        u64 now;
        u64 now_idle;
        u64 delta_idle;
        u64 delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
        delta_idle = (now_idle - pcpu->time_in_idle);
        delta_time = (now - pcpu->time_in_idle_timestamp);

        if (delta_time <= delta_idle)
                active_time = 0;
        else
                active_time = delta_time - delta_idle;

        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

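/*
 * Sampling timer, run once per timer_rate interval on each CPU.  Computes
 * the frequency-weighted load since the last sample, picks a new target
 * frequency (honoring hispeed_freq bursts, above_hispeed_delay and the
 * floor_freq/min_sample_time hold-off), then flags the CPU for the
 * speedchange task to actually program the hardware.
 */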
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        spin_lock_irqsave(&pcpu->target_freq_lock, flags);
        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        tunables->boosted = tunables->boost_val ||
                now < tunables->boostpulse_endtime;

        if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        new_freq = tunables->hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < tunables->hispeed_freq)
                                new_freq = tunables->hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
                if (new_freq > tunables->hispeed_freq &&
                                pcpu->target_freq < tunables->hispeed_freq)
                        new_freq = tunables->hispeed_freq;
        }

        if (pcpu->target_freq >= tunables->hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time <
            freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time <
                                tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq &&
                        pcpu->target_freq <= pcpu->policy->cur) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

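/*
 * SCHED_FIFO kthread, woken whenever a CPU's target_freq changes.  For each
 * flagged CPU it applies the highest target_freq among all CPUs sharing
 * that policy, so one busy sibling can hold the whole cluster up.
 */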
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

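/*
 * Push every online CPU governed by @tunables to at least hispeed_freq and
 * reset its floor so the boost is not immediately undone by the next sample.
 */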
static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
        int i;
        int anyboost = 0;
        unsigned long flags[2];
        struct cpufreq_interactive_cpuinfo *pcpu;

        tunables->boosted = true;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                if (tunables != pcpu->policy->governor_data)
                        continue;

                spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = tunables->hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

        if (anyboost)
                wake_up_process(speedchange_task);
}

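/*
 * Frequency-transition notifier: on POSTCHANGE, fold the elapsed active
 * time into each sibling CPU's load accounting so the sample window
 * boundaries line up with the frequency change.
 */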
static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        if (cpu != freq->cpu) {
                                if (!down_read_trylock(&pjcpu->enable_sem))
                                        continue;
                                if (!pjcpu->governor_enabled) {
                                        up_read(&pjcpu->enable_sem);
                                        continue;
                                }
                        }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                        if (cpu != freq->cpu)
                                up_read(&pjcpu->enable_sem);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

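/*
 * Parse a space/colon separated list of unsigned ints, e.g. the
 * "load freq:load freq:load" strings written to target_loads.  An even
 * token count is rejected, since valid input always pairs each extra
 * frequency with a value.
 */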
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
        const char *cp;
        int i;
        int ntokens = 1;
        unsigned int *tokenized_data;
        int err = -EINVAL;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err;

        tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!tokenized_data) {
                err = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
                        goto err_kfree;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_kfree;

        *num_tokens = ntokens;
        return tokenized_data;

err_kfree:
        kfree(tokenized_data);
err:
        return ERR_PTR(err);
}

static ssize_t show_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_target_loads = NULL;
        unsigned long flags;

        new_target_loads = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_target_loads))
                return PTR_RET(new_target_loads);

        spin_lock_irqsave(&tunables->target_loads_lock, flags);
        if (tunables->target_loads != default_target_loads)
                kfree(tunables->target_loads);
        tunables->target_loads = new_target_loads;
        tunables->ntarget_loads = ntokens;
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return count;
}

static ssize_t show_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay; i++)
                ret += sprintf(buf + ret, "%u%s",
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static ssize_t store_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
        unsigned long flags;

        new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_above_hispeed_delay))
                return PTR_RET(new_above_hispeed_delay);

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
        if (tunables->above_hispeed_delay != default_above_hispeed_delay)
                kfree(tunables->above_hispeed_delay);
        tunables->above_hispeed_delay = new_above_hispeed_delay;
        tunables->nabove_hispeed_delay = ntokens;
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
        return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
        return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
        return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->timer_rate = val;
        return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        tunables->timer_slack_val = val;
        return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
                          char *buf)
{
        return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boost_val = val;

        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
                if (!tunables->boosted)
                        cpufreq_interactive_boost(tunables);
        } else {
                tunables->boostpulse_endtime = ktime_to_us(ktime_get());
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        if (!tunables->boosted)
                cpufreq_interactive_boost(tunables);
        return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_duration_val = val;
        return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->io_is_busy = val;
        return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)                                     \
static ssize_t show_##file_name##_gov_sys                               \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return show_##file_name(common_tunables, buf);                  \
}                                                                       \
                                                                        \
static ssize_t show_##file_name##_gov_pol                               \
(struct cpufreq_policy *policy, char *buf)                              \
{                                                                       \
        return show_##file_name(policy->governor_data, buf);            \
}

#define store_gov_pol_sys(file_name)                                    \
static ssize_t store_##file_name##_gov_sys                              \
(struct kobject *kobj, struct attribute *attr, const char *buf,         \
        size_t count)                                                   \
{                                                                       \
        return store_##file_name(common_tunables, buf, count);          \
}                                                                       \
                                                                        \
static ssize_t store_##file_name##_gov_pol                              \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        return store_##file_name(policy->governor_data, buf, count);    \
}

#define show_store_gov_pol_sys(file_name)                               \
show_gov_pol_sys(file_name);                                            \
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)                                          \
static struct global_attr _name##_gov_sys =                             \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)                                          \
static struct freq_attr _name##_gov_pol =                               \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)                                      \
        gov_sys_attr_rw(_name);                                         \
        gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
        &target_loads_gov_sys.attr,
        &above_hispeed_delay_gov_sys.attr,
        &hispeed_freq_gov_sys.attr,
        &go_hispeed_load_gov_sys.attr,
        &min_sample_time_gov_sys.attr,
        &timer_rate_gov_sys.attr,
        &timer_slack_gov_sys.attr,
        &boost_gov_sys.attr,
        &boostpulse_gov_sys.attr,
        &boostpulse_duration_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
        .attrs = interactive_attributes_gov_sys,
        .name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
        &target_loads_gov_pol.attr,
        &above_hispeed_delay_gov_pol.attr,
        &hispeed_freq_gov_pol.attr,
        &go_hispeed_load_gov_pol.attr,
        &min_sample_time_gov_pol.attr,
        &timer_rate_gov_pol.attr,
        &timer_slack_gov_pol.attr,
        &boost_gov_pol.attr,
        &boostpulse_gov_pol.attr,
        &boostpulse_duration_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
        .attrs = interactive_attributes_gov_pol,
        .name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
        if (have_governor_per_policy())
                return &interactive_attr_group_gov_pol;
        else
                return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        if (time_before(jiffies, boot_done))
                return 0;

        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

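/*
 * Spreadtrum SCX35LT8 only: on reboot/poweroff, stop the governor timers
 * and pin each policy to its maximum frequency, presumably to speed up the
 * shutdown path.
 */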
#if defined CONFIG_ARCH_SCX35LT8

void cpufreq_interactive_reboot_handler(unsigned int cpu)
{
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);

        if (!pcpu->governor_enabled)
                return;
        mutex_lock(&gov_lock);
        for_each_cpu(j, pcpu->policy->cpus) {
                pcpu = &per_cpu(cpuinfo, j);
                down_write(&pcpu->enable_sem);
                pcpu->governor_enabled = 0;
                del_timer_sync(&pcpu->cpu_timer);
                del_timer_sync(&pcpu->cpu_slack_timer);
                up_write(&pcpu->enable_sem);
        }
        mutex_unlock(&gov_lock);

        __cpufreq_driver_target(pcpu->policy, pcpu->policy->max,
                                CPUFREQ_RELATION_H);
}

static int cpufreq_interactive_reboot_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        unsigned int i;

        if ((val == SYS_RESTART) || (val == SYS_POWER_OFF)) {
                for_each_possible_cpu(i)
                        cpufreq_interactive_reboot_handler(i);
        }
        return 0;
}

static struct notifier_block cpufreq_interactive_reboot_nb = {
        .notifier_call = cpufreq_interactive_reboot_notifier,
};
#endif

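/*
 * Governor entry point, called by the cpufreq core: POLICY_INIT/EXIT
 * allocate or free the tunables and sysfs groups, START/STOP arm or tear
 * down the per-CPU timers, and LIMITS reacts to policy->min/max changes.
 */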
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;
        unsigned long flags;

        if (have_governor_per_policy())
                tunables = policy->governor_data;
        else
                tunables = common_tunables;

        WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(tunables);
                } else if (tunables) {
                        tunables->usage_count++;
                        policy->governor_data = tunables;
                        return 0;
                }

                tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
                if (!tunables) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
                        ARRAY_SIZE(default_above_hispeed_delay);
                tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
                tunables->target_loads = default_target_loads;
                tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
                tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_rate = DEFAULT_TIMER_RATE;
                tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);

                policy->governor_data = tunables;
                if (!have_governor_per_policy())
                        common_tunables = tunables;

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr());
                if (rc) {
                        kfree(tunables);
                        policy->governor_data = NULL;
                        if (!have_governor_per_policy())
                                common_tunables = NULL;
                        return rc;
                }

                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
#if defined CONFIG_ARCH_SCX35LT8
                        register_reboot_notifier(&cpufreq_interactive_reboot_nb);
#endif
                }

                break;

        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--tunables->usage_count) {
                        if (policy->governor->initialized == 1) {
                                cpufreq_unregister_notifier(&cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
#if defined CONFIG_ARCH_SCX35LT8
                                unregister_reboot_notifier(&cpufreq_interactive_reboot_nb);
#endif
                        }

                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr());
                        kfree(tunables);
                        common_tunables = NULL;
                }

                policy->governor_data = NULL;
                break;

        case CPUFREQ_GOV_START:
                mutex_lock(&gov_lock);

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (!tunables->hispeed_freq)
                        tunables->hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        pcpu->max_freq = policy->max;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);

                        down_read(&pcpu->enable_sem);
                        if (pcpu->governor_enabled == 0) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        spin_lock_irqsave(&pcpu->target_freq_lock, flags);
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;

                        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        up_read(&pcpu->enable_sem);

                        /*
                         * Reschedule the timer only if policy->max was
                         * raised.  Delete the timers first; otherwise the
                         * timer callback may return without re-arming the
                         * timer if it fails to acquire the semaphore,
                         * leaving the timer stopped unexpectedly.
                         */

                        if (policy->max > pcpu->max_freq) {
                                down_write(&pcpu->enable_sem);
                                del_timer_sync(&pcpu->cpu_timer);
                                del_timer_sync(&pcpu->cpu_slack_timer);
                                cpufreq_interactive_timer_start(tunables, j);
                                up_write(&pcpu->enable_sem);
                        }

                        pcpu->max_freq = policy->max;
                }
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

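/*
 * Module init: set up the per-CPU timers and the speedchange kthread at
 * maximum RT priority, then register the governor.  cpu_timer is deferrable
 * so an idle CPU is not woken just to sample; cpu_slack_timer is a plain
 * timer with a nop handler whose expiry wakes the CPU so the deferrable
 * sampling timer can be serviced.
 */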
static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        boot_done = jiffies + GOVERNOR_BOOT_TIME;
        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                spin_lock_init(&pcpu->target_freq_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&speedchange_cpumask_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency sensitive workloads");
MODULE_LICENSE("GPL");