/*
 * Copyright (C) 2013 Spreadtrum Communications Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/devfreq.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
#include "governor.h"

/* Default constants for DevFreq-Ondemand (DFO) */
#define DFO_UPTHRESHOLD         (80)
#define DFO_DOWNDIFFERENCTIAL   (30)

/*
 * TODO: add kernel space requests
 */
#ifdef CONFIG_SPRD_SCX35_DMC_FREQ
extern void devfreq_min_freq_cnt_reset(unsigned int, unsigned int);
extern int devfreq_request_ignore(void);
#endif

static void dfs_req_timer_timeout(unsigned long arg);
#define REQ_TIMEOUT_DEF (HZ / 20)
static DEFINE_SPINLOCK(dfs_req_lock);
static unsigned int dfs_req_timeout;
static DEFINE_TIMER(dfs_req_timer, dfs_req_timer_timeout, 0, 0);
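
/*
 * Aggregated frequency demands layered on top of the load-based target
 * (field roles inferred from their use below):
 * @req_sum:            running sum of dfs_request_bw() requests
 * @req_timeout:        one-shot request armed by dfs_request_bw_timeout(),
 *                      cleared when dfs_req_timer expires
 * @req_quirk:          transient raise request used by dfs_freq_raise_quirk()
 * @ddr_freq_after_req: last target handed to devfreq, reused as a fallback
 *                      while the governor is bypassed
 */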
struct dfs_request_state {
	int req_sum;            /* in KHz */
	int req_timeout;        /* in KHz */
	int req_quirk;          /* in KHz */
	u32 ddr_freq_after_req; /* in KHz */
};
static struct dfs_request_state user_requests;
static struct devfreq *g_devfreq; /* for requests from kernel */
static int gov_eb = 1;
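
/*
 * Per-device governor state (field roles inferred from the code below):
 * @req_bw:             net bandwidth requested through sysfs, in KB
 * @set_freq:           frequency forced via dfs_set_freq(), in KHz
 * @set_count:          nesting depth of active dfs_set_freq() users
 * @upthreshold:        load percentage above which the device jumps to max
 * @downdifferential:   hysteresis below @upthreshold before scaling down
 * @convert_bw_to_freq: platform callback mapping a KB request to KHz
 * @enable:             master switch exposed through sysfs
 * @devfreq_enable:     cleared while a forced frequency is in effect
 */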
struct userspace_data {
	int req_bw;
	unsigned long set_freq;
	unsigned long set_count;
	unsigned long upthreshold;
	unsigned long downdifferential;
	unsigned long (*convert_bw_to_freq)(u32 req_bw);
	bool enable; /* sysfs only */
	bool devfreq_enable;
};

/************ kernel interface *****************/
/*
 * get the DFS enable flag
 * enabled == 0 --> DFS disabled
 * enabled == 1 --> DFS enabled
 */
bool dfs_get_enable(void)
{
	struct userspace_data *user_data;
	bool enabled = false;

	if (g_devfreq && g_devfreq->data) {
		user_data = (struct userspace_data *)(g_devfreq->data);
		enabled = user_data->enable;
		pr_debug("%s, enabled = %d, user_data->enable = %d\n",
			 __func__, enabled, user_data->enable);
	}

	return enabled;
}
EXPORT_SYMBOL(dfs_get_enable);

/*
 * set the DDR frequency
 * @freq: KHz
 * If the DDR frequency is set through this function, DFS stays disabled
 * until every setter has released its request with freq == 0.
 */
int dfs_set_freq(int freq)
{
	struct userspace_data *user_data;
	int err;

	if (freq < 0) {
		err = -EINVAL;
		pr_debug("*** %s, freq < 0\n", __func__);
		goto done;
	}
	if (!g_devfreq) {
		err = -ENODEV;
		goto done;
	}

	user_data = (struct userspace_data *)(g_devfreq->data);
	mutex_lock(&g_devfreq->lock);
	if (user_data) {
		if (freq > 0) {
			user_data->set_count++;
			user_data->devfreq_enable = false;
			if (freq > user_data->set_freq)
				user_data->set_freq = freq;
		} else {
			if (user_data->set_count > 0) {
				user_data->set_count--;
				if (user_data->set_count == 0) {
					user_data->set_freq = 0;
					user_data->devfreq_enable = true;
				}
			}
		}
		pr_debug("*** %s, set freq:%d KHz, set_count:%lu ***\n",
			 __func__, freq, user_data->set_count);
	} else {
		pr_debug("*** %s, user_data == NULL\n", __func__);
	}
	err = update_devfreq(g_devfreq);
	mutex_unlock(&g_devfreq->lock);
done:
	return err;
}

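/*
 * Example (hypothetical caller): a driver that must keep DDR at or above
 * 200 MHz for a critical section pairs the calls like this:
 *
 *      dfs_set_freq(200000);    freq > 0: pin the frequency, bypass DFS
 *      ... latency-sensitive work ...
 *      dfs_set_freq(0);         freq == 0: release; DFS resumes once
 *                               every setter has released (set_count == 0)
 */
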
/*
 * add a new DDR bandwidth request
 * @req_bw: KB; a positive value adds a request,
 * a negative value subtracts a previous one
 */
void dfs_request_bw(int req_bw)
{
	u32 req_freq = 0;
	int add = 1;
	struct userspace_data *user_data;

	if (req_bw < 0) {
		req_bw = -req_bw;
		add = -1;
	}

	if (g_devfreq && g_devfreq->data) {
		user_data = (struct userspace_data *)(g_devfreq->data);
		if (user_data->convert_bw_to_freq)
			req_freq = (user_data->convert_bw_to_freq)(req_bw);
	}
	pr_debug("*** %s, pid:%u, %creq_bw:%u, req_freq:%u ***\n",
		 __func__, current->pid, add >= 0 ? '+' : '-', req_bw, req_freq);
	if (req_freq) {
		mutex_lock(&g_devfreq->lock);
		if (add >= 0)
			user_requests.req_sum += req_freq;
		else
			user_requests.req_sum -= req_freq;
		if (user_requests.req_sum < 0)
			user_requests.req_sum = 0;
		update_devfreq(g_devfreq);
		mutex_unlock(&g_devfreq->lock);
	}
}

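/*
 * Example (hypothetical caller): requests are signed and should be paired
 * so the running sum returns to zero when the demand goes away:
 *
 *      dfs_request_bw(50000);     claim 50000 KB of DDR bandwidth
 *      ...
 *      dfs_request_bw(-50000);    release it again
 */
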
/*
 * set the request timer timeout
 * @timeout: ms
 */
void dfs_req_set_timeout(unsigned int timeout)
{
	spin_lock(&dfs_req_lock);
	dfs_req_timeout = msecs_to_jiffies(timeout);
	spin_unlock(&dfs_req_lock);
}
EXPORT_SYMBOL(dfs_req_set_timeout);

/*
 * get the request timer timeout
 * @return: ms
 */
unsigned int dfs_req_get_timeout(void)
{
	unsigned int timeout;

	spin_lock(&dfs_req_lock);
	timeout = dfs_req_timeout;
	spin_unlock(&dfs_req_lock);

	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(dfs_req_get_timeout);

static void dfs_req_timer_timeout(unsigned long arg)
{
	spin_lock(&dfs_req_lock);
	user_requests.req_timeout = 0;
	spin_unlock(&dfs_req_lock);
}

/*
 * add a new DDR bandwidth request; when the timer expires,
 * the request is cleared automatically
 * @req_bw: KB
 */
void dfs_request_bw_timeout(unsigned int req_bw)
{
	struct userspace_data *user_data;
	unsigned int req_freq = 0;

	if (req_bw == 0)
		return;

	spin_lock(&dfs_req_lock);
	if (user_requests.req_timeout) {
		spin_unlock(&dfs_req_lock);
		pr_debug("*** %s, ignore, req_timeout:%d ***\n",
			 __func__, user_requests.req_timeout);
		return;
	}
	spin_unlock(&dfs_req_lock);

	if (g_devfreq && g_devfreq->data) {
		user_data = (struct userspace_data *)(g_devfreq->data);
		if (user_data->convert_bw_to_freq) {
			req_freq = (user_data->convert_bw_to_freq)(req_bw);
			pr_debug("*** %s, req_freq:%d ***\n", __func__, req_freq);
		}
	}
	spin_lock(&dfs_req_lock);
	user_requests.req_timeout = req_freq;
	spin_unlock(&dfs_req_lock);

	if (req_freq)
		mod_timer(&dfs_req_timer, jiffies + dfs_req_timeout);
	else
		del_timer_sync(&dfs_req_timer);

	if (req_freq) {
		mutex_lock(&g_devfreq->lock);
		update_devfreq(g_devfreq);
		mutex_unlock(&g_devfreq->lock);
	}
}
EXPORT_SYMBOL(dfs_request_bw_timeout);

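/*
 * Example (values are illustrative): bump DDR bandwidth for a single
 * burst and let the timer drop the request again automatically:
 *
 *      dfs_req_set_timeout(100);          auto-clear after 100 ms
 *      dfs_request_bw_timeout(100000);    one-shot 100000 KB request
 */
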
/*
 * temporarily raise the DDR frequency
 * @req_bw: KB
 */
#ifdef CONFIG_SPRD_SCX35_DMC_FREQ
void dfs_freq_raise_quirk(unsigned int req_bw)
{
	if (req_bw == 0)
		return;

	spin_lock(&dfs_req_lock);
	if (user_requests.req_quirk || devfreq_request_ignore()) {
		spin_unlock(&dfs_req_lock);
		return;
	}
	user_requests.req_quirk = req_bw;
	spin_unlock(&dfs_req_lock);

	mutex_lock(&g_devfreq->lock);
	devfreq_min_freq_cnt_reset(-1, 1);
	update_devfreq(g_devfreq);
	devfreq_min_freq_cnt_reset(-1, 0);
	user_requests.req_quirk = 0;
	mutex_unlock(&g_devfreq->lock);
}
EXPORT_SYMBOL(dfs_freq_raise_quirk);
#endif

/************ early suspend  *****************/
#ifdef CONFIG_HAS_EARLYSUSPEND
static void devfreq_early_suspend(struct early_suspend *h)
{
#if defined(CONFIG_ARCH_SCX15)
	dfs_set_freq(192000);
#elif defined(CONFIG_ARCH_SCX35)
	dfs_set_freq(200000);
#endif
	gov_eb = 0;
}

static void devfreq_late_resume(struct early_suspend *h)
{
	dfs_set_freq(0);
}

static struct early_suspend devfreq_early_suspend_desc = {
	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 100,
	.suspend = devfreq_early_suspend,
	.resume = devfreq_late_resume,
};

static void devfreq_enable_late_resume(struct early_suspend *h)
{
	gov_eb = 1;
}

static struct early_suspend devfreq_enable_desc = {
	.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN,
	.resume = devfreq_enable_late_resume,
};
#endif

/************ userspace interface *****************/

static ssize_t store_upthreshold(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;
	int err;

	err = kstrtoul(buf, 10, &wanted);
	if (err)
		return err;
	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data)
		data->upthreshold = wanted;
	mutex_unlock(&devfreq->lock);
	return count;
}

static ssize_t show_upthreshold(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int err;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data)
		err = sprintf(buf, "%lu\n", data->upthreshold);
	else
		err = sprintf(buf, "%d\n", DFO_UPTHRESHOLD);
	mutex_unlock(&devfreq->lock);
	return err;
}

static ssize_t store_downdifferential(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;
	int err;

	err = kstrtoul(buf, 10, &wanted);
	if (err)
		return err;
	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data)
		data->downdifferential = wanted;
	mutex_unlock(&devfreq->lock);
	return count;
}

static ssize_t show_downdifferential(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int err;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data)
		err = sprintf(buf, "%lu\n", data->downdifferential);
	else
		err = sprintf(buf, "%d\n", DFO_DOWNDIFFERENCTIAL);
	mutex_unlock(&devfreq->lock);
	return err;
}

static ssize_t store_request(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int wanted;
	int req_freq = 0;
	int err;

	err = kstrtoint(buf, 10, &wanted);
	if (err)
		return err;
	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data) {
		data->req_bw += wanted;
		pr_debug("*** %s, request:%d, total request:%d ***\n",
			 __func__, wanted, data->req_bw);
		if (data->req_bw < 0)
			data->req_bw = 0;
		if (data->convert_bw_to_freq)
			req_freq = data->convert_bw_to_freq(wanted);
	}
	user_requests.req_sum += req_freq;
	if (user_requests.req_sum < 0)
		user_requests.req_sum = 0;
	err = update_devfreq(devfreq);
	if (err == 0)
		err = count;
	mutex_unlock(&devfreq->lock);
	return err;
}

static ssize_t show_request(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int err = 0;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data)
		err = sprintf(buf, "%d KB\n", data->req_bw);
	mutex_unlock(&devfreq->lock);
	return err;
}

static ssize_t store_enable(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;
	int err;

	err = kstrtoul(buf, 10, &wanted);
	if (err)
		return err;
	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data)
		data->enable = wanted;
	mutex_unlock(&devfreq->lock);
	return count;
}

static ssize_t show_enable(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int err = 0;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data)
		err = sprintf(buf, "%d\n", data->enable);
	mutex_unlock(&devfreq->lock);
	return err;
}

static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned long wanted;
	int err;

	err = kstrtoul(buf, 10, &wanted);
	if (err)
		return err;
	err = dfs_set_freq(wanted);
	if (err == 0)
		err = count;
	return err;
}

static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int err = 0;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data)
		err = sprintf(buf, "%lu KHz, set count:%lu\n",
			      data->set_freq, data->set_count);
	mutex_unlock(&devfreq->lock);
	return err;
}

static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
static DEVICE_ATTR(set_enable, 0644, show_enable, store_enable);
static DEVICE_ATTR(set_request, 0644, show_request, store_request);
static DEVICE_ATTR(set_upthreshold, 0644, show_upthreshold, store_upthreshold);
static DEVICE_ATTR(set_downdifferential, 0644, show_downdifferential, store_downdifferential);

static struct attribute *dev_entries[] = {
	&dev_attr_set_freq.attr,
	&dev_attr_set_enable.attr,
	&dev_attr_set_request.attr,
	&dev_attr_set_upthreshold.attr,
	&dev_attr_set_downdifferential.attr,
	NULL,
};

static struct attribute_group dev_attr_group = {
	.name	= "ondemand",
	.attrs	= dev_entries,
};

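/*
 * The group lands under the device's devfreq node; the paths below are
 * illustrative and the node name is device-specific:
 *
 *      echo 85 > /sys/class/devfreq/<dev>/ondemand/set_upthreshold
 *      echo 25 > /sys/class/devfreq/<dev>/ondemand/set_downdifferential
 *      echo 0  > /sys/class/devfreq/<dev>/ondemand/set_enable
 *      cat       /sys/class/devfreq/<dev>/ondemand/set_freq
 */
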
static int devfreq_ondemand_start(struct devfreq *devfreq)
{
	int err = 0;
	struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
			GFP_KERNEL);

	if (!data) {
		err = -ENOMEM;
		goto out;
	}
	data->req_bw = 0;
	data->set_freq = 0;
	data->upthreshold = DFO_UPTHRESHOLD;
	data->downdifferential = DFO_DOWNDIFFERENCTIAL;
#if defined(CONFIG_ARCH_SCX30G)
	data->enable = false;
#else
	data->enable = true;
#endif
	data->devfreq_enable = true;
	if (devfreq->data) {
		data->convert_bw_to_freq = devfreq->data;
		pr_info("*** %s, data->convert_bw_to_freq:%pf ***\n",
			__func__, data->convert_bw_to_freq);
	}
	devfreq->data = data;
	g_devfreq = devfreq;
	err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
	if (err) {
		/* roll back so the allocation is not leaked */
		devfreq->data = (void *)data->convert_bw_to_freq;
		g_devfreq = NULL;
		kfree(data);
		goto out;
	}
#ifdef CONFIG_HAS_EARLYSUSPEND
	register_early_suspend(&devfreq_early_suspend_desc);
	/*
	 * disable DFS before DISPC late resume
	 */
	register_early_suspend(&devfreq_enable_desc);
#endif
	spin_lock(&dfs_req_lock);
	dfs_req_timeout = REQ_TIMEOUT_DEF;
	spin_unlock(&dfs_req_lock);
out:
	return err;
}

static int devfreq_ondemand_stop(struct devfreq *devfreq)
{
	kfree(devfreq->data);
	devfreq->data = NULL;

	return 0;
}

/************ ondemand governor *****************/

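/*
 * Target frequency selection. The load-based part mirrors the kernel's
 * simple_ondemand governor; req_freq folds in the kernel/userspace
 * requests accumulated above:
 *
 *      load = busy_time / total_time
 *      load > upthreshold%                      -> max
 *      load > (upthreshold - downdifferential)% -> current + req_freq
 *      otherwise scale so the next load lands near the middle of the
 *      hysteresis band:
 *
 *      target = current * load * 100 / (upthreshold - downdifferential / 2)
 *               + req_freq
 */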
static int devfreq_ondemand_func(struct devfreq *df,
					unsigned long *freq)
{
	struct devfreq_dev_status stat;
	int err = df->profile->get_dev_status(df->dev.parent, &stat);
	unsigned long long a, b;
	unsigned int dfso_upthreshold = DFO_UPTHRESHOLD;
	unsigned int dfso_downdifferential = DFO_DOWNDIFFERENCTIAL;
	struct userspace_data *data = df->data;
	unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
	unsigned long req_freq;

	if (err)
		return err;

	if (user_requests.req_quirk &&
	    stat.current_frequency == df->min_freq) {
		/*
		 * QUIRK: only set a frequency larger than the minimum
		 */
		*freq = df->min_freq + 1;
		pr_debug("*** %s, req_quirk, freq:%lu, return ***\n", __func__, *freq);
		return 0;
	}
	/*
	 * TODO: add request frequency
	 */
	req_freq = user_requests.req_sum + user_requests.req_timeout;

	if (data) {
		if (!data->enable || !data->devfreq_enable ||
		    data->set_freq || !gov_eb) {
			if (user_requests.ddr_freq_after_req == 0)
				user_requests.ddr_freq_after_req = max;
			*freq = data->set_freq ?
				data->set_freq : user_requests.ddr_freq_after_req;
			pr_debug("*** %s, data->enable:%d, data->set_freq:%lu, gov_eb:%d ***\n",
				 __func__, data->enable, data->set_freq, gov_eb);
			return 0;
		}
		if (data->upthreshold)
			dfso_upthreshold = data->upthreshold;
		if (data->downdifferential)
			dfso_downdifferential = data->downdifferential;
	} else {
		pr_warn("*** %s, data is NULL ***\n", __func__);
	}

	if (dfso_upthreshold > 100 ||
	    dfso_upthreshold < dfso_downdifferential) {
		pr_err("*** %s, dfso_upthreshold:%u, dfso_downdifferential:%u ***\n",
		       __func__, dfso_upthreshold, dfso_downdifferential);
		return -EINVAL;
	}

	/* Assume MAX if it is going to be divided by zero */
	if (stat.total_time == 0) {
		*freq = max;
		user_requests.ddr_freq_after_req = *freq;
		pr_debug("*** %s, stat.total_time == 0, freq:%lu ***\n", __func__, *freq);
		return 0;
	}

	/* Prevent overflow */
	if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
		stat.busy_time >>= 7;
		stat.total_time >>= 7;
	}

	/* Set MAX if it's busy enough */
	if (stat.busy_time * 100 >
	    stat.total_time * dfso_upthreshold) {
		*freq = max;
		user_requests.ddr_freq_after_req = *freq;
		pr_debug("*** %s, set max freq:%lu ***\n", __func__, *freq);
		return 0;
	}

	/* Set MAX if we do not know the initial frequency */
	if (stat.current_frequency == 0) {
		*freq = max;
		user_requests.ddr_freq_after_req = *freq;
		pr_debug("*** %s, stat.current_frequency == 0, freq:%lu ***\n", __func__, *freq);
		return 0;
	}

	/* Keep the current frequency */
	if (stat.busy_time * 100 >
	    stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
		*freq = stat.current_frequency + req_freq;
		user_requests.ddr_freq_after_req = *freq;
		pr_debug("*** %s, keep the current frequency %lu, req_freq:%lu ***\n",
			 __func__, stat.current_frequency, req_freq);
		return 0;
	}

	/* Set the desired frequency based on the load */
	a = stat.busy_time;
	a *= stat.current_frequency;
	b = div_u64(a, stat.total_time);
	b *= 100;
	b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
	*freq = (unsigned long) b + req_freq;
	user_requests.ddr_freq_after_req = *freq;
	pr_debug("*** %s, calculated freq:%lu, req_freq:%lu ***\n",
		 __func__, (unsigned long)b, req_freq);

	if (df->min_freq && *freq < df->min_freq)
		*freq = df->min_freq;
	if (df->max_freq && *freq > df->max_freq)
		*freq = df->max_freq;

	return 0;
}

static int devfreq_ondemand_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	int err;

	switch (event) {
	case DEVFREQ_GOV_START:
		err = devfreq_ondemand_start(devfreq);
		if (err)
			return err;
		devfreq_monitor_start(devfreq);
		break;

	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		/* devfreq_ondemand_stop(devfreq); */
		break;

	case DEVFREQ_GOV_INTERVAL:
		devfreq_interval_update(devfreq, (unsigned int *)data);
		break;

	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;

	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;

	default:
		break;
	}

	return 0;
}

const struct devfreq_governor devfreq_ondemand = {
	.name = "ondemand",
	.get_target_freq = devfreq_ondemand_func,
	.event_handler = devfreq_ondemand_handler,
};

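/*
 * A device driver binds this governor by name; on this 3.10-era devfreq
 * it may also hand over its bandwidth->frequency converter through the
 * data argument, which devfreq_ondemand_start() picks up (sketch; the
 * profile and converter names are illustrative):
 *
 *      static unsigned long my_bw_to_freq(u32 bw_kb) { ... }
 *
 *      devfreq_add_device(dev, &my_profile, "ondemand", my_bw_to_freq);
 */
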
static int __init devfreq_ondemand_init(void)
{
	return devfreq_add_governor(&devfreq_ondemand);
}
subsys_initcall(devfreq_ondemand_init);

static void __exit devfreq_ondemand_exit(void)
{
	int ret;

	ret = devfreq_remove_governor(&devfreq_ondemand);
	if (ret)
		pr_err("%s: failed to remove governor %d\n", __func__, ret);
}
module_exit(devfreq_ondemand_exit);
MODULE_LICENSE("GPL");