/*
 * Copyright (C) 2013 Spreadtrum Communications Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/devfreq.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
#include "governor.h"
27 /* Default constants for DevFreq-Ondemand (DFO) */
28 #define DFO_UPTHRESHOLD (80)
29 #define DFO_DOWNDIFFERENCTIAL (30)
32 * TODO: add kernel space requests
34 #ifdef CONFIG_SPRD_SCX35_DMC_FREQ
35 extern void devfreq_min_freq_cnt_reset(unsigned int, unsigned int);
36 extern int devfreq_request_ignore(void);
39 static void dfs_req_timer_timeout(unsigned long arg);
40 #define REQ_TIMEOUT_DEF (HZ/20);
41 static DEFINE_SPINLOCK(dfs_req_lock);
42 static unsigned int dfs_req_timeout;
43 static DEFINE_TIMER(dfs_req_timer, dfs_req_timer_timeout, 0, 0);
44 struct dfs_request_state{
45 int req_sum; /* in KHz */
46 int req_timeout; /* in KHz */
47 int req_quirk; /* in KHz */
48 u32 ddr_freq_after_req; /* in KHz */
50 static struct dfs_request_state user_requests;
51 static struct devfreq *g_devfreq; /* for requests from kernel */
52 static int gov_eb = 1;
53 struct userspace_data {
55 unsigned long set_freq;
56 unsigned long set_count;
57 unsigned long upthreshold;
58 unsigned long downdifferential;
59 unsigned long (*convert_bw_to_freq)(u32 req_bw);
60 bool enable; /*sysfs only*/
/************ kernel interface *****************/

/*
 * dfs_get_enable - query whether dynamic frequency scaling is enabled.
 *
 * enabled == 0 --> dfs disable
 * enabled == 1 --> dfs enable
 *
 * Reads the sysfs-controlled 'enable' flag out of the governor private
 * data attached to the global devfreq instance.
 *
 * NOTE(review): the extract dropped the opening brace, the initialisation
 * of 'enabled' and the return statement -- verify the value returned when
 * g_devfreq is not yet registered.
 */
bool dfs_get_enable(void)
	struct userspace_data *user_data;

	if(g_devfreq && g_devfreq->data){
		user_data = (struct userspace_data *)(g_devfreq->data);
		enabled = user_data->enable;
		pr_debug("%s, enabled = %d, user_data->enable = %d\n", __func__, enabled, user_data->enable);
EXPORT_SYMBOL(dfs_get_enable);
/*
 * dfs_set_freq - pin the DDR clock to a fixed frequency.
 * @freq: frequency in KHz; negative values are rejected.  From the code
 *        below, a positive value appears to take a reference (set_count++)
 *        and disable DVS, while the second branch drops a reference and
 *        re-enables DVS when the count reaches zero.
 *
 * if ddr frequency is set through this function, DVS is disabled
 *
 * NOTE(review): several lines were dropped by the extraction (opening
 * brace, the branch keywords separating the pin/release paths, error
 * handling and the return) -- code kept verbatim; confirm the branch
 * structure against the original before relying on it.
 */
int dfs_set_freq(int freq)
	struct userspace_data *user_data;

	pr_debug("*** %s,freq < 0\n",__func__);

	user_data = (struct userspace_data *)(g_devfreq->data);
	mutex_lock(&g_devfreq->lock);
	/* pin path: take a reference, disable the governor's own scaling,
	 * and keep the largest frequency requested so far */
	user_data->set_count++;
	user_data->devfreq_enable = false;
	if(freq > user_data->set_freq)
		user_data->set_freq = freq;
	/* release path: drop a reference; at zero, clear the pin and
	 * re-enable DVS */
	if(user_data->set_count > 0){
		user_data->set_count--;
		if(user_data->set_count == 0){
			user_data->set_freq = 0;
			user_data->devfreq_enable = true;
	pr_debug("*** %s, set freq:%d KHz, set_count:%lu ***\n", __func__, freq, user_data->set_count );
	pr_debug("*** %s,user_data == 0\n",__func__);
	/* push the new constraint through the devfreq core */
	err = update_devfreq(g_devfreq);
	mutex_unlock(&g_devfreq->lock);
/*
 * dfs_request_bw - add a new ddr bandwidth request.
 * @req_bw: requested bandwidth, converted to a frequency contribution via
 *          the platform-supplied convert_bw_to_freq() hook.
 *
 * + addition(add>=0) or - subtraction(add<0)
 *
 * NOTE(review): the extract dropped lines here -- the local 'add' used in
 * the pr_debug below is declared on a missing line, and the keyword
 * choosing between the += and -= statements is gone.  Code kept verbatim;
 * verify sign handling against the original.
 */
void dfs_request_bw(int req_bw)
	struct userspace_data *user_data;

	if(g_devfreq && g_devfreq->data){
		user_data = (struct userspace_data *)(g_devfreq->data);
		if(user_data->convert_bw_to_freq){
			req_freq = (user_data->convert_bw_to_freq)(req_bw);

	pr_debug("*** %s, pid:%u, %creq_bw:%u, req_freq:%u ***\n",
		__func__, current->pid, add>=0?'+':'-', req_bw, req_freq );

	mutex_lock(&g_devfreq->lock);
	/* accumulate (or retire) the request, clamping the sum at zero,
	 * then re-evaluate the target frequency */
	user_requests.req_sum += req_freq;
	user_requests.req_sum -= req_freq;
	if(user_requests.req_sum < 0)
		user_requests.req_sum = 0;
	update_devfreq(g_devfreq);
	mutex_unlock(&g_devfreq->lock);
167 * set request timer timeout
170 void dfs_req_set_timeout(unsigned int timeout)
172 spin_lock(&dfs_req_lock);
173 dfs_req_timeout = msecs_to_jiffies(timeout);
174 spin_unlock(&dfs_req_lock);
176 EXPORT_SYMBOL(dfs_req_set_timeout);
179 * get request timer timeout
182 unsigned int dfs_req_get_timeout(void)
184 unsigned int timeout;
186 spin_lock(&dfs_req_lock);
187 timeout = dfs_req_timeout;
188 spin_unlock(&dfs_req_lock);
190 timeout = jiffies_to_msecs(timeout);
193 EXPORT_SYMBOL(dfs_req_get_timeout);
195 static void dfs_req_timer_timeout(unsigned long arg)
197 spin_lock(&dfs_req_lock);
198 user_requests.req_timeout = 0;
199 spin_unlock(&dfs_req_lock);
/*
 * dfs_request_bw_timeout - add a new ddr bandwidth request. when time is up,
 * request is cleared automatically (by dfs_req_timer_timeout()).
 * @req_bw: requested bandwidth, converted via convert_bw_to_freq().
 *
 * Only one timed request may be outstanding at a time; a second call while
 * req_timeout is non-zero is ignored.
 *
 * NOTE(review): the extract dropped lines here (opening brace, the early
 * 'return' after the ignore message, closing braces, and the condition
 * selecting between mod_timer() and del_timer_sync()).  Code kept
 * verbatim; confirm the timer arm/disarm logic against the original.
 */
void dfs_request_bw_timeout(unsigned int req_bw)
	struct userspace_data *user_data;
	unsigned int req_freq;

	/* reject if a timed request is already pending */
	spin_lock(&dfs_req_lock);
	if( user_requests.req_timeout ){
		spin_unlock(&dfs_req_lock);
		pr_debug("*** %s, ignore, req_timeout:%d ***\n", __func__, user_requests.req_timeout);
	spin_unlock(&dfs_req_lock);

	if(g_devfreq && g_devfreq->data){
		user_data = (struct userspace_data *)(g_devfreq->data);
		if(user_data->convert_bw_to_freq){
			req_freq = (user_data->convert_bw_to_freq)(req_bw);
			printk("*** %s, req_freq:%d ***\n", __func__, req_freq );

	/* publish the timed request for the governor callback */
	spin_lock(&dfs_req_lock);
	user_requests.req_timeout = req_freq;
	spin_unlock(&dfs_req_lock);

	/* arm the auto-clear timer / cancel a previously armed one */
	mod_timer(&dfs_req_timer, jiffies+dfs_req_timeout);
	del_timer_sync(&dfs_req_timer);

	/* re-evaluate the target frequency with the new request in place */
	mutex_lock(&g_devfreq->lock);
	update_devfreq(g_devfreq);
	mutex_unlock(&g_devfreq->lock);
EXPORT_SYMBOL(dfs_request_bw_timeout);
/*
 * dfs_freq_raise_quirk - raise ddr frequency up temporarily.
 * @req_bw: request magnitude stashed in user_requests.req_quirk so the
 *          governor callback takes its quirk path (see
 *          devfreq_ondemand_func()), then cleared again below.
 *
 * NOTE(review): the extract dropped lines here (opening brace, the early
 * 'return' of the refused path, the closing brace, and the matching
 * #endif for CONFIG_SPRD_SCX35_DMC_FREQ).  Code kept verbatim.
 */
#ifdef CONFIG_SPRD_SCX35_DMC_FREQ
void dfs_freq_raise_quirk(unsigned int req_bw)
	/* refuse if a quirk request is pending or requests are being ignored */
	spin_lock(&dfs_req_lock);
	if(user_requests.req_quirk || devfreq_request_ignore() ){
		spin_unlock(&dfs_req_lock);
	user_requests.req_quirk = req_bw;
	spin_unlock(&dfs_req_lock);

	mutex_lock(&g_devfreq->lock);
	/* bracket the forced re-evaluation with min-freq counter resets */
	devfreq_min_freq_cnt_reset(-1, 1);
	update_devfreq(g_devfreq);
	devfreq_min_freq_cnt_reset(-1, 0);
	user_requests.req_quirk = 0;
	mutex_unlock(&g_devfreq->lock);
EXPORT_SYMBOL(dfs_freq_raise_quirk);
/************ early suspend *****************/
#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * Pin DDR to a low platform-specific frequency when the screen blanks.
 * NOTE(review): the extract dropped the function braces and the #else/
 * #endif lines of the per-SoC selection; code kept verbatim.
 */
static void devfreq_early_suspend(struct early_suspend *h)
#ifdef CONFIG_ARCH_SCX15
	dfs_set_freq(192000);
#ifdef CONFIG_ARCH_SCX35
	dfs_set_freq(200000);

/*
 * NOTE(review): the body of this handler was lost in extraction --
 * presumably it releases the suspend-time pin (e.g. dfs_set_freq(0));
 * confirm against the original.
 */
static void devfreq_late_resume(struct early_suspend *h)

static struct early_suspend devfreq_early_suspend_desc = {
	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 100,
	.suspend = devfreq_early_suspend,
	.resume = devfreq_late_resume,

/* NOTE(review): body lost in extraction; registered below with only a
 * .resume hook at BLANK_SCREEN level -- purpose not visible here. */
static void devfreq_enable_late_resume(struct early_suspend *h)

static struct early_suspend devfreq_enable_desc = {
	.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN,
	.resume = devfreq_enable_late_resume,
/************ userspace interface *****************/

/*
 * sysfs store: set the ondemand up-threshold (percent busy above which
 * the governor raises the frequency).
 * NOTE(review): a line between sscanf() and the assignment was dropped
 * (possibly a NULL/range check), as were the opening brace and the
 * 'return count'.  Code kept verbatim.
 */
static ssize_t store_upthreshold(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	sscanf(buf, "%lu", &wanted);
	data->upthreshold = wanted;
	mutex_unlock(&devfreq->lock);
/*
 * sysfs show: report the up-threshold; the two sprintf lines suggest an
 * if/else falling back to the compile-time default (keywords dropped by
 * extraction, along with the buf parameter, 'err' declaration and
 * return).  Code kept verbatim.
 */
static ssize_t show_upthreshold(struct device *dev, struct device_attribute *attr,
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	err = sprintf(buf, "%lu\n", data->upthreshold);
	err = sprintf(buf, "%d\n", DFO_UPTHRESHOLD);
	mutex_unlock(&devfreq->lock);
/*
 * sysfs store: set the ondemand down-differential (hysteresis below the
 * up-threshold before the frequency is lowered).
 * NOTE(review): same dropped lines as store_upthreshold (brace, possible
 * guard before the assignment, 'return count').  Code kept verbatim.
 */
static ssize_t store_downdifferential(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	sscanf(buf, "%lu", &wanted);
	data->downdifferential = wanted;
	mutex_unlock(&devfreq->lock);
/*
 * sysfs show: report the down-differential; as with show_upthreshold,
 * the two sprintf lines imply an if/else default fallback whose keywords
 * were dropped, along with the buf parameter and return.  Code verbatim.
 */
static ssize_t show_downdifferential(struct device *dev, struct device_attribute *attr,
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	err = sprintf(buf, "%lu\n", data->downdifferential);
	err = sprintf(buf, "%d\n", DFO_DOWNDIFFERENCTIAL);

	mutex_unlock(&devfreq->lock);
/*
 * sysfs store: add (positive) or retire (negative) a bandwidth request.
 * The delta is accumulated in data->req_bw, converted to a frequency via
 * convert_bw_to_freq(), folded into user_requests.req_sum (clamped at 0)
 * and the target frequency re-evaluated.
 * NOTE(review): declarations of 'wanted'/'req_freq'/'err', several
 * braces and the 'return count' were dropped.  Code kept verbatim.
 */
static ssize_t store_request(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	sscanf(buf, "%d", &wanted);
	data->req_bw += wanted;
	pr_debug("*** %s, request:%d, total request:%d ***\n",
			__func__, wanted, data->req_bw);

	if(data->convert_bw_to_freq)
		req_freq = data->convert_bw_to_freq(wanted);
	/* clamp the accumulated request sum at zero */
	user_requests.req_sum += req_freq;
	if(user_requests.req_sum < 0)
		user_requests.req_sum = 0;
	err = update_devfreq(devfreq);

	mutex_unlock(&devfreq->lock);
/*
 * sysfs show: report the accumulated userspace bandwidth request (KB).
 * NOTE(review): buf parameter line, 'err' declaration, braces and return
 * were dropped by extraction.  Code kept verbatim.
 */
static ssize_t show_request(struct device *dev, struct device_attribute *attr,
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	err = sprintf(buf, "%d KB\n", data->req_bw);
	mutex_unlock(&devfreq->lock);
/*
 * sysfs store: set the governor enable flag consumed by
 * devfreq_ondemand_func() and dfs_get_enable().
 * NOTE(review): opening brace, a line after the assignment (possibly an
 * update_devfreq() call) and the 'return count' were dropped.  Verbatim.
 */
static ssize_t store_enable(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	sscanf(buf, "%lu", &wanted);
	data->enable = wanted;

	mutex_unlock(&devfreq->lock);
/*
 * sysfs show: report the governor enable flag.
 * NOTE(review): buf parameter line, 'err' declaration, braces and return
 * were dropped by extraction.  Code kept verbatim.
 */
static ssize_t show_enable(struct device *dev, struct device_attribute *attr,
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	err = sprintf(buf, "%d \n", data->enable);
	mutex_unlock(&devfreq->lock);
/*
 * sysfs store: pin/release a fixed frequency by delegating to
 * dfs_set_freq() (which takes the devfreq lock itself, hence no locking
 * here).  The 'devfreq'/'data' locals are unused in the visible code.
 * NOTE(review): opening brace, 'err' declaration and the return (likely
 * 'return err ? err : count') were dropped.  Code kept verbatim.
 */
static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;

	sscanf(buf, "%lu", &wanted);
	err = dfs_set_freq(wanted);
/*
 * sysfs show: report the pinned frequency and its reference count.
 * NOTE(review): buf parameter line, 'err' declaration, braces and return
 * were dropped by extraction.  Code kept verbatim.
 */
static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	err = sprintf(buf, "%lu KHz, set count:%lu\n", data->set_freq, data->set_count);
	mutex_unlock(&devfreq->lock);
491 static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
492 static DEVICE_ATTR(set_enable, 0644, show_enable, store_enable);
493 static DEVICE_ATTR(set_request, 0644, show_request, store_request);
494 static DEVICE_ATTR(set_upthreshold, 0644, show_upthreshold, store_upthreshold);
495 static DEVICE_ATTR(set_downdifferential, 0644, show_downdifferential, store_downdifferential);
496 static struct attribute *dev_entries[] = {
497 &dev_attr_set_freq.attr,
498 &dev_attr_set_enable.attr,
499 &dev_attr_set_request.attr,
500 &dev_attr_set_upthreshold.attr,
501 &dev_attr_set_downdifferential.attr,
504 static struct attribute_group dev_attr_group = {
506 .attrs = dev_entries,
/*
 * Governor start: allocate per-instance userspace_data, seed the default
 * thresholds, hook the platform's bw->freq converter (initially passed in
 * via devfreq->data), create the sysfs group, register the early-suspend
 * handlers and set the default request-timer period.
 * NOTE(review): the kzalloc() continuation line, the allocation-failure
 * path, the #else branch of the CONFIG_ARCH_SCX30G block and the return
 * were dropped by extraction.  Code kept verbatim.
 */
static int devfreq_ondemand_start(struct devfreq *devfreq)
	struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
	data->upthreshold = DFO_UPTHRESHOLD;
	data->downdifferential = DFO_DOWNDIFFERENCTIAL;
#if defined(CONFIG_ARCH_SCX30G)
	data->enable = false;
	data->devfreq_enable = true;
	/* devfreq->data initially carries the platform converter hook... */
	data->convert_bw_to_freq = devfreq->data;
	pr_info("*** %s, data->convert_bw_to_freq:%pf ***\n", __func__, data->convert_bw_to_freq);
	/* ...from here on it points at the governor private struct */
	devfreq->data = data;
	err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
#ifdef CONFIG_HAS_EARLYSUSPEND
	register_early_suspend(&devfreq_early_suspend_desc);
	/*
	 * disable DFS before DISPC late resume
	 */
	register_early_suspend(&devfreq_enable_desc);
	/* default auto-clear period for timed bandwidth requests */
	spin_lock(&dfs_req_lock);
	dfs_req_timeout = REQ_TIMEOUT_DEF;
	spin_unlock(&dfs_req_lock);
/*
 * Governor stop: free the private data allocated in start.
 * NOTE(review): braces, any sysfs/early-suspend teardown and the return
 * were dropped by extraction.  Also note the handler below never calls
 * this (the call is commented out), which would leak the allocation.
 */
static int devfreq_ondemand_stop(struct devfreq *devfreq)
	kfree(devfreq->data);

/************ userspace interface *****************/
/*
 * devfreq_ondemand_func - governor target-frequency callback.
 *
 * Computes the next target frequency from the device's busy/total
 * statistics, the accumulated bandwidth requests (req_sum + req_timeout)
 * and any frequency pinned via dfs_set_freq(); the result is recorded in
 * user_requests.ddr_freq_after_req and clamped to min_freq/max_freq.
 *
 * NOTE(review): this extract is missing many lines (the '*freq' output
 * parameter on the signature's second line, the early 'return's after
 * each decision point, the else/closing braces, and intermediate
 * assignments such as the initial 'a = stat.busy_time' and 'b *= 100').
 * Code kept verbatim -- do not treat the visible flow as fall-through.
 */
static int devfreq_ondemand_func(struct devfreq *df,
	struct devfreq_dev_status stat;
	int err = df->profile->get_dev_status(df->dev.parent, &stat);
	unsigned long long a, b;
	unsigned int dfso_upthreshold = DFO_UPTHRESHOLD;
	unsigned int dfso_downdifferential = DFO_DOWNDIFFERENCTIAL;
	struct userspace_data *data = df->data;
	unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
	unsigned long req_freq;

	/* quirk path (dfs_freq_raise_quirk): step just above the minimum */
	if(user_requests.req_quirk &&
			stat.current_frequency==df->min_freq){
		/*
		 * QUIRK: only set frequency larger than minimum
		 */
		*freq = df->min_freq + 1;
		pr_debug("*** %s, req_quirk, freq:%lu, return ***\n", __func__, *freq);
	/*
	 * TODO: add request frequency
	 */
	req_freq = user_requests.req_sum + user_requests.req_timeout;

	/* DVS disabled or a frequency pinned: hold the pinned/last value */
	if (data->enable==false || !(data->devfreq_enable) ||
			data->set_freq || !gov_eb){
		if(user_requests.ddr_freq_after_req == 0)
			user_requests.ddr_freq_after_req = max;
		*freq = (data->set_freq?data->set_freq:user_requests.ddr_freq_after_req);
		/* NOTE(review): set_freq is unsigned long but printed with
		 * %u -- format/argument mismatch, should be %lu */
		pr_debug("*** %s, data->enable:%d, data->set_freq:%u, gov_eb:%d ***\n",
			__func__, data->enable, data->set_freq, gov_eb );
	if (data->upthreshold)
		dfso_upthreshold = data->upthreshold;
	if (data->downdifferential)
		dfso_downdifferential = data->downdifferential;
	printk("*** %s, data is NULL ***\n", __func__ );

	/* sanity-check the (possibly user-supplied) thresholds */
	if (dfso_upthreshold > 100 ||
			dfso_upthreshold < dfso_downdifferential){
		printk("*** %s, dfso_upthreshold:%u, dfso_downdifferential:%u ***\n",
			__func__, dfso_upthreshold, dfso_downdifferential );

	/* Assume MAX if it is going to be divided by zero */
	if (stat.total_time == 0) {
		user_requests.ddr_freq_after_req = *freq;
		pr_debug("*** %s, stat.total_time == 0, freq:%lu ***\n", __func__, *freq);

	/* Prevent overflow */
	if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
		stat.busy_time >>= 7;
		stat.total_time >>= 7;

	/* Set MAX if it's busy enough */
	if (stat.busy_time * 100 >
			stat.total_time * dfso_upthreshold) {
		user_requests.ddr_freq_after_req = *freq;
		pr_debug("*** %s, set max freq:%lu ***\n", __func__, *freq);

	/* Set MAX if we do not know the initial frequency */
	if (stat.current_frequency == 0) {
		user_requests.ddr_freq_after_req = *freq;
		pr_debug("*** %s, stat.current_frequency == 0, freq:%lu ***\n", __func__, *freq);

	/* Keep the current frequency */
	if (stat.busy_time * 100 >
			stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
		*freq = stat.current_frequency + req_freq;
		user_requests.ddr_freq_after_req = *freq;
		pr_debug("*** %s, Keep the current frequency %lu, req_freq:%lu ***\n",
			__func__, stat.current_frequency, req_freq);

	/* Set the desired frequency based on the load */
	a *= stat.current_frequency;
	b = div_u64(a, stat.total_time);
	b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
	*freq = (unsigned long) b + req_freq;
	user_requests.ddr_freq_after_req = *freq;
	pr_debug("*** %s, calculate freq:%lu, req_freq:%lu ***\n",
		__func__, (unsigned long)b, req_freq);

	/* clamp to the user-visible min/max limits */
	if (df->min_freq && *freq < df->min_freq)
		*freq = df->min_freq;
	if (df->max_freq && *freq > df->max_freq)
		*freq = df->max_freq;
676 static int devfreq_ondemand_handler(struct devfreq *devfreq,
677 unsigned int event, void *data)
680 case DEVFREQ_GOV_START:
681 devfreq_ondemand_start(devfreq);
682 devfreq_monitor_start(devfreq);
685 case DEVFREQ_GOV_STOP:
686 devfreq_monitor_stop(devfreq);
687 //devfreq_ondemand_stop(devfreq);
690 case DEVFREQ_GOV_INTERVAL:
691 devfreq_interval_update(devfreq, (unsigned int *)data);
694 case DEVFREQ_GOV_SUSPEND:
695 devfreq_monitor_suspend(devfreq);
698 case DEVFREQ_GOV_RESUME:
699 devfreq_monitor_resume(devfreq);
709 const struct devfreq_governor devfreq_ondemand = {
711 .get_target_freq = devfreq_ondemand_func,
712 .event_handler = devfreq_ondemand_handler,
715 static int __init devfreq_ondemand_init(void)
717 return devfreq_add_governor(&devfreq_ondemand);
719 subsys_initcall(devfreq_ondemand_init);
721 static void __exit devfreq_ondemand_exit(void)
725 ret = devfreq_remove_governor(&devfreq_ondemand);
727 pr_err("%s: failed remove governor %d\n", __func__, ret);
731 module_exit(devfreq_ondemand_exit);
732 MODULE_LICENSE("GPL");