2 * drivers/cpufreq/cpufreq_sprdemand.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
6 * Jun Nakajima <jun.nakajima@intel.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/cpufreq.h>
16 #include <linux/init.h>
17 #include <linux/kernel.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/kobject.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/percpu-defs.h>
23 #include <linux/slab.h>
24 #include <linux/sysfs.h>
25 #include <linux/tick.h>
26 #include <linux/types.h>
27 #include <linux/cpu.h>
28 #include <linux/thermal.h>
29 #include <linux/err.h>
30 #include <linux/earlysuspend.h>
31 #include <linux/suspend.h>
32 #include <asm/cacheflush.h>
33 #include <linux/kthread.h>
34 #include <linux/delay.h>
36 #include "cpufreq_governor.h"
37 #include <linux/input.h>
38 #include <linux/sprd_cpu_cooling.h>
39 #include <linux/platform_device.h>
41 #include <linux/of_device.h>
44 /* On-demand governor macros */
45 #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
46 #define DEF_FREQUENCY_UP_THRESHOLD (80)
47 #define DEF_SAMPLING_DOWN_FACTOR (1)
48 #define MAX_SAMPLING_DOWN_FACTOR (100000)
49 #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (10)
50 #define MICRO_FREQUENCY_UP_THRESHOLD (80)
51 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
52 #define MIN_FREQUENCY_UP_THRESHOLD (11)
53 #define MAX_FREQUENCY_UP_THRESHOLD (100)
55 /* whether plugin cpu according to this score up threshold */
56 #define DEF_CPU_SCORE_UP_THRESHOLD (100)
57 /* whether unplug cpu according to this down threshold*/
58 #define DEF_CPU_LOAD_DOWN_THRESHOLD (30)
59 #define DEF_CPU_DOWN_COUNT (3)
61 #define LOAD_CRITICAL 100
67 #define LOAD_CRITICAL_SCORE 10
68 #define LOAD_HI_SCORE 5
69 #define LOAD_MID_SCORE 0
70 #define LOAD_LIGHT_SCORE -10
71 #define LOAD_LO_SCORE -20
73 #define DEF_CPU_UP_MID_THRESHOLD (80)
74 #define DEF_CPU_UP_HIGH_THRESHOLD (90)
75 #define DEF_CPU_DOWN_MID_THRESHOLD (30)
76 #define DEF_CPU_DOWN_HIGH_THRESHOLD (40)
78 #define GOVERNOR_BOOT_TIME (50*HZ)
79 static unsigned long boot_done;
81 unsigned int cpu_hotplug_disable_set = false;
82 static int g_is_suspend = false;
85 struct unplug_work_info {
87 struct delayed_work unplug_work;
88 struct dbs_data *dbs_data;
90 static DEFINE_PER_CPU(struct unplug_work_info, uwi);
93 struct delayed_work plugin_work;
94 struct delayed_work unplug_work;
95 struct work_struct thm_unplug_work;
96 struct work_struct plugin_all_work;
97 struct work_struct unplug_all_work;
98 static int cpu_num_limit_temp;
99 static void sprd_thm_unplug_cpu(struct work_struct *work);
101 static DEFINE_PER_CPU(struct unplug_work_info, uwi);
103 static DEFINE_SPINLOCK(g_lock);
104 static unsigned int percpu_total_load[CONFIG_NR_CPUS] = {0};
105 static unsigned int percpu_check_count[CONFIG_NR_CPUS] = {0};
106 static int cpu_score = 0;
108 /* FIXME. default touch boost is enabled */
109 #define CONFIG_TOUCH_BOOST
111 #ifdef CONFIG_TOUCH_BOOST
112 static struct task_struct *ksprd_tb;
113 atomic_t g_atomic_tb_cnt = ATOMIC_INIT(0);
114 struct semaphore tb_sem;
115 static unsigned long tp_time;
118 static struct workqueue_struct *input_wq;
119 static struct work_struct dbs_refresh_work;
124 static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, sd_cpu_dbs_info);
126 static struct od_ops sd_ops;
128 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
129 static struct cpufreq_governor cpufreq_gov_sprdemand;
132 static void update_sampling_rate(struct dbs_data *dbs_data, unsigned int new_rate);
134 static void sprdemand_powersave_bias_init_cpu(int cpu)
136 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info, cpu);
138 dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
139 dbs_info->freq_lo = 0;
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 *
 * Returns 1 when IO wait should count as busy time, 0 otherwise.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
165 struct sd_dbs_tuners *g_sd_tuners = NULL;
166 int cpu_core_thermal_limit(int cluster, int max_core)
169 struct sd_dbs_tuners *sd_tuners = g_sd_tuners;
173 if (sd_tuners->cpu_num_limit <= max_core) {
174 sd_tuners->cpu_num_limit = max_core;
177 sd_tuners->cpu_num_limit = max_core;
178 schedule_work_on(0, &thm_unplug_work);
184 * Find right freq to be set now with powersave_bias on.
185 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
186 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
188 static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
189 unsigned int freq_next, unsigned int relation)
191 unsigned int freq_req, freq_reduc, freq_avg;
192 unsigned int freq_hi, freq_lo;
193 unsigned int index = 0;
194 unsigned int jiffies_total, jiffies_hi, jiffies_lo;
195 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info,
197 struct dbs_data *dbs_data = policy->governor_data;
198 struct sd_dbs_tuners *sd_tuners = NULL;
200 if (NULL == dbs_data) {
201 pr_info("generic_powersave_bias_target governor %s return\n", policy->governor->name);
202 if (g_sd_tuners == NULL)
204 sd_tuners = g_sd_tuners;
206 sd_tuners = dbs_data->tuners;
209 if (!dbs_info->freq_table) {
210 dbs_info->freq_lo = 0;
211 dbs_info->freq_lo_jiffies = 0;
215 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
217 freq_req = dbs_info->freq_table[index].frequency;
218 freq_reduc = freq_req * sd_tuners->powersave_bias / 1000;
219 freq_avg = freq_req - freq_reduc;
221 /* Find freq bounds for freq_avg in freq_table */
223 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
224 CPUFREQ_RELATION_H, &index);
225 freq_lo = dbs_info->freq_table[index].frequency;
227 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
228 CPUFREQ_RELATION_L, &index);
229 freq_hi = dbs_info->freq_table[index].frequency;
231 /* Find out how long we have to be in hi and lo freqs */
232 if (freq_hi == freq_lo) {
233 dbs_info->freq_lo = 0;
234 dbs_info->freq_lo_jiffies = 0;
237 jiffies_total = usecs_to_jiffies(sd_tuners->sampling_rate);
238 jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
239 jiffies_hi += ((freq_hi - freq_lo) / 2);
240 jiffies_hi /= (freq_hi - freq_lo);
241 jiffies_lo = jiffies_total - jiffies_hi;
242 dbs_info->freq_lo = freq_lo;
243 dbs_info->freq_lo_jiffies = jiffies_lo;
244 dbs_info->freq_hi_jiffies = jiffies_hi;
248 static void sprdemand_powersave_bias_init(void)
251 for_each_online_cpu(i) {
252 sprdemand_powersave_bias_init_cpu(i);
256 static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
258 struct dbs_data *dbs_data = p->governor_data;
259 struct sd_dbs_tuners *sd_tuners = NULL;
261 if (NULL == dbs_data) {
262 pr_info("dbs_freq_increase governor %s return\n", p->governor->name);
263 if (g_sd_tuners == NULL)
265 sd_tuners = g_sd_tuners;
267 sd_tuners = dbs_data->tuners;
270 if (sd_tuners->powersave_bias)
271 freq = sd_ops.powersave_bias_target(p, freq,
273 else if (p->cur == p->max)
276 __cpufreq_driver_target(p, freq, sd_tuners->powersave_bias ?
277 CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
280 static void sprd_unplug_one_cpu(struct work_struct *work)
282 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
283 struct dbs_data *dbs_data = policy->governor_data;
284 struct sd_dbs_tuners *sd_tuners = NULL;
287 if (NULL == dbs_data) {
288 pr_info("sprd_unplug_one_cpu return\n");
289 if (g_sd_tuners == NULL)
291 sd_tuners = g_sd_tuners;
293 sd_tuners = dbs_data->tuners;
296 #ifdef CONFIG_HOTPLUG_CPU
297 if (num_online_cpus() > 1) {
298 if (!sd_tuners->cpu_hotplug_disable) {
299 cpuid = cpumask_next(0, cpu_online_mask);
300 pr_debug("!! we gonna unplug cpu%d !!\n", cpuid);
308 static void sprd_plugin_one_cpu(struct work_struct *work)
311 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
312 struct dbs_data *dbs_data = policy->governor_data;
313 struct sd_dbs_tuners *sd_tuners = NULL;
315 if (NULL == dbs_data) {
316 pr_info("sprd_plugin_one_cpu return\n");
317 if (g_sd_tuners == NULL)
319 sd_tuners = g_sd_tuners;
321 sd_tuners = dbs_data->tuners;
324 #ifdef CONFIG_HOTPLUG_CPU
325 if (num_online_cpus() < sd_tuners->cpu_num_limit) {
326 cpuid = cpumask_next_zero(0, cpu_online_mask);
327 if (!sd_tuners->cpu_hotplug_disable) {
328 pr_debug("!! we gonna plugin cpu%d !!\n", cpuid);
336 static void sprd_unplug_all_cpu(struct work_struct *work)
338 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
339 struct dbs_data *dbs_data = policy->governor_data;
340 struct sd_dbs_tuners *sd_tuners = NULL;
343 if (NULL == dbs_data) {
344 pr_info("sprd_unplug_all_cpu return\n");
345 if (g_sd_tuners == NULL)
347 sd_tuners = g_sd_tuners;
349 sd_tuners = dbs_data->tuners;
352 #ifdef CONFIG_HOTPLUG_CPU
353 if (num_online_cpus() > 1) {
354 for_each_online_cpu(cpu) {
357 pr_info("!! all gonna unplug cpu%d !!\n", cpu);
365 static void sprd_plugin_all_cpu(struct work_struct *work)
368 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
369 struct dbs_data *dbs_data = policy->governor_data;
370 struct sd_dbs_tuners *sd_tuners = NULL;
372 if (NULL == dbs_data) {
373 pr_info("sprd_plugin_all_cpu return\n");
374 if (g_sd_tuners == NULL)
376 sd_tuners = g_sd_tuners;
378 sd_tuners = dbs_data->tuners;
381 #ifdef CONFIG_HOTPLUG_CPU
382 if (num_online_cpus() < sd_tuners->cpu_num_limit) {
383 for_each_possible_cpu(cpu) {
384 if (!cpu_online(cpu)) {
385 pr_info("!! all gonna plugin cpu%d !!\n",
395 unsigned int percpu_load[4] = {0};
396 #define MAX_CPU_NUM (4)
397 #define MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE (10)
398 #define MAX_PLUG_AVG_LOAD_SIZE (2)
400 unsigned int ga_percpu_total_load[MAX_CPU_NUM][MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE] = {{0}};
401 extern unsigned int dvfs_unplug_select;
402 extern unsigned int dvfs_plug_select;
404 unsigned int cur_window_size[MAX_CPU_NUM] ={0};
405 unsigned int prev_window_size[MAX_CPU_NUM] ={0};
407 int cur_window_index[MAX_CPU_NUM] = {0};
408 unsigned int cur_window_cnt[MAX_CPU_NUM] = {0};
409 int first_window_flag[4] = {0};
411 unsigned int sum_load[4] = {0};
413 unsigned int plug_avg_load[MAX_CPU_NUM][MAX_PLUG_AVG_LOAD_SIZE] = {{50}};
414 unsigned int plug_avg_load_index[MAX_CPU_NUM] = {0};
416 #define mod(n, div) ((n) % (div))
418 extern unsigned int dvfs_score_select;
419 extern unsigned int dvfs_score_hi[4];
420 extern unsigned int dvfs_score_mid[4];
421 extern unsigned int dvfs_score_critical[4];
423 int a_score_sub[4][4][11]=
426 {0,0,0,0,0,0,0,0,5,5,10},
427 {-5,-5,0,0,0,0,0,0,0,5,5},
428 {-10,-5,0,0,0,0,0,0,0,5,5},
429 {0,0,0,0,0,0,0,0,0,0,0}
432 {0,0,0,0,0,0,0,0,13,13,30},
433 {-9,-9,0,0,0,0,0,0,9,9,10},
434 {-18,-9,0,0,0,0,0,0,4,5,9},
435 {0,0,0,0,0,0,0,0,0,0,0}
438 {0,0,0,0,0,0,0,10,20,20,30},
439 {0,0,0,0,0,0,0,5,10,10,20},
440 {0,0,0,0,0,0,0,0,5,5,10},
441 {0,0,0,0,0,0,0,0,0,0,0}
444 {0,0,0,0,0,0,0,0,30,30,50},
445 {-20,-20,0,0,0,0,0,0,20,20,30},
446 {-40,-20,0,0,0,0,0,0,5,10,20},
447 {0,0,0,0,0,0,0,0,0,0,0}
451 int ga_samp_rate[11] = {100000,100000,100000,100000,100000,100000,50000,50000,30000,30000,30000};
453 unsigned int a_sub_windowsize[8][6] =
/*
 * Convert a per-CPU average load into a hotplug "score" contribution.
 *
 * dvfs_score_select < 4 selects the table-free path: the load is bucketed
 * against the tuner thresholds (critical/hi/mid/light/lo) and each bucket
 * yields a fixed score plus a new sampling rate.  Otherwise the score comes
 * from the a_score_sub lookup table indexed by score-profile, number of
 * online CPUs and load decile, optionally amplified by a per-CPU rate that
 * tracks how fast the load is changing.
 *
 * NOTE(review): this extraction has lost several lines (closing braces, the
 * final else branch, the rate[] update logic around the `delta` computation
 * and the trailing `return score;`) — do not assume the visible statements
 * are contiguous in the real file.
 */
465 static int cpu_evaluate_score(int cpu, struct sd_dbs_tuners *sd_tunners , unsigned int load)
468 static int rate[4] = {1};
470 int a_samp_rate[5] = {30000,30000,50000,50000,50000};
472 if(dvfs_score_select < 4)
474 if (load >= sd_tunners->load_critical)
476 score = dvfs_score_critical[num_online_cpus()];
477 sd_tunners->sampling_rate = a_samp_rate[0];
479 else if (load >= sd_tunners->load_hi)
481 score = dvfs_score_hi[num_online_cpus()];
482 sd_tunners->sampling_rate = a_samp_rate[1];
484 else if (load >= sd_tunners->load_mid)
486 score = dvfs_score_mid[num_online_cpus()];
487 sd_tunners->sampling_rate = a_samp_rate[2];
489 else if (load >= sd_tunners->load_light)
491 score = sd_tunners->load_light_score;
492 sd_tunners->sampling_rate = a_samp_rate[3];
494 else if (load >= sd_tunners->load_lo)
496 score = sd_tunners->load_lo_score;
497 sd_tunners->sampling_rate = a_samp_rate[4];
502 sd_tunners->sampling_rate = a_samp_rate[4];
/* table-driven path: delta = change vs. last recorded load for this cpu;
 * presumably a large delta boosts rate[cpu] — lines lost, TODO confirm */
508 delta = abs(percpu_load[cpu] - load);
512 if (unlikely(rate[cpu] > 100))
516 score = a_score_sub[dvfs_score_select % 4][num_online_cpus() - 1][load/10] * rate[cpu];
521 score = a_score_sub[dvfs_score_select % 4][num_online_cpus() - 1][load/10];
525 pr_debug("[DVFS SCORE] rate[%d] %d load %d score %d\n",cpu,rate[cpu],load,score);
531 static int sd_adjust_window(struct sd_dbs_tuners *sd_tunners , unsigned int load)
533 unsigned int cur_window_size = 0;
535 if (load >= sd_tunners->load_critical)
536 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][0];
537 else if (load >= sd_tunners->load_hi)
538 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][1];
539 else if (load >= sd_tunners->load_mid)
540 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][2];
541 else if (load >= sd_tunners->load_light)
542 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][3];
543 else if (load >= sd_tunners->load_lo)
544 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][4];
546 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][5];
548 return cur_window_size;
/*
 * Compute the average load of @cpu over an adaptively-sized ring-buffer
 * window (ga_percpu_total_load).  Records @load, and once the window has
 * filled, averages the most recent cur_window_size[cpu] samples (possibly
 * wrapping around the ring) and re-sizes the window from that average.
 *
 * NOTE(review): several lines (returns, braces, a sentinel return value for
 * "window not yet full") are missing from this extraction.
 * NOTE(review): the cast arithmetic at "(unsigned int)p_valid_pos" truncates
 * the pointer on 64-bit builds and assumes sizeof(p_valid_pos) ==
 * sizeof(unsigned int); plain pointer arithmetic (p_valid_pos - sum_idx_lo)
 * would be the portable form — TODO confirm against the full file.
 */
551 static unsigned int sd_unplug_avg_load(int cpu, struct sd_dbs_tuners *sd_tunners , unsigned int load)
554 unsigned int sum_idx_hi = 0;
555 unsigned int * p_valid_pos = NULL;
556 unsigned int sum_load = 0;
559 initialize the window size for the first time
561 if(!cur_window_size[cpu])
563 cur_window_size[cpu] = sd_adjust_window(sd_tunners,load);
564 pr_debug("[DVFS_UNPLUG]cur_window_size[%d] = %d\n",cpu,cur_window_size[cpu]);
570 record the load in the percpu array
572 ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
573 cur_window_cnt[cpu]++;
575 update the windw index
577 cur_window_index[cpu]++;
578 cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
581 window array is not full, break
583 if(cur_window_cnt[cpu] < cur_window_size[cpu])
590 adjust the window index for it be added one more extra time
592 if(!cur_window_index[cpu])
594 cur_window_index[cpu] = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - 1;
598 cur_window_index[cpu]--;
601 find the valid position according to current window size and indexs
603 p_valid_pos = (unsigned int *)&ga_percpu_total_load[cpu][cur_window_index[cpu]];
605 calculate the average load value by decrease the index, for we need the very updated value which locate in the end of the array
607 for(sum_idx_lo = 0; sum_idx_lo < cur_window_size[cpu]; sum_idx_lo++)
610 calculate the lower part
612 if((cur_window_index[cpu] - sum_idx_lo) >=0)
614 sum_load += *(unsigned int *)((unsigned int)p_valid_pos - sum_idx_lo * sizeof(p_valid_pos));
619 calculate the higher part
621 sum_idx_hi = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - (cur_window_size[cpu] - sum_idx_lo);
622 for(; sum_idx_hi < MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE; sum_idx_hi++)
624 sum_load += ga_percpu_total_load[cpu][sum_idx_hi];
631 sum_load = sum_load / cur_window_size[cpu];
633 adjust the window according to previews load
635 cur_window_size[cpu] = sd_adjust_window(sd_tunners, sum_load);
636 cur_window_cnt[cpu] = 0;
637 pr_debug("[DVFS_UNPLUG]cur_window_size %d sum_load %d\n",cur_window_size[cpu],sum_load);
/*
 * Sliding-window average load for @cpu, incremental variant: instead of
 * re-summing the whole window each sample, maintains a running sum_load[cpu]
 * and replaces the tail sample with the newest head sample each step.  When
 * the window size changed since last sample, the sum is rebuilt from the
 * ring.  Also updates percpu_load[cpu], re-sizes the window when load rises
 * above the average, and derives the sampling rate from the average decile.
 *
 * NOTE(review): warm-up returns, closing braces and the final `return
 * avg_load;` were lost in this extraction — statement adjacency here does
 * not imply adjacency in the real file.
 */
645 static unsigned int sd_unplug_avg_load1(int cpu, struct sd_dbs_tuners *sd_tunners , unsigned int load)
648 int cur_window_pos = 0;
649 int cur_window_pos_tail = 0;
653 initialize the window size for the first time
654 cur_window_cnt[cpu] will be cleared when the core is unpluged
656 if((!first_window_flag[cpu])
657 ||(!cur_window_size[cpu]))
659 if(!cur_window_size[cpu])
661 cur_window_size[cpu] = sd_adjust_window(sd_tunners,load);
662 prev_window_size[cpu] = cur_window_size[cpu];
/* warm-up phase: accumulate samples until the first window is full */
664 if(cur_window_cnt[cpu] < (cur_window_size[cpu] - 1))
667 record the load in the percpu array
669 ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
671 update the windw index
673 cur_window_index[cpu]++;
674 cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
676 cur_window_cnt[cpu]++;
678 sum_load[cpu] += load;
684 first_window_flag[cpu] = 1;
/* steady state: store the new sample and advance the ring head */
688 record the load in the percpu array
690 ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
692 update the windw index
694 cur_window_index[cpu]++;
695 cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
698 adjust the window index for it be added one more extra time
700 if(!cur_window_index[cpu])
702 cur_window_pos = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - 1;
706 cur_window_pos = cur_window_index[cpu] - 1;
710 tail = (c_w_p + max_window_size - c_w_s) % max_window_size
711 tail = (2 + 8 - 5) % 8 = 5
712 tail = (6 + 8 - 5) % 8 = 1
714 cur_window_pos_tail = mod(MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE + cur_window_pos - cur_window_size[cpu],MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
717 no window size change
719 if(prev_window_size[cpu] == cur_window_size[cpu] )
721 sum_load[cpu] = sum_load[cpu] + ga_percpu_total_load[cpu][cur_window_pos] - ga_percpu_total_load[cpu][cur_window_pos_tail] ;
726 window size change, recalculate the sum load
729 while(idx < cur_window_size[cpu])
731 sum_load[cpu] += ga_percpu_total_load[cpu][mod(cur_window_pos_tail + 1 +idx,MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE)];
735 avg_load = sum_load[cpu] / cur_window_size[cpu];
737 percpu_load[cpu] = avg_load;
739 prev_window_size[cpu] = cur_window_size[cpu];
/* grow/shrink the window only when load exceeds the running average */
741 cur_window_size[cpu] = (load > avg_load) ? sd_adjust_window(sd_tunners, load) : prev_window_size[cpu];
743 sd_tunners->sampling_rate = ga_samp_rate[mod(avg_load/10,11)];
745 pr_debug("[DVFS_UNPLUG]sum_load[%d]=%d tail[%d]=%d cur[%d]=%d cur_window_size %d load %d avg_load %d\n",cpu,sum_load[cpu],cur_window_pos_tail,
746 ga_percpu_total_load[cpu][cur_window_pos_tail],cur_window_pos,ga_percpu_total_load[cpu][cur_window_pos],cur_window_size[cpu],load,avg_load);
749 pr_info("cur_window_pos %d cur_window_pos_tail %d load %d sum_load %d\n",cur_window_pos,cur_window_pos_tail,load,sum_load[cpu] );
/*
 * Simplified variant of sd_unplug_avg_load1: same ring-buffer/running-sum
 * scheme, but it never re-sizes the window in steady state (no
 * prev/cur window-size mismatch branch).  Updates percpu_load[cpu] and the
 * tuner sampling rate from the computed average.
 *
 * NOTE(review): as with its siblings, warm-up returns / braces / the final
 * return were lost in this extraction.
 */
755 static unsigned int sd_unplug_avg_load11(int cpu, struct sd_dbs_tuners *sd_tunners , unsigned int load)
758 int cur_window_pos = 0;
759 int cur_window_pos_tail = 0;
762 initialize the window size for the first time
763 cur_window_cnt[cpu] will be cleared when the core is unpluged
765 if((!first_window_flag[cpu])
766 ||(!cur_window_size[cpu]))
768 if(!cur_window_size[cpu])
770 cur_window_size[cpu] = sd_adjust_window(sd_tunners,load);
771 prev_window_size[cpu] = cur_window_size[cpu];
/* warm-up phase: accumulate until the first window is full */
773 if(cur_window_cnt[cpu] < (cur_window_size[cpu] - 1))
776 record the load in the percpu array
778 ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
780 update the windw index
782 cur_window_index[cpu]++;
783 cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
785 cur_window_cnt[cpu]++;
787 sum_load[cpu] += load;
793 first_window_flag[cpu] = 1;
/* steady state: store the new sample and advance the ring head */
797 record the load in the percpu array
799 ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
801 update the windw index
803 cur_window_index[cpu]++;
804 cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
807 adjust the window index for it be added one more extra time
809 if(!cur_window_index[cpu])
811 cur_window_pos = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - 1;
815 cur_window_pos = cur_window_index[cpu] - 1;
819 tail = (c_w_p + max_window_size - c_w_s) % max_window_size
820 tail = (2 + 8 - 5) % 8 = 5
821 tail = (6 + 8 - 5) % 8 = 1
823 cur_window_pos_tail = mod(MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE + cur_window_pos - cur_window_size[cpu],MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
826 sum load = current load + current new data - tail data
828 sum_load[cpu] = sum_load[cpu] + ga_percpu_total_load[cpu][cur_window_pos] - ga_percpu_total_load[cpu][cur_window_pos_tail] ;
831 calc the average load
833 avg_load = sum_load[cpu] / cur_window_size[cpu];
835 percpu_load[cpu] = avg_load;
837 sd_tunners->sampling_rate = ga_samp_rate[mod(avg_load/10,11)];
842 #define MAX_ARRAY_SIZE (10)
843 #define LOAD_WINDOW_SIZE (3)
844 unsigned int load_array[CONFIG_NR_CPUS][MAX_ARRAY_SIZE] = { {0} };
845 unsigned int window_index[CONFIG_NR_CPUS] = {0};
/*
 * Exponentially-weighted average of the last window_size load samples for
 * @cpu: newest sample gets the highest weight (scale doubles per step,
 * walking from window_head forward), result = sum(load*scale)/sum(scale).
 *
 * NOTE(review): the extraction lost a few lines — most likely the
 * `window_index[cpu]++` between storing the sample and the mod-wrap, the
 * `else` before the tail computation, the `sum_scale += scale` /
 * `window_head++` inside the loop, and closing braces.  Do not treat the
 * visible statements as contiguous.
 */
847 static unsigned int sd_avg_load(int cpu, struct sd_dbs_tuners *sd_tuners,
852 unsigned int sum_scale = 0;
853 unsigned int sum_load = 0;
854 unsigned int window_tail = 0, window_head = 0;
/* store the newest sample, then wrap the write index */
856 load_array[cpu][window_index[cpu]] = load;
858 window_index[cpu] = mod(window_index[cpu], MAX_ARRAY_SIZE);
859 if(!window_index[cpu])
860 window_tail = MAX_ARRAY_SIZE - 1;
862 window_tail = window_index[cpu] - 1;
/* head = oldest sample inside the configured window */
864 window_head = mod(MAX_ARRAY_SIZE + window_tail -
865 sd_tuners->window_size + 1, MAX_ARRAY_SIZE);
866 for (scale = 1, count = 0; count < sd_tuners->window_size;
867 scale += scale, count++) {
868 pr_debug("load_array[%d][%d]: %d, scale: %d\n",
869 cpu, window_head, load_array[cpu][window_head], scale);
870 sum_load += (load_array[cpu][window_head] * scale);
873 window_head = mod(window_head, MAX_ARRAY_SIZE);
876 return sum_load / sum_scale;
880 * Every sampling_rate, we check, if current idle time is less than 20%
881 * (default), then we try to increase frequency. Every sampling_rate, we look
882 * for the lowest frequency which can sustain the load while keeping idle time
883 * over 30%. If such a frequency exist, we try to decrease to this frequency.
885 * Any frequency increase takes it to the maximum frequency. Frequency reduction
886 * happens at minimum steps of 5% (default) of current frequency
/*
 * Core governor decision for one sample: pick a new frequency for @cpu from
 * its efficient @load, then run the hotplug heuristics (plugin/unplug work
 * is always scheduled on CPU0).  Skipped during boot grace period, suspend,
 * and (one sample) after a touch-boost event.
 *
 * Frequency policy mirrors upstream ondemand: load > up_threshold jumps to
 * max (applying sampling_down_factor), load < adj_up_threshold scales down
 * proportionally with optional powersave-bias splitting.
 *
 * Hotplug policy branches on dvfs_unplug_select: the first path uses
 * sd_avg_load() with mid/high thresholds keyed off the online-CPU count;
 * the legacy paths use the windowed averages plus the cpu_score mechanism.
 *
 * NOTE(review): this extraction has lost many lines (returns, braces, the
 * declarations of local_cpu/puwi, else branches).
 */
888 static void sd_check_cpu(int cpu, unsigned int load)
890 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info, cpu);
891 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
892 struct dbs_data *dbs_data = policy->governor_data;
893 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
894 unsigned int itself_avg_load = 0;
/* ignore samples until GOVERNOR_BOOT_TIME has elapsed */
897 if (time_before(jiffies, boot_done))
900 local_cpu = smp_processor_id();
905 /* skip cpufreq adjustment if system enter into suspend */
906 if (true == sd_tuners->is_suspend) {
907 pr_info("%s: is_suspend=%s, skip cpufreq adjust\n",
908 __func__, sd_tuners->is_suspend?"true":"false");
912 dbs_info->freq_lo = 0;
913 pr_debug("efficient load %d, cur freq %d, online CPUs %d\n",
914 load, policy->cur, num_online_cpus());
/* consume one pending touch-boost credit instead of normal scaling */
916 #ifdef CONFIG_TOUCH_BOOST
917 if (atomic_read(&g_atomic_tb_cnt)) {
918 atomic_sub_return(1, &g_atomic_tb_cnt);
923 /* Check for frequency increase */
924 if (load > sd_tuners->up_threshold) {
925 /* If switching to max speed, apply sampling_down_factor */
926 if (policy->cur < policy->max)
927 dbs_info->rate_mult =
928 sd_tuners->sampling_down_factor;
929 if (num_online_cpus() == sd_tuners->cpu_num_limit)
930 dbs_freq_increase(policy, policy->max);
/* NOTE(review): "policy->max-1" is max minus 1 kHz, not the next table
 * step — presumably relies on CPUFREQ_RELATION_H rounding down; confirm */
932 dbs_freq_increase(policy, policy->max-1);
936 /* Check for frequency decrease */
937 /* if we cannot reduce the frequency anymore, break out early */
938 if (policy->cur == policy->min)
942 * The optimal frequency is the frequency that is the lowest that can
943 * support the current CPU usage without triggering the up policy. To be
944 * safe, we focus 3 points under the threshold.
946 if (load < sd_tuners->adj_up_threshold) {
947 unsigned int freq_next;
948 unsigned int load_freq;
949 load_freq = load * policy->cur;
950 freq_next = load_freq / sd_tuners->adj_up_threshold;
951 /* No longer fully busy, reset rate_mult */
952 dbs_info->rate_mult = 1;
954 if (freq_next < policy->min)
955 freq_next = policy->min;
957 if (!sd_tuners->powersave_bias) {
958 __cpufreq_driver_target(policy, freq_next,
963 freq_next = sd_ops.powersave_bias_target(policy, freq_next,
965 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
971 /* skip cpu hotplug check if hotplug is disabled */
972 if (sd_tuners->cpu_hotplug_disable)
975 /* cpu plugin check */
976 itself_avg_load = sd_avg_load(cpu, sd_tuners, load);
977 pr_debug(" itself_avg_load %d\n", itself_avg_load);
978 if (num_online_cpus() < sd_tuners->cpu_num_limit) {
979 int cpu_up_threshold;
981 if (num_online_cpus() == 1)
982 cpu_up_threshold = sd_tuners->cpu_up_mid_threshold;
984 cpu_up_threshold = sd_tuners->cpu_up_high_threshold;
986 if (itself_avg_load > cpu_up_threshold) {
987 schedule_delayed_work_on(0, &plugin_work, 0);
992 /* cpu unplug check */
993 if (num_online_cpus() > 1) {
994 int cpu_down_threshold;
996 if (num_online_cpus() > 2)
997 cpu_down_threshold = sd_tuners->cpu_down_high_threshold;
999 cpu_down_threshold = sd_tuners->cpu_down_mid_threshold;
1001 if (itself_avg_load < cpu_down_threshold)
1002 schedule_delayed_work_on(0, &unplug_work, 0);
/* legacy score-based plugin path (dvfs_plug_select variants) */
1006 itself_avg_load = sd_unplug_avg_load1(local_cpu, sd_tuners, load);
1007 /* cpu plugin check */
1008 if(num_online_cpus() < sd_tuners->cpu_num_limit) {
1009 cpu_score += cpu_evaluate_score(policy->cpu,sd_tuners, itself_avg_load);
1012 if (cpu_score >= sd_tuners->cpu_score_up_threshold) {
1013 pr_debug("cpu_score=%d, begin plugin cpu!\n", cpu_score);
1015 schedule_delayed_work_on(0, &plugin_work, 0);
1021 /* cpu unplug check */
1022 puwi = &per_cpu(uwi, local_cpu);
1023 if((num_online_cpus() > 1) && (dvfs_unplug_select == 1)){
1024 percpu_total_load[local_cpu] += load;
1025 percpu_check_count[local_cpu]++;
/* NOTE(review): indexes percpu_check_count with `cpu` while every
 * neighbouring access uses `local_cpu` — looks like a bug; confirm */
1026 if(percpu_check_count[cpu] == sd_tuners->cpu_down_count) {
1027 /* calculate itself's average load */
1028 itself_avg_load = percpu_total_load[local_cpu]/sd_tuners->cpu_down_count;
1029 pr_debug("check unplug: for cpu%u avg_load=%d\n", local_cpu, itself_avg_load);
1030 if(itself_avg_load < sd_tuners->cpu_down_threshold) {
1031 pr_info("cpu%u's avg_load=%d,begin unplug cpu\n",
1032 policy->cpu, itself_avg_load);
1033 schedule_delayed_work_on(0, &unplug_work, 0);
1035 percpu_check_count[local_cpu] = 0;
1036 percpu_total_load[local_cpu] = 0;
1039 else if((num_online_cpus() > 1) && (dvfs_unplug_select == 2))
1041 /* calculate itself's average load */
1042 pr_debug("check unplug: for cpu%u avg_load=%d\n", local_cpu, itself_avg_load);
1043 if(itself_avg_load < sd_tuners->cpu_down_threshold)
1045 pr_info("cpu%u's avg_load=%d,begin unplug cpu\n",
1046 local_cpu, itself_avg_load);
/* reset this cpu's windowed-load state before unplugging it */
1047 percpu_load[local_cpu] = 0;
1048 cur_window_size[local_cpu] = 0;
1049 cur_window_index[local_cpu] = 0;
1050 cur_window_cnt[local_cpu] = 0;
1051 prev_window_size[local_cpu] = 0;
1052 first_window_flag[local_cpu] = 0;
1053 sum_load[local_cpu] = 0;
1054 memset(&ga_percpu_total_load[local_cpu][0],0,sizeof(int) * MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
1055 schedule_delayed_work_on(0, &unplug_work, 0);
1058 else if((num_online_cpus() > 1) && (dvfs_unplug_select > 2))
1060 /* calculate itself's average load */
1061 itself_avg_load = sd_unplug_avg_load11(local_cpu, sd_tuners, load);
1062 pr_debug("check unplug: for cpu%u avg_load=%d\n", local_cpu, itself_avg_load);
1063 if(itself_avg_load < sd_tuners->cpu_down_threshold)
1065 pr_info("cpu%u's avg_load=%d,begin unplug cpu\n",
1066 local_cpu, itself_avg_load);
1067 percpu_load[local_cpu] = 0;
1068 cur_window_size[local_cpu] = 0;
1069 cur_window_index[local_cpu] = 0;
1070 cur_window_cnt[local_cpu] = 0;
1071 prev_window_size[local_cpu] = 0;
1072 first_window_flag[local_cpu] = 0;
1073 sum_load[local_cpu] = 0;
1074 memset(&ga_percpu_total_load[local_cpu][0],0,sizeof(int) * MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
1075 schedule_delayed_work_on(0, &unplug_work, 0);
/*
 * Periodic governor work (runs the sampling loop).  Only CPU0 evaluates
 * load (other CPUs bail out via smp_processor_id()).  Implements the
 * ondemand NORMAL/SUB sample split used by powersave_bias: a SUB sample
 * drops to freq_lo for freq_lo_jiffies, a NORMAL sample re-evaluates load
 * via dbs_check_cpu() and may arm a SUB sample.  Re-queues itself with
 * gov_queue_work() under the per-cpu timer mutex.
 *
 * NOTE(review): `return`s after the early-exit checks and the unlock/goto
 * structure around need_load_eval() were lost in this extraction.
 */
1081 static void sd_dbs_timer(struct work_struct *work)
1083 struct od_cpu_dbs_info_s *dbs_info =
1084 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
1085 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
1086 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(sd_cpu_dbs_info,
1088 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
1089 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1090 int delay = 0, sample_type = core_dbs_info->sample_type;
1091 bool modify_all = false;
/* only CPU0 drives the sampling */
1093 if (smp_processor_id())
1096 /* CPUFREQ_GOV_STOP will set cur_policy as NULL*/
1097 if (NULL == core_dbs_info->cdbs.cur_policy) {
1098 pr_err("%s cur_policy is cleared, just exit\n", __func__);
1102 mutex_lock(&core_dbs_info->cdbs.timer_mutex);
1103 if (time_before(jiffies, boot_done))
1106 if (!need_load_eval(&core_dbs_info->cdbs, sd_tuners->sampling_rate)) {
1111 /* Common NORMAL_SAMPLE setup */
1112 core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
1113 if (sample_type == OD_SUB_SAMPLE) {
1114 delay = core_dbs_info->freq_lo_jiffies;
1115 __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
1116 core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
1118 dbs_check_cpu(dbs_data, cpu);
1119 if (core_dbs_info->freq_lo) {
1120 /* Setup timer for SUB_SAMPLE */
1121 core_dbs_info->sample_type = OD_SUB_SAMPLE;
1122 delay = core_dbs_info->freq_hi_jiffies;
1128 delay = delay_for_sampling_rate(sd_tuners->sampling_rate
1129 * core_dbs_info->rate_mult);
1131 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
1132 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
1135 /************************** sysfs interface ************************/
1136 static struct common_dbs_data sd_dbs_cdata;
1139 * update_sampling_rate - update sampling rate effective immediately if needed.
1140 * @new_rate: new sampling rate
1142 * If new rate is smaller than the old, simply updating
1143 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
1144 * original sampling_rate was 1 second and the requested new sampling rate is 10
1145 * ms because the user needs immediate reaction from ondemand governor, but not
1146 * sure if higher frequency will be required or not, then, the governor may
1147 * change the sampling rate too late; up to 1 second later. Thus, if we are
1148 * reducing the sampling rate, we need to make the new value effective
1151 static void update_sampling_rate(struct dbs_data *dbs_data,
1152 unsigned int new_rate)
1154 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1157 sd_tuners->sampling_rate = new_rate = max(new_rate,
1158 dbs_data->min_sampling_rate);
1160 for_each_online_cpu(cpu) {
1161 struct cpufreq_policy *policy;
1162 struct od_cpu_dbs_info_s *dbs_info;
1163 unsigned long next_sampling, appointed_at;
1165 policy = cpufreq_cpu_get(cpu);
1168 if (policy->governor != &cpufreq_gov_sprdemand) {
1169 cpufreq_cpu_put(policy);
1172 dbs_info = &per_cpu(sd_cpu_dbs_info, cpu);
1173 cpufreq_cpu_put(policy);
1175 mutex_lock(&dbs_info->cdbs.timer_mutex);
1177 if (!delayed_work_pending(&dbs_info->cdbs.work)) {
1178 mutex_unlock(&dbs_info->cdbs.timer_mutex);
1182 next_sampling = jiffies + usecs_to_jiffies(new_rate);
1183 appointed_at = dbs_info->cdbs.work.timer.expires;
1185 if (time_before(next_sampling, appointed_at)) {
1187 mutex_unlock(&dbs_info->cdbs.timer_mutex);
1188 cancel_delayed_work_sync(&dbs_info->cdbs.work);
1189 mutex_lock(&dbs_info->cdbs.timer_mutex);
1191 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
1192 usecs_to_jiffies(new_rate), true);
1195 mutex_unlock(&dbs_info->cdbs.timer_mutex);
1199 static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
1204 ret = sscanf(buf, "%u", &input);
1208 update_sampling_rate(dbs_data, input);
/*
 * sysfs store: treat any nonzero input as "count iowait as busy time".
 * Re-reads each online CPU's idle baseline so the next load sample is
 * computed consistently with the new io_is_busy setting.
 */
1212 static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
1215 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1220 ret = sscanf(buf, "%u", &input);
1223 sd_tuners->io_is_busy = !!input;
1225 /* we need to re-evaluate prev_cpu_idle */
1226 for_each_online_cpu(j) {
1227 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info,
1229 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
1230 &dbs_info->cdbs.prev_cpu_wall, sd_tuners->io_is_busy);
/*
 * sysfs store: set the load percentage above which frequency is raised.
 * Rejects values outside [MIN_FREQUENCY_UP_THRESHOLD,
 * MAX_FREQUENCY_UP_THRESHOLD]. adj_up_threshold is shifted by the same
 * delta so the up/down hysteresis gap is preserved.
 */
1235 static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
1238 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1241 ret = sscanf(buf, "%u", &input);
1243 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
1244 input < MIN_FREQUENCY_UP_THRESHOLD) {
1247 /* Calculate the new adj_up_threshold */
1248 sd_tuners->adj_up_threshold += input;
1249 sd_tuners->adj_up_threshold -= sd_tuners->up_threshold;
1251 sd_tuners->up_threshold = input;
/*
 * sysfs store: set how many sampling periods to stay at high speed
 * before re-evaluating (1..MAX_SAMPLING_DOWN_FACTOR). Resets each online
 * CPU's rate multiplier so a previously boosted hold is not carried over.
 */
1255 static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
1256 const char *buf, size_t count)
1258 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1259 unsigned int input, j;
1261 ret = sscanf(buf, "%u", &input);
1263 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
1265 sd_tuners->sampling_down_factor = input;
1267 /* Reset down sampling multiplier in case it was active */
1268 for_each_online_cpu(j) {
1269 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info,
1271 dbs_info->rate_mult = 1;
/*
 * sysfs store: toggle whether nice-priority CPU time counts as idle.
 * Short-circuits when the value is unchanged; otherwise re-baselines
 * prev_cpu_idle (and prev_cpu_nice when enabling) on every online CPU
 * so the next sample is not skewed by the accounting change.
 */
1276 static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
1279 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1285 ret = sscanf(buf, "%u", &input);
1292 if (input == sd_tuners->ignore_nice) { /* nothing to do */
1295 sd_tuners->ignore_nice = input;
1297 /* we need to re-evaluate prev_cpu_idle */
1298 for_each_online_cpu(j) {
1299 struct od_cpu_dbs_info_s *dbs_info;
1300 dbs_info = &per_cpu(sd_cpu_dbs_info, j);
1301 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
1302 &dbs_info->cdbs.prev_cpu_wall, sd_tuners->io_is_busy);
1303 if (sd_tuners->ignore_nice)
1304 dbs_info->cdbs.prev_cpu_nice =
1305 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
/*
 * sysfs store: set the powersave bias and re-run per-CPU bias init so
 * the new bias takes effect on every CPU.
 */
1311 static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
1314 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1317 ret = sscanf(buf, "%u", &input);
1325 sd_tuners->powersave_bias = input;
1326 sprdemand_powersave_bias_init();
/* sysfs store: cap on how many CPUs hotplug logic may keep online. */
1330 static ssize_t store_cpu_num_limit(struct dbs_data *dbs_data, const char *buf,
1333 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1336 ret = sscanf(buf, "%u", &input);
1341 sd_tuners->cpu_num_limit = input;
/* sysfs store: score threshold above which another CPU is plugged in. */
1345 static ssize_t store_cpu_score_up_threshold(struct dbs_data *dbs_data, const char *buf,
1348 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1351 ret = sscanf(buf, "%u", &input);
1356 sd_tuners->cpu_score_up_threshold = input;
/* sysfs store: load level regarded as "critical" for scoring (%u). */
1360 static ssize_t store_load_critical(struct dbs_data *dbs_data, const char *buf,
1363 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1366 ret = sscanf(buf, "%u", &input);
1371 sd_tuners->load_critical = input;
/* sysfs store: load level regarded as "high" for scoring (%u). */
1375 static ssize_t store_load_hi(struct dbs_data *dbs_data, const char *buf,
1378 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1381 ret = sscanf(buf, "%u", &input);
1386 sd_tuners->load_hi = input;
/* sysfs store: load level regarded as "medium" for scoring (%u). */
1390 static ssize_t store_load_mid(struct dbs_data *dbs_data, const char *buf,
1393 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1396 ret = sscanf(buf, "%u", &input);
1401 sd_tuners->load_mid = input;
/* sysfs store: load level regarded as "light" for scoring (%u). */
1405 static ssize_t store_load_light(struct dbs_data *dbs_data, const char *buf,
1408 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1411 ret = sscanf(buf, "%u", &input);
1416 sd_tuners->load_light = input;
/* sysfs store: load level regarded as "low" for scoring (%u). */
1420 static ssize_t store_load_lo(struct dbs_data *dbs_data, const char *buf,
1423 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1426 ret = sscanf(buf, "%u", &input);
1431 sd_tuners->load_lo = input;
/* sysfs store: score awarded for a "critical" load sample (signed %d,
 * unlike the %u load thresholds — scores may be negative). */
1435 static ssize_t store_load_critical_score(struct dbs_data *dbs_data, const char *buf,
1438 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1441 ret = sscanf(buf, "%d", &input);
1446 sd_tuners->load_critical_score = input;
/* sysfs store: score awarded for a "high" load sample (signed %d). */
1450 static ssize_t store_load_hi_score(struct dbs_data *dbs_data, const char *buf,
1453 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1456 ret = sscanf(buf, "%d", &input);
1461 sd_tuners->load_hi_score = input;
/* sysfs store: score awarded for a "medium" load sample (signed %d). */
1466 static ssize_t store_load_mid_score(struct dbs_data *dbs_data, const char *buf,
1469 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1472 ret = sscanf(buf, "%d", &input);
1477 sd_tuners->load_mid_score = input;
/* sysfs store: score awarded for a "light" load sample (signed %d;
 * LOAD_LIGHT_SCORE defaults to a negative value). */
1481 static ssize_t store_load_light_score(struct dbs_data *dbs_data, const char *buf,
1484 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1487 ret = sscanf(buf, "%d", &input);
1492 sd_tuners->load_light_score = input;
/* sysfs store: score awarded for a "low" load sample (signed %d). */
1496 static ssize_t store_load_lo_score(struct dbs_data *dbs_data, const char *buf,
1499 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1502 ret = sscanf(buf, "%d", &input);
1507 sd_tuners->load_lo_score = input;
/* sysfs store: load threshold below which a CPU becomes an unplug
 * candidate (see DEF_CPU_LOAD_DOWN_THRESHOLD). */
1511 static ssize_t store_cpu_down_threshold(struct dbs_data *dbs_data, const char *buf,
1514 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1517 ret = sscanf(buf, "%u", &input);
1522 sd_tuners->cpu_down_threshold = input;
/* sysfs store: number of consecutive low-load samples required before
 * a CPU is actually unplugged (see DEF_CPU_DOWN_COUNT). */
1526 static ssize_t store_cpu_down_count(struct dbs_data *dbs_data, const char *buf,
1529 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1532 ret = sscanf(buf, "%u", &input);
1537 sd_tuners->cpu_down_count = input;
/*
 * sysfs store: enable/disable the governor's dynamic CPU hotplug.
 * Only honoured on multi-core limits (cpu_num_limit > 1). When hotplug
 * is being disabled, all offline CPUs are brought back online via
 * plugin_all_work and this writer busy-waits until they appear.
 */
1541 static ssize_t store_cpu_hotplug_disable(struct dbs_data *dbs_data, const char *buf,
1544 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1545 unsigned int input, cpu;
1547 ret = sscanf(buf, "%u", &input);
/* No change requested: bail out early. */
1553 if (sd_tuners->cpu_hotplug_disable == input) {
1556 if (sd_tuners->cpu_num_limit > 1)
1557 sd_tuners->cpu_hotplug_disable = input;
/* Mirror the setting into the global flag used elsewhere. */
1559 if (sd_tuners->cpu_hotplug_disable > 0)
1560 cpu_hotplug_disable_set = true;
1562 cpu_hotplug_disable_set = false;
1565 /* plug-in all offline cpu mandatory if we didn't
1566 * enable CPU_DYNAMIC_HOTPLUG
1568 #ifdef CONFIG_HOTPLUG_CPU
1569 if (sd_tuners->cpu_hotplug_disable &&
1570 num_online_cpus() < sd_tuners->cpu_num_limit) {
1571 schedule_work_on(0, &plugin_all_work);
/* NOTE(review): this polls (busy-waits) until every CPU up to
 * cpu_num_limit is online — an unbounded wait if onlining fails. */
1574 pr_debug("wait for all cpu online!\n");
1575 } while (num_online_cpus() < sd_tuners->cpu_num_limit);
/* sysfs store: average-load threshold for plugging in a CPU at the
 * mid tier. */
1581 static ssize_t store_cpu_up_mid_threshold(struct dbs_data *dbs_data,
1582 const char *buf, size_t count)
1584 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1587 ret = sscanf(buf, "%u", &input);
1592 sd_tuners->cpu_up_mid_threshold = input;
/* sysfs store: average-load threshold for plugging in a CPU at the
 * high tier. */
1596 static ssize_t store_cpu_up_high_threshold(struct dbs_data *dbs_data,
1597 const char *buf, size_t count)
1599 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1602 ret = sscanf(buf, "%u", &input);
1607 sd_tuners->cpu_up_high_threshold = input;
/* sysfs store: average-load threshold for unplugging a CPU at the
 * mid tier. */
1611 static ssize_t store_cpu_down_mid_threshold(struct dbs_data *dbs_data,
1612 const char *buf, size_t count)
1614 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1617 ret = sscanf(buf, "%u", &input);
1622 sd_tuners->cpu_down_mid_threshold = input;
/* sysfs store: average-load threshold for unplugging a CPU at the
 * high tier. */
1626 static ssize_t store_cpu_down_high_threshold(struct dbs_data *dbs_data,
1627 const char *buf, size_t count)
1629 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1632 ret = sscanf(buf, "%u", &input);
1637 sd_tuners->cpu_down_high_threshold = input;
/*
 * sysfs store: size of the sliding load-history window used for
 * hotplug decisions. Bounded to [1, MAX_ARRAY_SIZE] to fit the
 * fixed history arrays.
 */
1641 static ssize_t store_window_size(struct dbs_data *dbs_data,
1642 const char *buf, size_t count)
1644 struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1647 ret = sscanf(buf, "%u", &input);
1652 if (input > MAX_ARRAY_SIZE || input < 1)
1655 sd_tuners->window_size = input;
/*
 * Macro expansions generating show_<name>/store_<name> sysfs accessor
 * pairs (and the read-only sampling_rate_min) for each tuner above.
 * The show_store_one()/declare_show_sampling_rate_min() macros come
 * from cpufreq_governor.h; "sd" selects the sd_dbs_tuners struct.
 */
1659 show_store_one(sd, sampling_rate);
1660 show_store_one(sd, io_is_busy);
1661 show_store_one(sd, up_threshold);
1662 show_store_one(sd, sampling_down_factor);
1663 show_store_one(sd, ignore_nice);
1664 show_store_one(sd, powersave_bias);
1665 declare_show_sampling_rate_min(sd);
1666 show_store_one(sd, cpu_score_up_threshold);
1667 show_store_one(sd, load_critical);
1668 show_store_one(sd, load_hi);
1669 show_store_one(sd, load_mid);
1670 show_store_one(sd, load_light);
1671 show_store_one(sd, load_lo);
1672 show_store_one(sd, load_critical_score);
1673 show_store_one(sd, load_hi_score);
1674 show_store_one(sd, load_mid_score);
1675 show_store_one(sd, load_light_score);
1676 show_store_one(sd, load_lo_score);
1677 show_store_one(sd, cpu_down_threshold);
1678 show_store_one(sd, cpu_down_count);
1679 show_store_one(sd, cpu_hotplug_disable);
1680 show_store_one(sd, cpu_num_limit);
1681 show_store_one(sd, cpu_up_mid_threshold);
1682 show_store_one(sd, cpu_up_high_threshold);
1683 show_store_one(sd, cpu_down_mid_threshold);
1684 show_store_one(sd, cpu_down_high_threshold);
1685 show_store_one(sd, window_size);
/*
 * Macro expansions creating the sysfs attribute objects, in both the
 * system-wide (gov_sys) and per-policy (gov_pol) flavors. _rw attributes
 * are writable; sampling_rate_min is read-only.
 */
1687 gov_sys_pol_attr_rw(sampling_rate);
1688 gov_sys_pol_attr_rw(io_is_busy);
1689 gov_sys_pol_attr_rw(up_threshold);
1690 gov_sys_pol_attr_rw(sampling_down_factor);
1691 gov_sys_pol_attr_rw(ignore_nice);
1692 gov_sys_pol_attr_rw(powersave_bias);
1693 gov_sys_pol_attr_ro(sampling_rate_min);
1694 gov_sys_pol_attr_rw(cpu_score_up_threshold);
1695 gov_sys_pol_attr_rw(load_critical);
1696 gov_sys_pol_attr_rw(load_hi);
1697 gov_sys_pol_attr_rw(load_mid);
1698 gov_sys_pol_attr_rw(load_light);
1699 gov_sys_pol_attr_rw(load_lo);
1700 gov_sys_pol_attr_rw(load_critical_score);
1701 gov_sys_pol_attr_rw(load_hi_score);
1702 gov_sys_pol_attr_rw(load_mid_score);
1703 gov_sys_pol_attr_rw(load_light_score);
1704 gov_sys_pol_attr_rw(load_lo_score);
1705 gov_sys_pol_attr_rw(cpu_down_threshold);
1706 gov_sys_pol_attr_rw(cpu_down_count);
1707 gov_sys_pol_attr_rw(cpu_hotplug_disable);
1708 gov_sys_pol_attr_rw(cpu_num_limit);
1709 gov_sys_pol_attr_rw(cpu_up_mid_threshold);
1710 gov_sys_pol_attr_rw(cpu_up_high_threshold);
1711 gov_sys_pol_attr_rw(cpu_down_mid_threshold);
1712 gov_sys_pol_attr_rw(cpu_down_high_threshold);
1713 gov_sys_pol_attr_rw(window_size);
/* System-wide (/sys/devices/system/cpu/cpufreq/sprdemand) attribute
 * list; NULL-terminated in the full source. */
1715 static struct attribute *dbs_attributes_gov_sys[] = {
1716 &sampling_rate_min_gov_sys.attr,
1717 &sampling_rate_gov_sys.attr,
1718 &up_threshold_gov_sys.attr,
1719 &sampling_down_factor_gov_sys.attr,
1720 &ignore_nice_gov_sys.attr,
1721 &powersave_bias_gov_sys.attr,
1722 &io_is_busy_gov_sys.attr,
1723 &cpu_score_up_threshold_gov_sys.attr,
1724 &load_critical_gov_sys.attr,
1725 &load_hi_gov_sys.attr,
1726 &load_mid_gov_sys.attr,
1727 &load_light_gov_sys.attr,
1728 &load_lo_gov_sys.attr,
1729 &load_critical_score_gov_sys.attr,
1730 &load_hi_score_gov_sys.attr,
1731 &load_mid_score_gov_sys.attr,
1732 &load_light_score_gov_sys.attr,
1733 &load_lo_score_gov_sys.attr,
1734 &cpu_down_threshold_gov_sys.attr,
1735 &cpu_down_count_gov_sys.attr,
1736 &cpu_hotplug_disable_gov_sys.attr,
1737 &cpu_num_limit_gov_sys.attr,
1738 &cpu_up_mid_threshold_gov_sys.attr,
1739 &cpu_up_high_threshold_gov_sys.attr,
1740 &cpu_down_mid_threshold_gov_sys.attr,
1741 &cpu_down_high_threshold_gov_sys.attr,
1742 &window_size_gov_sys.attr,
/* sysfs group exposing the system-wide attributes under "sprdemand". */
1746 static struct attribute_group sd_attr_group_gov_sys = {
1747 .attrs = dbs_attributes_gov_sys,
1748 .name = "sprdemand",
/* Per-policy attribute list (mirrors the gov_sys list; used when the
 * cpufreq core runs with per-policy governor tunables). */
1751 static struct attribute *dbs_attributes_gov_pol[] = {
1752 &sampling_rate_min_gov_pol.attr,
1753 &sampling_rate_gov_pol.attr,
1754 &up_threshold_gov_pol.attr,
1755 &sampling_down_factor_gov_pol.attr,
1756 &ignore_nice_gov_pol.attr,
1757 &powersave_bias_gov_pol.attr,
1758 &io_is_busy_gov_pol.attr,
1759 &cpu_score_up_threshold_gov_pol.attr,
1760 &load_critical_gov_pol.attr,
1761 &load_hi_gov_pol.attr,
1762 &load_mid_gov_pol.attr,
1763 &load_light_gov_pol.attr,
1764 &load_lo_gov_pol.attr,
1765 &load_critical_score_gov_pol.attr,
1766 &load_hi_score_gov_pol.attr,
1767 &load_mid_score_gov_pol.attr,
1768 &load_light_score_gov_pol.attr,
1769 &load_lo_score_gov_pol.attr,
1770 &cpu_down_threshold_gov_pol.attr,
1771 &cpu_down_count_gov_pol.attr,
1772 &cpu_hotplug_disable_gov_pol.attr,
1773 &cpu_num_limit_gov_pol.attr,
1774 &cpu_up_mid_threshold_gov_pol.attr,
1775 &cpu_up_high_threshold_gov_pol.attr,
1776 &cpu_down_mid_threshold_gov_pol.attr,
1777 &cpu_down_high_threshold_gov_pol.attr,
1778 &window_size_gov_pol.attr,
/* sysfs group exposing the per-policy attributes under "sprdemand". */
1782 static struct attribute_group sd_attr_group_gov_pol = {
1783 .attrs = dbs_attributes_gov_pol,
1784 .name = "sprdemand",
1787 /************************** sysfs end ************************/
/*
 * sd_init - governor init callback: allocate and populate the tuners.
 * @dbs_data: governor-wide data to attach the new sd_dbs_tuners to.
 *
 * Chooses micro-accounting thresholds when per-CPU idle time in usecs is
 * available, otherwise the HZ-based defaults; fills in every hotplug
 * tunable from its DEF_*/LOAD_* macro, mirrors the result into the
 * global g_sd_tuners snapshot, and initialises the (un)plug work items.
 * Returns 0 on success (return statements elided in this extract).
 */
1789 static int sd_init(struct dbs_data *dbs_data)
1791 struct sd_dbs_tuners *tuners;
1795 tuners = kzalloc(sizeof(struct sd_dbs_tuners), GFP_KERNEL);
1798 pr_err("%s: kzalloc failed\n", __func__);
1803 idle_time = get_cpu_idle_time_us(cpu, NULL);
1805 if (idle_time != -1ULL) {
1806 /* Idle micro accounting is supported. Use finer thresholds */
1807 tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
1808 tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
1809 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
1811 * In nohz/micro accounting case we set the minimum frequency
1812 * not depending on HZ, but fixed (very low). The deferred
1813 * timer might skip some samples if idle/sleeping as needed.
1815 dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
1817 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
1818 tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
1819 DEF_FREQUENCY_DOWN_DIFFERENTIAL;
1821 /* For correct statistics, we need 10 ticks for each measure */
1822 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
1823 jiffies_to_usecs(10);
1826 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
1827 tuners->ignore_nice = 0;
1828 tuners->powersave_bias = 0;
1829 tuners->io_is_busy = should_io_be_busy();
/* Hotplug defaults: start disabled; re-enabled below when more than
 * one CPU is available. */
1831 tuners->cpu_hotplug_disable = true;
1832 tuners->is_suspend = false;
1833 tuners->cpu_score_up_threshold = DEF_CPU_SCORE_UP_THRESHOLD;
1834 tuners->load_critical = LOAD_CRITICAL;
1835 tuners->load_hi = LOAD_HI;
1836 tuners->load_mid = LOAD_MID;
1837 tuners->load_light = LOAD_LIGHT;
1838 tuners->load_lo = LOAD_LO;
1839 tuners->load_critical_score = LOAD_CRITICAL_SCORE;
1840 tuners->load_hi_score = LOAD_HI_SCORE;
1841 tuners->load_mid_score = LOAD_MID_SCORE;
1842 tuners->load_light_score = LOAD_LIGHT_SCORE;
1843 tuners->load_lo_score = LOAD_LO_SCORE;
1844 tuners->cpu_down_threshold = DEF_CPU_LOAD_DOWN_THRESHOLD;
1845 tuners->cpu_down_count = DEF_CPU_DOWN_COUNT;
1846 tuners->cpu_up_mid_threshold = DEF_CPU_UP_MID_THRESHOLD;
1847 tuners->cpu_up_high_threshold = DEF_CPU_UP_HIGH_THRESHOLD;
1848 tuners->cpu_down_mid_threshold = DEF_CPU_DOWN_MID_THRESHOLD;
1849 tuners->cpu_down_high_threshold = DEF_CPU_DOWN_HIGH_THRESHOLD;
1850 tuners->window_size = LOAD_WINDOW_SIZE;
1851 tuners->cpu_num_limit = nr_cpu_ids;
1852 if (tuners->cpu_num_limit > 1)
1853 tuners->cpu_hotplug_disable = false;
/* Keep a global copy for paths that run before/without dbs_data
 * (see sprd_thm_unplug_cpu and the PM notifier). */
1855 memcpy(g_sd_tuners,tuners,sizeof(struct sd_dbs_tuners));
1857 dbs_data->tuners = tuners;
1858 mutex_init(&dbs_data->mutex);
1860 INIT_DELAYED_WORK(&plugin_work, sprd_plugin_one_cpu);
1861 INIT_DELAYED_WORK(&unplug_work, sprd_unplug_one_cpu);
1862 INIT_WORK(&thm_unplug_work, sprd_thm_unplug_cpu);
1863 INIT_WORK(&plugin_all_work, sprd_plugin_all_cpu);
1864 INIT_WORK(&unplug_all_work, sprd_unplug_all_cpu);
/* Per-CPU unplug work, each carrying a back-pointer to dbs_data. */
1867 for_each_possible_cpu(i) {
1868 puwi = &per_cpu(uwi, i);
1870 puwi->dbs_data = dbs_data;
1871 INIT_DELAYED_WORK(&puwi->unplug_work, sprd_unplug_one_cpu);
/* sd_exit - governor exit callback: free the tuners allocated in
 * sd_init(). (g_sd_tuners is intentionally kept alive.) */
1878 static void sd_exit(struct dbs_data *dbs_data)
1880 kfree(dbs_data->tuners);
1883 define_get_cpu_dbs_routines(sd_cpu_dbs_info);
/* Ondemand-style ops: powersave-bias init/target and the frequency
 * increase helper used by the sampling path. */
1885 static struct od_ops sd_ops = {
1886 .powersave_bias_init_cpu = sprdemand_powersave_bias_init_cpu,
1887 .powersave_bias_target = generic_powersave_bias_target,
1888 .freq_increase = dbs_freq_increase,
/* Glue handed to the common governor framework: attribute groups,
 * per-CPU accessors, and the timer/check callbacks. */
1891 static struct common_dbs_data sd_dbs_cdata = {
1892 /* sprdemand belong to ondemand gov */
1893 .governor = GOV_ONDEMAND,
1894 .attr_group_gov_sys = &sd_attr_group_gov_sys,
1895 .attr_group_gov_pol = &sd_attr_group_gov_pol,
1896 .get_cpu_cdbs = get_cpu_cdbs,
1897 .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
1898 .gov_dbs_timer = sd_dbs_timer,
1899 .gov_check_cpu = sd_check_cpu,
/* Governor entry point: forward every governor event (START/STOP/
 * LIMITS/...) to the common framework with our sd_dbs_cdata glue. */
1905 static int sd_cpufreq_governor_dbs(struct cpufreq_policy *policy,
1908 return cpufreq_governor_dbs(policy, &sd_dbs_cdata, event);
/* The "sprdemand" governor object registered with the cpufreq core.
 * The #ifndef presumably adds `static` when sprdemand is not the
 * default governor (the alternate branch is elided here). */
1911 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
1914 struct cpufreq_governor cpufreq_gov_sprdemand = {
1915 .name = "sprdemand",
1916 .governor = sd_cpufreq_governor_dbs,
1917 .max_transition_latency = TRANSITION_LATENCY_LIMIT,
1918 .owner = THIS_MODULE,
/*
 * sprd_thm_unplug_cpu - thermal-driven work item: offline CPUs until
 * only cpu_num_limit cores remain online, unless dynamic hotplug is
 * disabled. Falls back to the global g_sd_tuners snapshot when the
 * governor's dbs_data is not (yet) available.
 *
 * NOTE(review): cpufreq_cpu_get(0) is dereferenced without a NULL
 * check, and no matching cpufreq_cpu_put() is visible in this extract
 * — confirm both against the full source (possible refcount leak).
 */
1921 static void sprd_thm_unplug_cpu(struct work_struct *work)
1923 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
1924 struct dbs_data *dbs_data = policy->governor_data;
1925 struct sd_dbs_tuners *sd_tuners = NULL;
1926 int cpuid, max_core, cpus, i;
1928 if(NULL == dbs_data)
1930 pr_info("%s return\n", __func__);
1931 if (g_sd_tuners == NULL)
1933 sd_tuners = g_sd_tuners;
1937 sd_tuners = dbs_data->tuners;
1940 #ifdef CONFIG_HOTPLUG_CPU
1941 cpus = num_online_cpus();
/* Take down (cpus - max_core) CPUs, always picking the next online
 * CPU after cpu0 so the boot CPU itself is never offlined. */
1942 max_core = sd_tuners->cpu_num_limit;
1943 for (i = 0; i < cpus - max_core; ++i){
1944 if (!sd_tuners->cpu_hotplug_disable) {
1945 cpuid = cpumask_next(0, cpu_online_mask);
1946 pr_info("!! we gonna unplug cpu%d !!\n", cpuid);
1947 if (cpu_down(cpuid)){
1948 pr_info("unplug cpu%d failed!\n", cpuid);
/*
 * PM notifier: on suspend/hibernate entry, clamp hotplug to one CPU,
 * offline the others, pin cpu0's frequency, and set the suspend flags;
 * on resume (PM_POST_SUSPEND) restore the saved cpu_num_limit.
 * Falls back to g_sd_tuners when dbs_data is unavailable.
 *
 * NOTE(review): cpufreq_cpu_get(0) is used without a visible NULL
 * check or cpufreq_cpu_put(), and the suspend target frequency is the
 * magic constant 1000000 kHz — confirm both against the full source.
 */
1956 static int sprdemand_gov_pm_notifier_call(struct notifier_block *nb,
1957 unsigned long event, void *dummy)
1959 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
1960 struct dbs_data *dbs_data = policy->governor_data;
1961 struct sd_dbs_tuners *sd_tuners = NULL;
1963 if (NULL == dbs_data) {
1964 pr_info("sprdemand_gov_pm_notifier_call governor %s return\n", policy->governor->name);
1965 if (g_sd_tuners == NULL)
1967 sd_tuners = g_sd_tuners;
1969 sd_tuners = dbs_data->tuners;
1972 /* in suspend and hibernation process, we need set frequency to the orignal
1973 * one to make sure all things go right */
1974 if (event == PM_SUSPEND_PREPARE || event == PM_HIBERNATION_PREPARE) {
1975 pr_info(" %s, recv pm suspend notify\n", __func__ );
1976 cpu_num_limit_temp = sd_tuners->cpu_num_limit;
1977 sd_tuners->cpu_num_limit = 1;
1979 if (!sd_tuners->cpu_hotplug_disable)
1980 schedule_work_on(0, &unplug_all_work);
1981 cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_H);
1983 sd_tuners->is_suspend = true;
1984 g_is_suspend = true;
1985 pr_info(" %s, recv pm suspend notify done\n", __func__ );
1987 if (event == PM_POST_SUSPEND) {
1988 sd_tuners->is_suspend = false;
1989 g_is_suspend = false;
1990 sd_tuners->cpu_num_limit = cpu_num_limit_temp ;
/* Notifier block registered with the PM core in cpufreq_gov_dbs_init(). */
1996 static struct notifier_block sprdemand_gov_pm_notifier = {
1997 .notifier_call = sprdemand_gov_pm_notifier_call,
2000 #ifdef CONFIG_TOUCH_BOOST
/*
 * Touch-boost work: jump cpu's policy to its max frequency on input
 * activity, then re-baseline the idle counters so the boost is not
 * misread as load. Skipped while suspended or without a policy.
 * NOTE(review): runs via queue_work_on(0, ...) so smp_processor_id()
 * presumably resolves to cpu0 — confirm against the full source.
 */
2001 static void dbs_refresh_callback(struct work_struct *work)
2003 unsigned int cpu = smp_processor_id();
2004 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(sd_cpu_dbs_info,
2006 struct cpufreq_policy *policy;
2008 policy = core_dbs_info->cdbs.cur_policy;
2010 if (!policy || g_is_suspend) {
2014 if (policy->cur < policy->max) {
2015 cpufreq_driver_target(policy,
2016 policy->max, CPUFREQ_RELATION_H);
/* Bump the touch-boost counter consumed by dbs_input_event(). */
2017 atomic_add(5, &g_atomic_tb_cnt);
2019 core_dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(cpu,
2020 &core_dbs_info->cdbs.prev_cpu_wall,
2021 should_io_be_busy());
/*
 * Input-event hook: rate-limited trigger for the touch-boost work.
 * Ignores events during boot (boot_done grace period) and from devices
 * other than the two whitelisted touchscreens; throttles re-triggering
 * via tp_time and the g_atomic_tb_cnt counter.
 */
2025 static void dbs_input_event(struct input_handle *handle, unsigned int type,
2026 unsigned int code, int value)
2031 if (time_before(jiffies, boot_done))
/* Only boost for the known touch-panel devices (strcmp == 0 match). */
2034 if (strcmp(handle->dev->name, "focaltech_ts")&&
2035 strcmp(handle->dev->name,"msg2138_ts"))
2038 if (time_after(jiffies, tp_time) && !atomic_read(&g_atomic_tb_cnt))
2039 tp_time = jiffies + HZ / 2;
2046 if (!dvfs_plug_select)
2049 if (jiffies <= (tp_time + 10)) {
/* Always run the boost work on cpu0. */
2054 ret = queue_work_on(0, input_wq, &dbs_refresh_work);
2055 pr_debug("[DVFS] dbs_input_event %d\n",ret);
/*
 * Input-handler connect: allocate a handle, register and open the
 * device. The error labels (elided `err1:`/`err2:`) unwind in reverse
 * order — unregister on open failure, then free the handle.
 */
2059 static int dbs_input_connect(struct input_handler *handler,
2060 struct input_dev *dev, const struct input_device_id *id)
2062 struct input_handle *handle;
2065 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
2070 handle->handler = handler;
2071 handle->name = "cpufreq";
2073 error = input_register_handle(handle);
2077 error = input_open_device(handle);
2081 pr_debug("[DVFS] dbs_input_connect register success\n");
2084 pr_info("[DVFS] dbs_input_connect register fail err1\n");
2085 input_unregister_handle(handle);
2087 pr_info("[DVFS] dbs_input_connect register fail err2\n");
/* Input-handler disconnect: close and unregister the handle.
 * NOTE(review): the matching kfree(handle) is not visible in this
 * extract — confirm it exists in the full source. */
2092 static void dbs_input_disconnect(struct input_handle *handle)
2094 input_close_device(handle);
2095 input_unregister_handle(handle);
/* Match table: an entry with no match flags matches every input device
 * (per-device filtering happens in dbs_input_event()). */
2099 static const struct input_device_id dbs_ids[] = {
2100 { .driver_info = 1 },
/*
 * Touch-boost kthread body: (in the elided loop, presumably gated on
 * tb_sem) runs the boost callback directly and schedules a CPU plug-in
 * when fewer than 3 CPUs are online.
 * NOTE(review): declared with an empty K&R parameter list and void
 * return, but passed to kthread_create(), which expects
 * int (*)(void *) — confirm/fix the signature in the full source.
 */
2104 void sprd_tb_thread()
2108 dbs_refresh_callback(NULL);
2109 if (num_online_cpus() < 3)
2110 schedule_delayed_work_on(0, &plugin_work, 0);
/* Input handler wiring the touch-boost callbacks into the input core. */
2114 struct input_handler dbs_input_handler = {
2115 .event = dbs_input_event,
2116 .connect = dbs_input_connect,
2117 .disconnect = dbs_input_disconnect,
2118 .name = "cpufreq_ond",
2119 .id_table = dbs_ids,
/* Task struct for the touch-boost kthread, stopped in module exit. */
2123 static struct task_struct *ksprd_tb;
/*
 * Module init: arm the boot grace period, register the PM notifier
 * (except on the listed boards), allocate the global tuners snapshot,
 * set up the touch-boost workqueue/handler/thread, and finally register
 * the governor with the cpufreq core.
 * NOTE(review): error paths for kzalloc/kthread_create are elided
 * here — confirm they are handled in the full source.
 */
2124 static int __init cpufreq_gov_dbs_init(void)
2127 boot_done = jiffies + GOVERNOR_BOOT_TIME;
2128 #if !(defined(CONFIG_MACH_SP9838AEA_5MOD) || defined(CONFIG_MACH_SP9838AEA_4CORE) || defined(CONFIG_MACH_SP9838AEA_8CORE_LIGHT_SLEEP) || defined(CONFIG_MACH_SS_SHARKLT8))
2129 register_pm_notifier(&sprdemand_gov_pm_notifier);
2131 g_sd_tuners = kzalloc(sizeof(struct sd_dbs_tuners), GFP_KERNEL);
2133 #ifdef CONFIG_TOUCH_BOOST
/* Single-threaded, reclaim-safe workqueue for input-boost work. */
2135 input_wq = alloc_workqueue("iewq", WQ_MEM_RECLAIM|WQ_SYSFS, 1);
2139 printk(KERN_ERR "Failed to create iewq workqueue\n");
2143 INIT_WORK(&dbs_refresh_work, dbs_refresh_callback);
2147 if(input_register_handler(&dbs_input_handler))
2149 pr_err("[DVFS] input_register_handler failed\n");
2152 sema_init(&tb_sem, 0);
2154 ksprd_tb = kthread_create(sprd_tb_thread, NULL, "sprd_tb_thread");
2156 wake_up_process(ksprd_tb);
2159 return cpufreq_register_governor(&cpufreq_gov_sprdemand);
/*
 * Module exit: unregister the governor and PM notifier, and tear down
 * the touch-boost input handler and kthread.
 * NOTE(review): the PM notifier is registered conditionally in init
 * but unregistered unconditionally here; unregister_pm_notifier()
 * tolerates an unregistered block, so this is benign but asymmetric.
 */
2162 static void __exit cpufreq_gov_dbs_exit(void)
2164 cpufreq_unregister_governor(&cpufreq_gov_sprdemand);
2165 unregister_pm_notifier(&sprdemand_gov_pm_notifier);
2167 #ifdef CONFIG_TOUCH_BOOST
2168 input_unregister_handler(&dbs_input_handler);
2169 kthread_stop(ksprd_tb);
/* Module metadata and entry points. When sprdemand is the default
 * governor it must be available early (fs_initcall); otherwise it is
 * a regular module_init. */
2173 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
2174 MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
2175 MODULE_DESCRIPTION("'cpufreq_sprdemand' - A dynamic cpufreq governor for "
2176 "Low Latency Frequency Transition capable processors");
2177 MODULE_LICENSE("GPL");
2179 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
2180 fs_initcall(cpufreq_gov_dbs_init);
2182 module_init(cpufreq_gov_dbs_init);
2184 module_exit(cpufreq_gov_dbs_exit);