drm: Cocci spatch "memdup.spatch"
[profile/mobile/platform/kernel/linux-3.10-sc7730.git] / drivers / cpufreq / cpufreq_sprdemand.c
1 /*
2  *  drivers/cpufreq/cpufreq_sprdemand.c
3  *
4  *  Copyright (C)  2001 Russell King
5  *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
6  *                      Jun Nakajima <jun.nakajima@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/cpufreq.h>
16 #include <linux/init.h>
17 #include <linux/kernel.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/kobject.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/percpu-defs.h>
23 #include <linux/slab.h>
24 #include <linux/sysfs.h>
25 #include <linux/tick.h>
26 #include <linux/types.h>
27 #include <linux/cpu.h>
28 #include <linux/thermal.h>
29 #include <linux/err.h>
30 #include <linux/earlysuspend.h>
31 #include <linux/suspend.h>
32 #include <asm/cacheflush.h>
33 #include <linux/kthread.h>
34 #include <linux/delay.h>
35
36 #include "cpufreq_governor.h"
37 #include <linux/input.h>
38 #include <linux/sprd_cpu_cooling.h>
39 #include <linux/platform_device.h>
40 #ifdef CONFIG_OF
41 #include <linux/of_device.h>
42 #endif
43
44 /* On-demand governor macros */
45 #define DEF_FREQUENCY_DOWN_DIFFERENTIAL         (10)
46 #define DEF_FREQUENCY_UP_THRESHOLD              (80)
47 #define DEF_SAMPLING_DOWN_FACTOR                (1)
48 #define MAX_SAMPLING_DOWN_FACTOR                (100000)
49 #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL       (10)
50 #define MICRO_FREQUENCY_UP_THRESHOLD            (80)
51 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE         (10000)
52 #define MIN_FREQUENCY_UP_THRESHOLD              (11)
53 #define MAX_FREQUENCY_UP_THRESHOLD              (100)
54
55 /* whether plugin cpu according to this score up threshold */
56 #define DEF_CPU_SCORE_UP_THRESHOLD              (100)
57 /* whether unplug cpu according to this down threshold*/
58 #define DEF_CPU_LOAD_DOWN_THRESHOLD             (30)
59 #define DEF_CPU_DOWN_COUNT                      (3)
60
61 #define LOAD_CRITICAL 100
62 #define LOAD_HI 90
63 #define LOAD_MID 80
64 #define LOAD_LIGHT 50
65 #define LOAD_LO 0
66
67 #define LOAD_CRITICAL_SCORE 10
68 #define LOAD_HI_SCORE 5
69 #define LOAD_MID_SCORE 0
70 #define LOAD_LIGHT_SCORE -10
71 #define LOAD_LO_SCORE -20
72
73 #define DEF_CPU_UP_MID_THRESHOLD                (80)
74 #define DEF_CPU_UP_HIGH_THRESHOLD               (90)
75 #define DEF_CPU_DOWN_MID_THRESHOLD              (30)
76 #define DEF_CPU_DOWN_HIGH_THRESHOLD             (40)
77
#define GOVERNOR_BOOT_TIME      (50*HZ)
/*
 * NOTE(review): presumably set to jiffies + GOVERNOR_BOOT_TIME during init
 * so hotplug decisions are suppressed while booting — the assignment is not
 * visible in this chunk; confirm against the init path.
 */
static unsigned long boot_done;

/* declared non-static: also read/written outside this file (see extern users) */
unsigned int cpu_hotplug_disable_set = false;
static int g_is_suspend = false;

#if 0
/* disabled experiment: per-cpu unplug work items, kept for reference */
struct unplug_work_info {
        unsigned int cpuid;
        struct delayed_work unplug_work;
        struct dbs_data *dbs_data;
};
static DEFINE_PER_CPU(struct unplug_work_info, uwi);
#endif

/* shared hotplug work items scheduled by the governor and thermal path */
struct delayed_work plugin_work;
struct delayed_work unplug_work;
struct work_struct thm_unplug_work;
struct work_struct plugin_all_work;
struct work_struct unplug_all_work;
static int cpu_num_limit_temp;
static void sprd_thm_unplug_cpu(struct work_struct *work);

/*
 * NOTE(review): the only definition of struct unplug_work_info visible here
 * is inside the #if 0 block above — presumably it is also declared in a
 * shared header; verify this still compiles with that block disabled.
 */
static DEFINE_PER_CPU(struct unplug_work_info, uwi);

static DEFINE_SPINLOCK(g_lock);
static unsigned int percpu_total_load[CONFIG_NR_CPUS] = {0};
static unsigned int percpu_check_count[CONFIG_NR_CPUS] = {0};
static int cpu_score = 0;

/* FIXME. default touch boost is enabled */
#define CONFIG_TOUCH_BOOST

#ifdef CONFIG_TOUCH_BOOST
/* touch-boost worker thread and its wakeup bookkeeping */
static struct task_struct *ksprd_tb;
atomic_t g_atomic_tb_cnt = ATOMIC_INIT(0);
struct semaphore tb_sem;
static unsigned long tp_time;

#if 0
static struct workqueue_struct *input_wq;
static struct work_struct dbs_refresh_work;
#endif

#endif

/* per-cpu governor state (load tracking, freq table cache, powersave bias) */
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, sd_cpu_dbs_info);

static struct od_ops sd_ops;

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
static struct cpufreq_governor cpufreq_gov_sprdemand;
#endif

static void update_sampling_rate(struct dbs_data *dbs_data,     unsigned int new_rate);
133
134 static void sprdemand_powersave_bias_init_cpu(int cpu)
135 {
136         struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info, cpu);
137
138         dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
139         dbs_info->freq_lo = 0;
140 }
141
142 /*
143  * Not all CPUs want IO time to be accounted as busy; this depends on how
144  * efficient idling at a higher frequency/voltage is.
145  * Pavel Machek says this is not so for various generations of AMD and old
146  * Intel systems.
147  * Mike Chan (android.com) claims this is also not true for ARM.
148  * Because of this, whitelist specific known (series) of CPUs by default, and
149  * leave all others up to the user.
150  */
151 static int should_io_be_busy(void)
152 {
153 #if defined(CONFIG_X86)
154         /*
155          * For Intel, Core 2 (model 15) and later have an efficient idle.
156          */
157         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
158                         boot_cpu_data.x86 == 6 &&
159                         boot_cpu_data.x86_model >= 15)
160                 return 1;
161 #endif
162         return 1;
163 }
164
165 struct sd_dbs_tuners *g_sd_tuners = NULL;
166 int cpu_core_thermal_limit(int cluster, int max_core)
167 {
168
169         struct sd_dbs_tuners *sd_tuners = g_sd_tuners;
170         int cpus = 0;
171         int i = 0;
172
173         if (sd_tuners->cpu_num_limit <=  max_core) {
174                 sd_tuners->cpu_num_limit = max_core;
175                 return 0;
176         }
177         sd_tuners->cpu_num_limit = max_core;
178         schedule_work_on(0, &thm_unplug_work);
179
180         return 0;
181 }
182
183 /*
184  * Find right freq to be set now with powersave_bias on.
185  * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
186  * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
187  */
188 static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
189                 unsigned int freq_next, unsigned int relation)
190 {
191         unsigned int freq_req, freq_reduc, freq_avg;
192         unsigned int freq_hi, freq_lo;
193         unsigned int index = 0;
194         unsigned int jiffies_total, jiffies_hi, jiffies_lo;
195         struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info,
196                                                    policy->cpu);
197         struct dbs_data *dbs_data = policy->governor_data;
198         struct sd_dbs_tuners *sd_tuners = NULL;
199
200         if (NULL == dbs_data) {
201                 pr_info("generic_powersave_bias_target governor %s return\n", policy->governor->name);
202                 if (g_sd_tuners == NULL)
203                         return freq_next;
204                 sd_tuners = g_sd_tuners;
205         } else {
206                 sd_tuners = dbs_data->tuners;
207         }
208
209         if (!dbs_info->freq_table) {
210                 dbs_info->freq_lo = 0;
211                 dbs_info->freq_lo_jiffies = 0;
212                 return freq_next;
213         }
214
215         cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
216                         relation, &index);
217         freq_req = dbs_info->freq_table[index].frequency;
218         freq_reduc = freq_req * sd_tuners->powersave_bias / 1000;
219         freq_avg = freq_req - freq_reduc;
220
221         /* Find freq bounds for freq_avg in freq_table */
222         index = 0;
223         cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
224                         CPUFREQ_RELATION_H, &index);
225         freq_lo = dbs_info->freq_table[index].frequency;
226         index = 0;
227         cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
228                         CPUFREQ_RELATION_L, &index);
229         freq_hi = dbs_info->freq_table[index].frequency;
230
231         /* Find out how long we have to be in hi and lo freqs */
232         if (freq_hi == freq_lo) {
233                 dbs_info->freq_lo = 0;
234                 dbs_info->freq_lo_jiffies = 0;
235                 return freq_lo;
236         }
237         jiffies_total = usecs_to_jiffies(sd_tuners->sampling_rate);
238         jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
239         jiffies_hi += ((freq_hi - freq_lo) / 2);
240         jiffies_hi /= (freq_hi - freq_lo);
241         jiffies_lo = jiffies_total - jiffies_hi;
242         dbs_info->freq_lo = freq_lo;
243         dbs_info->freq_lo_jiffies = jiffies_lo;
244         dbs_info->freq_hi_jiffies = jiffies_hi;
245         return freq_hi;
246 }
247
248 static void sprdemand_powersave_bias_init(void)
249 {
250         int i;
251         for_each_online_cpu(i) {
252                 sprdemand_powersave_bias_init_cpu(i);
253         }
254 }
255
/*
 * Raise policy @p toward @freq. With powersave_bias active the target is
 * replaced by the bias-reduced frequency; otherwise, if the policy is
 * already at max, nothing is done.
 */
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	struct dbs_data *dbs_data = p->governor_data;
	struct sd_dbs_tuners *sd_tuners = NULL;

	/* governor_data may be gone; fall back to the cached global tuners */
	if (NULL == dbs_data) {
		pr_info("dbs_freq_increase governor %s return\n", p->governor->name);
		if (g_sd_tuners == NULL)
			return ;
		sd_tuners = g_sd_tuners;
	} else {
		sd_tuners = dbs_data->tuners;
	}

	if (sd_tuners->powersave_bias)
		freq = sd_ops.powersave_bias_target(p, freq,
				CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	/* biased targets round down (RELATION_L); plain ones round up */
	__cpufreq_driver_target(p, freq, sd_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
279
280 static void sprd_unplug_one_cpu(struct work_struct *work)
281 {
282         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
283         struct dbs_data *dbs_data = policy->governor_data;
284         struct sd_dbs_tuners *sd_tuners = NULL;
285         int cpuid;
286
287         if (NULL == dbs_data) {
288                 pr_info("sprd_unplug_one_cpu return\n");
289                 if (g_sd_tuners == NULL)
290                         return ;
291                 sd_tuners = g_sd_tuners;
292         } else {
293                 sd_tuners = dbs_data->tuners;
294         }
295
296 #ifdef CONFIG_HOTPLUG_CPU
297         if (num_online_cpus() > 1) {
298                 if (!sd_tuners->cpu_hotplug_disable) {
299                         cpuid = cpumask_next(0, cpu_online_mask);
300                         pr_debug("!!  we gonna unplug cpu%d  !!\n", cpuid);
301                         cpu_down(cpuid);
302                 }
303         }
304 #endif
305         return;
306 }
307
308 static void sprd_plugin_one_cpu(struct work_struct *work)
309 {
310         int cpuid;
311         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
312         struct dbs_data *dbs_data = policy->governor_data;
313         struct sd_dbs_tuners *sd_tuners = NULL;
314
315         if (NULL == dbs_data) {
316                 pr_info("sprd_plugin_one_cpu return\n");
317                 if (g_sd_tuners == NULL)
318                         return ;
319                 sd_tuners = g_sd_tuners;
320         } else {
321                 sd_tuners = dbs_data->tuners;
322         }
323
324 #ifdef CONFIG_HOTPLUG_CPU
325         if (num_online_cpus() < sd_tuners->cpu_num_limit) {
326                 cpuid = cpumask_next_zero(0, cpu_online_mask);
327                 if (!sd_tuners->cpu_hotplug_disable) {
328                         pr_debug("!!  we gonna plugin cpu%d  !!\n", cpuid);
329                         cpu_up(cpuid);
330                 }
331         }
332 #endif
333         return;
334 }
335
336 static void sprd_unplug_all_cpu(struct work_struct *work)
337 {
338         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
339         struct dbs_data *dbs_data = policy->governor_data;
340         struct sd_dbs_tuners *sd_tuners = NULL;
341         int cpu;
342
343         if (NULL == dbs_data) {
344                 pr_info("sprd_unplug_all_cpu return\n");
345                 if (g_sd_tuners == NULL)
346                         return ;
347                 sd_tuners = g_sd_tuners;
348         } else {
349                 sd_tuners = dbs_data->tuners;
350         }
351
352 #ifdef CONFIG_HOTPLUG_CPU
353         if (num_online_cpus() > 1) {
354                 for_each_online_cpu(cpu) {
355                         if (0 == cpu)
356                                 continue;
357                         pr_info("!!  all gonna unplug cpu%d  !!\n", cpu);
358                         cpu_down(cpu);
359                 }
360         }
361 #endif
362         return;
363 }
364
365 static void sprd_plugin_all_cpu(struct work_struct *work)
366 {
367         int cpu;
368         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
369         struct dbs_data *dbs_data = policy->governor_data;
370         struct sd_dbs_tuners *sd_tuners = NULL;
371
372         if (NULL == dbs_data) {
373                 pr_info("sprd_plugin_all_cpu return\n");
374                 if (g_sd_tuners == NULL)
375                         return ;
376                 sd_tuners = g_sd_tuners;
377         } else {
378                 sd_tuners = dbs_data->tuners;
379         }
380
381 #ifdef CONFIG_HOTPLUG_CPU
382         if (num_online_cpus() < sd_tuners->cpu_num_limit) {
383                 for_each_possible_cpu(cpu) {
384                         if (!cpu_online(cpu)) {
385                                 pr_info("!! all gonna plugin cpu%d  !!\n",
386                                                 cpu);
387                                 cpu_up(cpu);
388                         }
389                 }
390         }
391 #endif
392         return;
393 }
394
/* last computed average load per cpu (written by the avg-load helpers) */
unsigned int percpu_load[4] = {0};
#define MAX_CPU_NUM  (4)
#define MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE  (10)
#define MAX_PLUG_AVG_LOAD_SIZE (2)

/* per-cpu ring buffer of recent load samples */
unsigned int ga_percpu_total_load[MAX_CPU_NUM][MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE] = {{0}};
extern unsigned int dvfs_unplug_select;
extern unsigned int dvfs_plug_select;

/* current / previous averaging-window length per cpu */
unsigned int cur_window_size[MAX_CPU_NUM] ={0};
unsigned int prev_window_size[MAX_CPU_NUM] ={0};

/* next write slot in the ring, and number of samples collected so far */
int cur_window_index[MAX_CPU_NUM] = {0};
unsigned int cur_window_cnt[MAX_CPU_NUM] = {0};
/* set once the first full window of samples has been seen */
int first_window_flag[4] = {0};

/* rolling sum of the samples inside the current window */
unsigned int sum_load[4] = {0};

unsigned int plug_avg_load[MAX_CPU_NUM][MAX_PLUG_AVG_LOAD_SIZE] = {{50}};
unsigned int plug_avg_load_index[MAX_CPU_NUM] = {0};

#define mod(n, div) ((n) % (div))

extern unsigned int dvfs_score_select;
extern unsigned int dvfs_score_hi[4];
extern unsigned int dvfs_score_mid[4];
extern unsigned int dvfs_score_critical[4];

/*
 * Hotplug score deltas, indexed in cpu_evaluate_score() as
 * a_score_sub[dvfs_score_select % 4][num_online_cpus() - 1][load / 10].
 */
int a_score_sub[4][4][11]=
{
        {
                {0,0,0,0,0,0,0,0,5,5,10},
                {-5,-5,0,0,0,0,0,0,0,5,5},
                {-10,-5,0,0,0,0,0,0,0,5,5},
                {0,0,0,0,0,0,0,0,0,0,0}
        },
        {
                {0,0,0,0,0,0,0,0,13,13,30},
                {-9,-9,0,0,0,0,0,0,9,9,10},
                {-18,-9,0,0,0,0,0,0,4,5,9},
                {0,0,0,0,0,0,0,0,0,0,0}
        },
        {
                {0,0,0,0,0,0,0,10,20,20,30},
                {0,0,0,0,0,0,0,5,10,10,20},
                {0,0,0,0,0,0,0,0,5,5,10},
                {0,0,0,0,0,0,0,0,0,0,0}
        },
        {
                {0,0,0,0,0,0,0,0,30,30,50},
                {-20,-20,0,0,0,0,0,0,20,20,30},
                {-40,-20,0,0,0,0,0,0,5,10,20},
                {0,0,0,0,0,0,0,0,0,0,0}
        }
};

/* sampling period (us) selected by avg_load / 10: higher load, faster sampling */
int ga_samp_rate[11] = {100000,100000,100000,100000,100000,100000,50000,50000,30000,30000,30000};

/*
 * Amount subtracted from MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE per load tier,
 * indexed as a_sub_windowsize[dvfs_unplug_select][tier] in sd_adjust_window().
 */
unsigned int a_sub_windowsize[8][6] =
{
        {0,0,0,0,0,0},
        {0,0,0,0,0,0},
        {4,5,5,6,7,7},
        {4,5,5,6,7,7},
        {3,4,4,5,6,6},
        {2,3,3,4,5,5},
        {1,2,2,3,4,4},
        {0,1,1,2,3,3}
};
464
/*
 * cpu_evaluate_score - map @cpu's load sample to a hotplug score delta
 *
 * For dvfs_score_select < 4 the score comes from threshold tables
 * (dvfs_score_critical/hi/mid and the tuners' light/lo scores) and the
 * sampling rate is tightened as load rises.  Otherwise the score is read
 * from a_score_sub[], boosted when the load jumped sharply (delta > 30
 * while load > 80).  The caller accumulates the returned score —
 * presumably into cpu_score against the plug/unplug thresholds; the
 * caller is not visible in this chunk.
 */
static int cpu_evaluate_score(int cpu, struct sd_dbs_tuners *sd_tunners , unsigned int load)
{
        int score = 0;
        /*
         * NOTE(review): "= {1}" initializes only rate[0] to 1; rate[1..3]
         * start at 0 until the first non-boost pass sets them — confirm
         * this is intended.
         */
        static int rate[4] = {1};
        int delta = 0;
        int a_samp_rate[5] = {30000,30000,50000,50000,50000};

        if(dvfs_score_select < 4)
        {
                if (load >= sd_tunners->load_critical)
                {
                        score = dvfs_score_critical[num_online_cpus()];
                        sd_tunners->sampling_rate = a_samp_rate[0];
                }
                else if (load >= sd_tunners->load_hi)
                {
                        score = dvfs_score_hi[num_online_cpus()];
                        sd_tunners->sampling_rate = a_samp_rate[1];
                }
                else if (load >= sd_tunners->load_mid)
                {
                        score = dvfs_score_mid[num_online_cpus()];
                        sd_tunners->sampling_rate = a_samp_rate[2];
                }
                else if (load >= sd_tunners->load_light)
                {
                        score = sd_tunners->load_light_score;
                        sd_tunners->sampling_rate = a_samp_rate[3];
                }
                else if (load >= sd_tunners->load_lo)
                {
                        score = sd_tunners->load_lo_score;
                        sd_tunners->sampling_rate = a_samp_rate[4];
                }
                else
                {
                        score = 0;
                        sd_tunners->sampling_rate = a_samp_rate[4];
                }

        }
        else
        {
                /* sharp load spike: temporarily amplify the table score */
                delta = abs(percpu_load[cpu] - load);
                if((delta > 30)
                        &&(load > 80))
                {
                        if (unlikely(rate[cpu] > 100))
                                rate[cpu] = 1;

                        rate[cpu] +=2;
                        score = a_score_sub[dvfs_score_select % 4][num_online_cpus() - 1][load/10] * rate[cpu];
                        rate[cpu] --;
                }
                else
                {
                        score = a_score_sub[dvfs_score_select % 4][num_online_cpus() - 1][load/10];
                        rate[cpu] = 1;
                }
        }
        pr_debug("[DVFS SCORE] rate[%d] %d load %d score %d\n",cpu,rate[cpu],load,score);
        return score;
}
528
529
530
531 static int sd_adjust_window(struct sd_dbs_tuners *sd_tunners , unsigned int load)
532 {
533         unsigned int cur_window_size = 0;
534
535         if (load >= sd_tunners->load_critical)
536                 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][0];
537         else if (load >= sd_tunners->load_hi)
538                 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][1];
539         else if (load >= sd_tunners->load_mid)
540                 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][2];
541         else if (load >= sd_tunners->load_light)
542                 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][3];
543         else if (load >= sd_tunners->load_lo)
544                 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][4];
545         else
546                 cur_window_size = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - a_sub_windowsize[dvfs_unplug_select][5];
547
548         return cur_window_size;
549 }
550
551 static unsigned int sd_unplug_avg_load(int cpu, struct sd_dbs_tuners *sd_tunners , unsigned int load)
552 {
553         int sum_idx_lo = 0;
554         unsigned int sum_idx_hi = 0;
555         unsigned int * p_valid_pos = NULL;
556         unsigned int sum_load = 0;
557
558         /*
559         initialize the window size for the first time
560         */
561         if(!cur_window_size[cpu])
562         {
563                 cur_window_size[cpu] = sd_adjust_window(sd_tunners,load);
564                 pr_debug("[DVFS_UNPLUG]cur_window_size[%d] = %d\n",cpu,cur_window_size[cpu]);
565                 return 100;
566         }
567         else
568         {
569                 /*
570                 record the load in the percpu array
571                 */
572                 ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
573                 cur_window_cnt[cpu]++;
574                 /*
575                 update the windw index
576                 */
577                 cur_window_index[cpu]++;
578                 cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
579
580                 /*
581                 window array is not full, break
582                 */
583                 if(cur_window_cnt[cpu] < cur_window_size[cpu])
584                 {
585                 return 100;
586                 }
587                 else
588                 {
589                         /*
590                         adjust the window index for it be added one more extra time
591                         */
592                         if(!cur_window_index[cpu])
593                         {
594                                 cur_window_index[cpu] = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - 1;
595                         }
596                         else
597                         {
598                                 cur_window_index[cpu]--;
599                         }
600                         /*
601                         find the valid position according to current window size and indexs
602                         */
603                         p_valid_pos = (unsigned int *)&ga_percpu_total_load[cpu][cur_window_index[cpu]];
604                         /*
605                         calculate the average load value by decrease the index, for we need the very updated value which locate in the end of the array
606                         */
607                         for(sum_idx_lo = 0; sum_idx_lo < cur_window_size[cpu]; sum_idx_lo++)
608                         {
609                                 /*
610                                 calculate the lower part
611                                 */
612                                 if((cur_window_index[cpu] - sum_idx_lo) >=0)
613                                 {
614                                         sum_load += *(unsigned int *)((unsigned int)p_valid_pos - sum_idx_lo * sizeof(p_valid_pos));
615                                 }
616                                 else
617                                 {
618                                         /*
619                                         calculate the higher part
620                                         */
621                                         sum_idx_hi = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - (cur_window_size[cpu] - sum_idx_lo);
622                                         for(; sum_idx_hi < MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE; sum_idx_hi++)
623                                         {
624                                                 sum_load += ga_percpu_total_load[cpu][sum_idx_hi];
625                                         }
626                                         break;
627
628                                 }
629
630                         }
631                         sum_load = sum_load / cur_window_size[cpu];
632                         /*
633                         adjust the window according to previews load
634                         */
635                         cur_window_size[cpu] = sd_adjust_window(sd_tunners, sum_load);
636                         cur_window_cnt[cpu] = 0;
637                         pr_debug("[DVFS_UNPLUG]cur_window_size %d sum_load %d\n",cur_window_size[cpu],sum_load);
638                 }
639                 return sum_load;
640         }
641
642 }
643
644
/*
 * sd_unplug_avg_load1 - rolling average of @cpu's load with adaptive window
 *
 * Keeps a per-cpu ring of samples plus a running sum so that, once warmed
 * up, each call is O(1) (old tail out, new head in). The window is only
 * recomputed from scratch when its size changed since the previous call.
 * Returns LOAD_LIGHT while the first window is still filling, otherwise
 * the window average. Also writes the average to percpu_load[cpu] and
 * retunes the sampling rate from ga_samp_rate[].
 */
static unsigned int sd_unplug_avg_load1(int cpu, struct sd_dbs_tuners *sd_tunners , unsigned int load)
{
	int avg_load = 0;
	int cur_window_pos = 0;
	int cur_window_pos_tail = 0;
	int idx = 0;

	/*
	 * Initialize the window size for the first time.
	 * cur_window_cnt[cpu] will be cleared when the core is unplugged.
	 */
	if((!first_window_flag[cpu])
		||(!cur_window_size[cpu]))
	{
		if(!cur_window_size[cpu])
		{
			cur_window_size[cpu] = sd_adjust_window(sd_tunners,load);
			prev_window_size[cpu] = cur_window_size[cpu];
		}
		if(cur_window_cnt[cpu] < (cur_window_size[cpu] - 1))
		{
			/* record the load in the percpu array */
			ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
			/* update the window index */
			cur_window_index[cpu]++;
			cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);

			cur_window_cnt[cpu]++;

			sum_load[cpu] += load;

			/* still warming up: report a neutral light load */
			return LOAD_LIGHT;
		}
		else
		{
			first_window_flag[cpu] = 1;
		}
	}
	/* record the load in the percpu array */
	ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
	/* update the window index */
	cur_window_index[cpu]++;
	cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);

	/* cur_window_pos = slot holding the newest sample (index minus one,
	 * wrapped) */
	if(!cur_window_index[cpu])
	{
		cur_window_pos = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - 1;
	}
	else
	{
		cur_window_pos =  cur_window_index[cpu] - 1;
	}

	/*
	 * tail = (c_w_p + max_window_size - c_w_s) % max_window_size
	 * tail = (2 + 8 - 5) % 8 = 5
	 * tail = (6 + 8 - 5) % 8 = 1
	 */
	cur_window_pos_tail = mod(MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE + cur_window_pos - cur_window_size[cpu],MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);

	/* no window size change: O(1) rolling-sum update */
	if(prev_window_size[cpu] == cur_window_size[cpu] )
	{
		sum_load[cpu] = sum_load[cpu] + ga_percpu_total_load[cpu][cur_window_pos] - ga_percpu_total_load[cpu][cur_window_pos_tail] ;
	}
	else
	{
		/* window size changed: recalculate the sum from scratch */
		sum_load[cpu] = 0;
		while(idx < cur_window_size[cpu])
		{
			sum_load[cpu] += ga_percpu_total_load[cpu][mod(cur_window_pos_tail + 1 +idx,MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE)];
			idx++;
		}
	}
	avg_load = sum_load[cpu] / cur_window_size[cpu];

	percpu_load[cpu] = avg_load;

	prev_window_size[cpu] = cur_window_size[cpu];

	/* only re-size the window when the new sample exceeds the average */
	cur_window_size[cpu] = (load > avg_load) ? sd_adjust_window(sd_tunners, load) : prev_window_size[cpu];

	/* higher average load selects a faster sampling period */
	sd_tunners->sampling_rate = ga_samp_rate[mod(avg_load/10,11)];

	pr_debug("[DVFS_UNPLUG]sum_load[%d]=%d tail[%d]=%d cur[%d]=%d  cur_window_size %d load %d avg_load %d\n",cpu,sum_load[cpu],cur_window_pos_tail,
		ga_percpu_total_load[cpu][cur_window_pos_tail],cur_window_pos,ga_percpu_total_load[cpu][cur_window_pos],cur_window_size[cpu],load,avg_load);
	/* averages above 100 indicate ring/sum bookkeeping went wrong */
	if(avg_load > 100)
	{
		pr_info("cur_window_pos %d cur_window_pos_tail %d load %d sum_load %d\n",cur_window_pos,cur_window_pos_tail,load,sum_load[cpu] );
	}
	return avg_load;

}
754
/*
 * sd_unplug_avg_load11 - fixed-window variant of sd_unplug_avg_load1()
 *
 * Same warm-up and ring bookkeeping as sd_unplug_avg_load1(), but the
 * window size is set once at warm-up and never adapted afterwards, so the
 * rolling-sum update is always valid and each call is O(1). Returns
 * LOAD_LIGHT during warm-up, otherwise the window average; also updates
 * percpu_load[cpu] and the sampling rate.
 */
static unsigned int sd_unplug_avg_load11(int cpu, struct sd_dbs_tuners *sd_tunners , unsigned int load)
{
	int avg_load = 0;
	int cur_window_pos = 0;
	int cur_window_pos_tail = 0;
	int idx = 0;
	/*
	 * Initialize the window size for the first time.
	 * cur_window_cnt[cpu] will be cleared when the core is unplugged.
	 */
	if((!first_window_flag[cpu])
		||(!cur_window_size[cpu]))
	{
		if(!cur_window_size[cpu])
		{
			cur_window_size[cpu] = sd_adjust_window(sd_tunners,load);
			prev_window_size[cpu] = cur_window_size[cpu];
		}
		if(cur_window_cnt[cpu] < (cur_window_size[cpu] - 1))
		{
			/* record the load in the percpu array */
			ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
			/* update the window index */
			cur_window_index[cpu]++;
			cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);

			cur_window_cnt[cpu]++;

			sum_load[cpu] += load;

			/* still warming up: report a neutral light load */
			return LOAD_LIGHT;
		}
		else
		{
			first_window_flag[cpu] = 1;
		}
	}
	/* record the load in the percpu array */
	ga_percpu_total_load[cpu][cur_window_index[cpu]] = load;
	/* update the window index */
	cur_window_index[cpu]++;
	cur_window_index[cpu] = mod(cur_window_index[cpu], MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);

	/* cur_window_pos = slot holding the newest sample */
	if(!cur_window_index[cpu])
	{
		cur_window_pos = MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE - 1;
	}
	else
	{
		cur_window_pos =  cur_window_index[cpu] - 1;
	}

	/*
	 * tail = (c_w_p + max_window_size - c_w_s) % max_window_size
	 * tail = (2 + 8 - 5) % 8 = 5
	 * tail = (6 + 8 - 5) % 8 = 1
	 */
	cur_window_pos_tail = mod(MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE + cur_window_pos - cur_window_size[cpu],MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);

	/* rolling sum: add the newest sample, drop the tail sample */
	sum_load[cpu] = sum_load[cpu] + ga_percpu_total_load[cpu][cur_window_pos] - ga_percpu_total_load[cpu][cur_window_pos_tail] ;

	/* calc the average load */
	avg_load = sum_load[cpu] / cur_window_size[cpu];

	percpu_load[cpu] = avg_load;

	/* higher average load selects a faster sampling period */
	sd_tunners->sampling_rate = ga_samp_rate[mod(avg_load/10,11)];

	return avg_load;
}
841
/* Ring-buffer length for the per-cpu load history used by sd_avg_load(). */
#define MAX_ARRAY_SIZE  (10)
/* NOTE(review): LOAD_WINDOW_SIZE is not referenced in this file's visible
 * code — sd_tuners->window_size is used instead; confirm before removing. */
#define LOAD_WINDOW_SIZE  (3)
/* load_array[cpu] holds the last MAX_ARRAY_SIZE load samples for that cpu */
unsigned int load_array[CONFIG_NR_CPUS][MAX_ARRAY_SIZE] = { {0} };
/* next write position into load_array[cpu]; wraps at MAX_ARRAY_SIZE */
unsigned int window_index[CONFIG_NR_CPUS] = {0};
846
847 static unsigned int sd_avg_load(int cpu, struct sd_dbs_tuners *sd_tuners,
848                         unsigned int load)
849 {
850         unsigned int count;
851         unsigned int scale;
852         unsigned int sum_scale = 0;
853         unsigned int sum_load = 0;
854         unsigned int window_tail = 0, window_head = 0;
855
856         load_array[cpu][window_index[cpu]] = load;
857         window_index[cpu]++;
858         window_index[cpu] = mod(window_index[cpu], MAX_ARRAY_SIZE);
859         if(!window_index[cpu])
860                 window_tail = MAX_ARRAY_SIZE - 1;
861         else
862                 window_tail = window_index[cpu] - 1;
863
864         window_head = mod(MAX_ARRAY_SIZE + window_tail -
865                         sd_tuners->window_size + 1, MAX_ARRAY_SIZE);
866         for (scale = 1, count = 0; count < sd_tuners->window_size;
867                         scale += scale, count++) {
868                 pr_debug("load_array[%d][%d]: %d, scale: %d\n",
869                         cpu, window_head, load_array[cpu][window_head], scale);
870                 sum_load += (load_array[cpu][window_head] * scale);
871                 sum_scale += scale;
872                 window_head++;
873                 window_head = mod(window_head, MAX_ARRAY_SIZE);
874         }
875
876         return sum_load / sum_scale;
877 }
878
879 /*
880  * Every sampling_rate, we check, if current idle time is less than 20%
881  * (default), then we try to increase frequency. Every sampling_rate, we look
882  * for the lowest frequency which can sustain the load while keeping idle time
 * over 30%. If such a frequency exists, we try to decrease to this frequency.
884  *
885  * Any frequency increase takes it to the maximum frequency. Frequency reduction
886  * happens at minimum steps of 5% (default) of current frequency
887  */
888 static void sd_check_cpu(int cpu, unsigned int load)
889 {
890         struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info, cpu);
891         struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
892         struct dbs_data *dbs_data = policy->governor_data;
893         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
894         unsigned int itself_avg_load = 0;
895         int local_cpu = 0;
896
897         if (time_before(jiffies, boot_done))
898                 return;
899
900         local_cpu = smp_processor_id();
901
902         if (local_cpu)
903                 return;
904
905         /* skip cpufreq adjustment if system enter into suspend */
906         if (true == sd_tuners->is_suspend) {
907                 pr_info("%s: is_suspend=%s, skip cpufreq adjust\n",
908                         __func__, sd_tuners->is_suspend?"true":"false");
909                 goto plug_check;
910         }
911
912         dbs_info->freq_lo = 0;
913         pr_debug("efficient load %d, cur freq %d, online CPUs %d\n",
914                         load, policy->cur, num_online_cpus());
915
916 #ifdef CONFIG_TOUCH_BOOST
917         if (atomic_read(&g_atomic_tb_cnt)) {
918                 atomic_sub_return(1, &g_atomic_tb_cnt);
919                 goto plug_check;
920         }
921 #endif
922
923         /* Check for frequency increase */
924         if (load > sd_tuners->up_threshold) {
925                 /* If switching to max speed, apply sampling_down_factor */
926                 if (policy->cur < policy->max)
927                         dbs_info->rate_mult =
928                                 sd_tuners->sampling_down_factor;
929                 if (num_online_cpus() == sd_tuners->cpu_num_limit)
930                         dbs_freq_increase(policy, policy->max);
931                 else
932                         dbs_freq_increase(policy, policy->max-1);
933                 goto plug_check;
934         }
935
936         /* Check for frequency decrease */
937         /* if we cannot reduce the frequency anymore, break out early */
938         if (policy->cur == policy->min)
939                 goto plug_check;
940
941         /*
942          * The optimal frequency is the frequency that is the lowest that can
943          * support the current CPU usage without triggering the up policy. To be
944          * safe, we focus 3 points under the threshold.
945          */
946         if (load < sd_tuners->adj_up_threshold) {
947                 unsigned int freq_next;
948                 unsigned int load_freq;
949                 load_freq = load * policy->cur;
950                 freq_next = load_freq / sd_tuners->adj_up_threshold;
951                 /* No longer fully busy, reset rate_mult */
952                 dbs_info->rate_mult = 1;
953
954                 if (freq_next < policy->min)
955                         freq_next = policy->min;
956
957                 if (!sd_tuners->powersave_bias) {
958                         __cpufreq_driver_target(policy, freq_next,
959                                         CPUFREQ_RELATION_L);
960                         goto plug_check;
961                 }
962
963                 freq_next = sd_ops.powersave_bias_target(policy, freq_next,
964                                         CPUFREQ_RELATION_L);
965                 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
966
967         }
968
969 plug_check:
970
971         /* skip cpu hotplug check if hotplug is disabled */
972         if (sd_tuners->cpu_hotplug_disable)
973                 return;
974
975         /* cpu plugin check */
976         itself_avg_load = sd_avg_load(cpu, sd_tuners, load);
977         pr_debug(" itself_avg_load %d\n", itself_avg_load);
978         if (num_online_cpus() < sd_tuners->cpu_num_limit) {
979                 int cpu_up_threshold;
980
981                 if (num_online_cpus() == 1)
982                         cpu_up_threshold = sd_tuners->cpu_up_mid_threshold;
983                 else
984                         cpu_up_threshold = sd_tuners->cpu_up_high_threshold;
985
986                 if (itself_avg_load > cpu_up_threshold) {
987                         schedule_delayed_work_on(0, &plugin_work, 0);
988                         return;
989                 }
990         }
991
992         /* cpu unplug check */
993         if (num_online_cpus() > 1) {
994                 int cpu_down_threshold;
995
996                 if (num_online_cpus() > 2)
997                         cpu_down_threshold = sd_tuners->cpu_down_high_threshold;
998                 else
999                         cpu_down_threshold = sd_tuners->cpu_down_mid_threshold;
1000
1001                 if (itself_avg_load < cpu_down_threshold)
1002                         schedule_delayed_work_on(0, &unplug_work, 0);
1003         }
1004
1005 #if 0
1006         itself_avg_load = sd_unplug_avg_load1(local_cpu, sd_tuners, load);
1007         /* cpu plugin check */
1008         if(num_online_cpus() < sd_tuners->cpu_num_limit) {
1009                 cpu_score += cpu_evaluate_score(policy->cpu,sd_tuners, itself_avg_load);
1010                 if (cpu_score < 0)
1011                         cpu_score = 0;
1012                 if (cpu_score >= sd_tuners->cpu_score_up_threshold) {
1013                         pr_debug("cpu_score=%d, begin plugin cpu!\n", cpu_score);
1014                         cpu_score = 0;
1015                         schedule_delayed_work_on(0, &plugin_work, 0);
1016                         return;
1017                 }
1018         }
1019
1020
1021         /* cpu unplug check */
1022         puwi = &per_cpu(uwi, local_cpu);
1023         if((num_online_cpus() > 1) && (dvfs_unplug_select == 1)){
1024                 percpu_total_load[local_cpu] += load;
1025                 percpu_check_count[local_cpu]++;
1026                 if(percpu_check_count[cpu] == sd_tuners->cpu_down_count) {
1027                         /* calculate itself's average load */
1028                         itself_avg_load = percpu_total_load[local_cpu]/sd_tuners->cpu_down_count;
1029                         pr_debug("check unplug: for cpu%u avg_load=%d\n", local_cpu, itself_avg_load);
1030                         if(itself_avg_load < sd_tuners->cpu_down_threshold) {
1031                                         pr_info("cpu%u's avg_load=%d,begin unplug cpu\n",
1032                                                 policy->cpu, itself_avg_load);
1033                                         schedule_delayed_work_on(0, &unplug_work, 0);
1034                         }
1035                         percpu_check_count[local_cpu] = 0;
1036                         percpu_total_load[local_cpu] = 0;
1037                 }
1038         }
1039         else if((num_online_cpus() > 1) && (dvfs_unplug_select == 2))
1040         {
1041                 /* calculate itself's average load */
1042                 pr_debug("check unplug: for cpu%u avg_load=%d\n", local_cpu, itself_avg_load);
1043                 if(itself_avg_load < sd_tuners->cpu_down_threshold)
1044                 {
1045                                 pr_info("cpu%u's avg_load=%d,begin unplug cpu\n",
1046                                                 local_cpu, itself_avg_load);
1047                                 percpu_load[local_cpu] = 0;
1048                                 cur_window_size[local_cpu] = 0;
1049                                 cur_window_index[local_cpu] = 0;
1050                                 cur_window_cnt[local_cpu] = 0;
1051                                 prev_window_size[local_cpu] = 0;
1052                                 first_window_flag[local_cpu] = 0;
1053                                 sum_load[local_cpu] = 0;
1054                                 memset(&ga_percpu_total_load[local_cpu][0],0,sizeof(int) * MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
1055                                 schedule_delayed_work_on(0, &unplug_work, 0);
1056                 }
1057         }
1058         else if((num_online_cpus() > 1) && (dvfs_unplug_select > 2))
1059         {
1060                 /* calculate itself's average load */
1061                 itself_avg_load = sd_unplug_avg_load11(local_cpu, sd_tuners, load);
1062                 pr_debug("check unplug: for cpu%u avg_load=%d\n", local_cpu, itself_avg_load);
1063                 if(itself_avg_load < sd_tuners->cpu_down_threshold)
1064                 {
1065                                 pr_info("cpu%u's avg_load=%d,begin unplug cpu\n",
1066                                                 local_cpu, itself_avg_load);
1067                                 percpu_load[local_cpu] = 0;
1068                                 cur_window_size[local_cpu] = 0;
1069                                 cur_window_index[local_cpu] = 0;
1070                                 cur_window_cnt[local_cpu] = 0;
1071                                 prev_window_size[local_cpu] = 0;
1072                                 first_window_flag[local_cpu] = 0;
1073                                 sum_load[local_cpu] = 0;
1074                                 memset(&ga_percpu_total_load[local_cpu][0],0,sizeof(int) * MAX_PERCPU_TOTAL_LOAD_WINDOW_SIZE);
1075                                 schedule_delayed_work_on(0, &unplug_work, 0);
1076                 }
1077         }
1078 #endif
1079 }
1080
/*
 * sd_dbs_timer - deferred-work handler driving the periodic sample.
 *
 * Runs dbs_check_cpu() (which calls sd_check_cpu()) on CPU0, then
 * re-queues itself with a delay derived from sampling_rate * rate_mult.
 * An OD_SUB_SAMPLE pass briefly drops to freq_lo to realize the
 * powersave-bias duty cycle.
 */
static void sd_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(sd_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = false;

	/* sampling is only performed from CPU0 */
	if (smp_processor_id())
		return;

	/* CPUFREQ_GOV_STOP will set cur_policy as NULL*/
	if (NULL == core_dbs_info->cdbs.cur_policy) {
		pr_err("%s cur_policy is cleared, just exit\n", __func__);
		return;
	}

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	/* during boot, only re-arm the timer with the max delay */
	if (time_before(jiffies, boot_done))
		goto max_delay;

	if (!need_load_eval(&core_dbs_info->cdbs, sd_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		/* second half of the powersave-bias cycle: drop to freq_lo */
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	/* fall back to the regular sampling period when no sub-sample delay */
	if (!delay)
		delay = delay_for_sampling_rate(sd_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
1134
1135 /************************** sysfs interface ************************/
1136 static struct common_dbs_data sd_dbs_cdata;
1137
1138 /**
1139  * update_sampling_rate - update sampling rate effective immediately if needed.
1140  * @new_rate: new sampling rate
1141  *
1142  * If new rate is smaller than the old, simply updating
1143  * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
1144  * original sampling_rate was 1 second and the requested new sampling rate is 10
1145  * ms because the user needs immediate reaction from ondemand governor, but not
1146  * sure if higher frequency will be required or not, then, the governor may
1147  * change the sampling rate too late; up to 1 second later. Thus, if we are
1148  * reducing the sampling rate, we need to make the new value effective
1149  * immediately.
1150  */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
	int cpu;

	/* never go below the governor's minimum sampling rate */
	sd_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		/* only touch CPUs governed by sprdemand */
		if (policy->governor != &cpufreq_gov_sprdemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(sd_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		/* re-queue early only if the pending sample is too far out */
		if (time_before(next_sampling, appointed_at)) {

			/*
			 * The mutex must be dropped around
			 * cancel_delayed_work_sync() because the work item
			 * itself takes timer_mutex (deadlock otherwise).
			 * NOTE(review): this drop/retake window mirrors the
			 * upstream ondemand code of this era, which was known
			 * to race with a concurrent governor stop — verify
			 * against CPUFREQ_GOV_STOP handling.
			 */
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}
1198
1199 static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
1200                 size_t count)
1201 {
1202         unsigned int input;
1203         int ret;
1204         ret = sscanf(buf, "%u", &input);
1205         if (ret != 1)
1206                 return -EINVAL;
1207
1208         update_sampling_rate(dbs_data, input);
1209         return count;
1210 }
1211
1212 static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
1213                 size_t count)
1214 {
1215         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1216         unsigned int input;
1217         int ret;
1218         unsigned int j;
1219
1220         ret = sscanf(buf, "%u", &input);
1221         if (ret != 1)
1222                 return -EINVAL;
1223         sd_tuners->io_is_busy = !!input;
1224
1225         /* we need to re-evaluate prev_cpu_idle */
1226         for_each_online_cpu(j) {
1227                 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info,
1228                                                                         j);
1229                 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
1230                         &dbs_info->cdbs.prev_cpu_wall, sd_tuners->io_is_busy);
1231         }
1232         return count;
1233 }
1234
1235 static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
1236                 size_t count)
1237 {
1238         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1239         unsigned int input;
1240         int ret;
1241         ret = sscanf(buf, "%u", &input);
1242
1243         if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
1244                         input < MIN_FREQUENCY_UP_THRESHOLD) {
1245                 return -EINVAL;
1246         }
1247         /* Calculate the new adj_up_threshold */
1248         sd_tuners->adj_up_threshold += input;
1249         sd_tuners->adj_up_threshold -= sd_tuners->up_threshold;
1250
1251         sd_tuners->up_threshold = input;
1252         return count;
1253 }
1254
1255 static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
1256                 const char *buf, size_t count)
1257 {
1258         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1259         unsigned int input, j;
1260         int ret;
1261         ret = sscanf(buf, "%u", &input);
1262
1263         if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
1264                 return -EINVAL;
1265         sd_tuners->sampling_down_factor = input;
1266
1267         /* Reset down sampling multiplier in case it was active */
1268         for_each_online_cpu(j) {
1269                 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(sd_cpu_dbs_info,
1270                                 j);
1271                 dbs_info->rate_mult = 1;
1272         }
1273         return count;
1274 }
1275
1276 static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
1277                 size_t count)
1278 {
1279         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1280         unsigned int input;
1281         int ret;
1282
1283         unsigned int j;
1284
1285         ret = sscanf(buf, "%u", &input);
1286         if (ret != 1)
1287                 return -EINVAL;
1288
1289         if (input > 1)
1290                 input = 1;
1291
1292         if (input == sd_tuners->ignore_nice) { /* nothing to do */
1293                 return count;
1294         }
1295         sd_tuners->ignore_nice = input;
1296
1297         /* we need to re-evaluate prev_cpu_idle */
1298         for_each_online_cpu(j) {
1299                 struct od_cpu_dbs_info_s *dbs_info;
1300                 dbs_info = &per_cpu(sd_cpu_dbs_info, j);
1301                 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
1302                         &dbs_info->cdbs.prev_cpu_wall, sd_tuners->io_is_busy);
1303                 if (sd_tuners->ignore_nice)
1304                         dbs_info->cdbs.prev_cpu_nice =
1305                                 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
1306
1307         }
1308         return count;
1309 }
1310
1311 static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
1312                 size_t count)
1313 {
1314         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1315         unsigned int input;
1316         int ret;
1317         ret = sscanf(buf, "%u", &input);
1318
1319         if (ret != 1)
1320                 return -EINVAL;
1321
1322         if (input > 1000)
1323                 input = 1000;
1324
1325         sd_tuners->powersave_bias = input;
1326         sprdemand_powersave_bias_init();
1327         return count;
1328 }
1329
1330 static ssize_t store_cpu_num_limit(struct dbs_data *dbs_data, const char *buf,
1331                 size_t count)
1332 {
1333         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1334         unsigned int input;
1335         int ret;
1336         ret = sscanf(buf, "%u", &input);
1337
1338         if (ret != 1) {
1339                 return -EINVAL;
1340         }
1341         sd_tuners->cpu_num_limit = input;
1342         return count;
1343 }
1344
1345 static ssize_t store_cpu_score_up_threshold(struct dbs_data *dbs_data, const char *buf,
1346                 size_t count)
1347 {
1348         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1349         unsigned int input;
1350         int ret;
1351         ret = sscanf(buf, "%u", &input);
1352
1353         if (ret != 1) {
1354                 return -EINVAL;
1355         }
1356         sd_tuners->cpu_score_up_threshold = input;
1357         return count;
1358 }
1359
1360 static ssize_t store_load_critical(struct dbs_data *dbs_data, const char *buf,
1361                 size_t count)
1362 {
1363         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1364         unsigned int input;
1365         int ret;
1366         ret = sscanf(buf, "%u", &input);
1367
1368         if (ret != 1) {
1369                 return -EINVAL;
1370         }
1371         sd_tuners->load_critical = input;
1372         return count;
1373 }
1374
1375 static ssize_t store_load_hi(struct dbs_data *dbs_data, const char *buf,
1376                 size_t count)
1377 {
1378         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1379         unsigned int input;
1380         int ret;
1381         ret = sscanf(buf, "%u", &input);
1382
1383         if (ret != 1) {
1384                 return -EINVAL;
1385         }
1386         sd_tuners->load_hi = input;
1387         return count;
1388 }
1389
1390 static ssize_t store_load_mid(struct dbs_data *dbs_data, const char *buf,
1391                 size_t count)
1392 {
1393         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1394         unsigned int input;
1395         int ret;
1396         ret = sscanf(buf, "%u", &input);
1397
1398         if (ret != 1) {
1399                 return -EINVAL;
1400         }
1401         sd_tuners->load_mid = input;
1402         return count;
1403 }
1404
1405 static ssize_t store_load_light(struct dbs_data *dbs_data, const char *buf,
1406                 size_t count)
1407 {
1408         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1409         unsigned int input;
1410         int ret;
1411         ret = sscanf(buf, "%u", &input);
1412
1413         if (ret != 1) {
1414                 return -EINVAL;
1415         }
1416         sd_tuners->load_light = input;
1417         return count;
1418 }
1419
1420 static ssize_t store_load_lo(struct dbs_data *dbs_data, const char *buf,
1421                 size_t count)
1422 {
1423         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1424         unsigned int input;
1425         int ret;
1426         ret = sscanf(buf, "%u", &input);
1427
1428         if (ret != 1) {
1429                 return -EINVAL;
1430         }
1431         sd_tuners->load_lo = input;
1432         return count;
1433 }
1434
1435 static ssize_t store_load_critical_score(struct dbs_data *dbs_data, const char *buf,
1436                 size_t count)
1437 {
1438         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1439         int input;
1440         int ret;
1441         ret = sscanf(buf, "%d", &input);
1442
1443         if (ret != 1) {
1444                 return -EINVAL;
1445         }
1446         sd_tuners->load_critical_score = input;
1447         return count;
1448 }
1449
1450 static ssize_t store_load_hi_score(struct dbs_data *dbs_data, const char *buf,
1451                 size_t count)
1452 {
1453         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1454         int input;
1455         int ret;
1456         ret = sscanf(buf, "%d", &input);
1457
1458         if (ret != 1) {
1459                 return -EINVAL;
1460         }
1461         sd_tuners->load_hi_score = input;
1462         return count;
1463 }
1464
1465
1466 static ssize_t store_load_mid_score(struct dbs_data *dbs_data, const char *buf,
1467                 size_t count)
1468 {
1469         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1470         int input;
1471         int ret;
1472         ret = sscanf(buf, "%d", &input);
1473
1474         if (ret != 1) {
1475                 return -EINVAL;
1476         }
1477         sd_tuners->load_mid_score = input;
1478         return count;
1479 }
1480
1481 static ssize_t store_load_light_score(struct dbs_data *dbs_data, const char *buf,
1482                 size_t count)
1483 {
1484         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1485         int input;
1486         int ret;
1487         ret = sscanf(buf, "%d", &input);
1488
1489         if (ret != 1) {
1490                 return -EINVAL;
1491         }
1492         sd_tuners->load_light_score = input;
1493         return count;
1494 }
1495
1496 static ssize_t store_load_lo_score(struct dbs_data *dbs_data, const char *buf,
1497                 size_t count)
1498 {
1499         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1500         int input;
1501         int ret;
1502         ret = sscanf(buf, "%d", &input);
1503
1504         if (ret != 1) {
1505                 return -EINVAL;
1506         }
1507         sd_tuners->load_lo_score = input;
1508         return count;
1509 }
1510
1511 static ssize_t store_cpu_down_threshold(struct dbs_data *dbs_data, const char *buf,
1512                 size_t count)
1513 {
1514         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1515         unsigned int input;
1516         int ret;
1517         ret = sscanf(buf, "%u", &input);
1518
1519         if (ret != 1) {
1520                 return -EINVAL;
1521         }
1522         sd_tuners->cpu_down_threshold = input;
1523         return count;
1524 }
1525
1526 static ssize_t store_cpu_down_count(struct dbs_data *dbs_data, const char *buf,
1527                 size_t count)
1528 {
1529         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1530         unsigned int input;
1531         int ret;
1532         ret = sscanf(buf, "%u", &input);
1533
1534         if (ret != 1) {
1535                 return -EINVAL;
1536         }
1537         sd_tuners->cpu_down_count = input;
1538         return count;
1539 }
1540
1541 static ssize_t store_cpu_hotplug_disable(struct dbs_data *dbs_data, const char *buf,
1542                 size_t count)
1543 {
1544         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1545         unsigned int input, cpu;
1546         int ret;
1547         ret = sscanf(buf, "%u", &input);
1548
1549         if (ret != 1) {
1550                 return -EINVAL;
1551         }
1552
1553         if (sd_tuners->cpu_hotplug_disable == input) {
1554                 return count;
1555         }
1556         if (sd_tuners->cpu_num_limit > 1)
1557                 sd_tuners->cpu_hotplug_disable = input;
1558
1559         if (sd_tuners->cpu_hotplug_disable > 0)
1560                 cpu_hotplug_disable_set = true;
1561         else
1562                 cpu_hotplug_disable_set = false;
1563
1564         smp_wmb();
1565         /* plug-in all offline cpu mandatory if we didn't
1566          * enbale CPU_DYNAMIC_HOTPLUG
1567          */
1568 #ifdef CONFIG_HOTPLUG_CPU
1569         if (sd_tuners->cpu_hotplug_disable &&
1570                         num_online_cpus() < sd_tuners->cpu_num_limit) {
1571                 schedule_work_on(0, &plugin_all_work);
1572                 do {
1573                         msleep(5);
1574                         pr_debug("wait for all cpu online!\n");
1575                 } while (num_online_cpus() < sd_tuners->cpu_num_limit);
1576         }
1577 #endif
1578         return count;
1579 }
1580
1581 static ssize_t store_cpu_up_mid_threshold(struct dbs_data *dbs_data,
1582                 const char *buf, size_t count)
1583 {
1584         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1585         unsigned int input;
1586         int ret;
1587         ret = sscanf(buf, "%u", &input);
1588
1589         if (ret != 1)
1590                 return -EINVAL;
1591
1592         sd_tuners->cpu_up_mid_threshold = input;
1593         return count;
1594 }
1595
1596 static ssize_t store_cpu_up_high_threshold(struct dbs_data *dbs_data,
1597                 const char *buf, size_t count)
1598 {
1599         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1600         unsigned int input;
1601         int ret;
1602         ret = sscanf(buf, "%u", &input);
1603
1604         if (ret != 1)
1605                 return -EINVAL;
1606
1607         sd_tuners->cpu_up_high_threshold = input;
1608         return count;
1609 }
1610
1611 static ssize_t store_cpu_down_mid_threshold(struct dbs_data *dbs_data,
1612                 const char *buf, size_t count)
1613 {
1614         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1615         unsigned int input;
1616         int ret;
1617         ret = sscanf(buf, "%u", &input);
1618
1619         if (ret != 1)
1620                 return -EINVAL;
1621
1622         sd_tuners->cpu_down_mid_threshold = input;
1623         return count;
1624 }
1625
1626 static ssize_t store_cpu_down_high_threshold(struct dbs_data *dbs_data,
1627                 const char *buf, size_t count)
1628 {
1629         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1630         unsigned int input;
1631         int ret;
1632         ret = sscanf(buf, "%u", &input);
1633
1634         if (ret != 1)
1635                 return -EINVAL;
1636
1637         sd_tuners->cpu_down_high_threshold = input;
1638         return count;
1639 }
1640
1641 static ssize_t store_window_size(struct dbs_data *dbs_data,
1642                 const char *buf, size_t count)
1643 {
1644         struct sd_dbs_tuners *sd_tuners = dbs_data->tuners;
1645         unsigned int input;
1646         int ret;
1647         ret = sscanf(buf, "%u", &input);
1648
1649         if (ret != 1)
1650                 return -EINVAL;
1651
1652         if (input > MAX_ARRAY_SIZE || input < 1)
1653                 return -EINVAL;
1654
1655         sd_tuners->window_size = input;
1656         return count;
1657 }
1658
/*
 * Instantiate the show_/store_ sysfs accessor pairs for every tunable.
 * Tunables with hand-written store_* helpers above get extra parsing or
 * range validation; the rest use the generic macro bodies.
 */
show_store_one(sd, sampling_rate);
show_store_one(sd, io_is_busy);
show_store_one(sd, up_threshold);
show_store_one(sd, sampling_down_factor);
show_store_one(sd, ignore_nice);
show_store_one(sd, powersave_bias);
declare_show_sampling_rate_min(sd);
show_store_one(sd, cpu_score_up_threshold);
show_store_one(sd, load_critical);
show_store_one(sd, load_hi);
show_store_one(sd, load_mid);
show_store_one(sd, load_light);
show_store_one(sd, load_lo);
show_store_one(sd, load_critical_score);
show_store_one(sd, load_hi_score);
show_store_one(sd, load_mid_score);
show_store_one(sd, load_light_score);
show_store_one(sd, load_lo_score);
show_store_one(sd, cpu_down_threshold);
show_store_one(sd, cpu_down_count);
show_store_one(sd, cpu_hotplug_disable);
show_store_one(sd, cpu_num_limit);
show_store_one(sd, cpu_up_mid_threshold);
show_store_one(sd, cpu_up_high_threshold);
show_store_one(sd, cpu_down_mid_threshold);
show_store_one(sd, cpu_down_high_threshold);
show_store_one(sd, window_size);

/*
 * Generate the sysfs attribute objects for each tunable, in both the
 * governor-wide (gov_sys) and per-policy (gov_pol) flavours;
 * sampling_rate_min is read-only, everything else is read/write.
 */
gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
gov_sys_pol_attr_rw(cpu_score_up_threshold);
gov_sys_pol_attr_rw(load_critical);
gov_sys_pol_attr_rw(load_hi);
gov_sys_pol_attr_rw(load_mid);
gov_sys_pol_attr_rw(load_light);
gov_sys_pol_attr_rw(load_lo);
gov_sys_pol_attr_rw(load_critical_score);
gov_sys_pol_attr_rw(load_hi_score);
gov_sys_pol_attr_rw(load_mid_score);
gov_sys_pol_attr_rw(load_light_score);
gov_sys_pol_attr_rw(load_lo_score);
gov_sys_pol_attr_rw(cpu_down_threshold);
gov_sys_pol_attr_rw(cpu_down_count);
gov_sys_pol_attr_rw(cpu_hotplug_disable);
gov_sys_pol_attr_rw(cpu_num_limit);
gov_sys_pol_attr_rw(cpu_up_mid_threshold);
gov_sys_pol_attr_rw(cpu_up_high_threshold);
gov_sys_pol_attr_rw(cpu_down_mid_threshold);
gov_sys_pol_attr_rw(cpu_down_high_threshold);
gov_sys_pol_attr_rw(window_size);
1714
/*
 * Attributes exported when the governor uses one system-wide tunable set
 * (hooked up via attr_group_gov_sys in sd_dbs_cdata).
 */
static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_min_gov_sys.attr,
        &sampling_rate_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
        &ignore_nice_gov_sys.attr,
        &powersave_bias_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        &cpu_score_up_threshold_gov_sys.attr,
        &load_critical_gov_sys.attr,
        &load_hi_gov_sys.attr,
        &load_mid_gov_sys.attr,
        &load_light_gov_sys.attr,
        &load_lo_gov_sys.attr,
        &load_critical_score_gov_sys.attr,
        &load_hi_score_gov_sys.attr,
        &load_mid_score_gov_sys.attr,
        &load_light_score_gov_sys.attr,
        &load_lo_score_gov_sys.attr,
        &cpu_down_threshold_gov_sys.attr,
        &cpu_down_count_gov_sys.attr,
        &cpu_hotplug_disable_gov_sys.attr,
        &cpu_num_limit_gov_sys.attr,
        &cpu_up_mid_threshold_gov_sys.attr,
        &cpu_up_high_threshold_gov_sys.attr,
        &cpu_down_mid_threshold_gov_sys.attr,
        &cpu_down_high_threshold_gov_sys.attr,
        &window_size_gov_sys.attr,
        NULL
};
1745
/* System-wide sysfs group; appears as a "sprdemand" directory. */
static struct attribute_group sd_attr_group_gov_sys = {
        .attrs = dbs_attributes_gov_sys,
        .name = "sprdemand",
};
1750
/*
 * Attributes exported when each cpufreq policy carries its own tunable
 * set (hooked up via attr_group_gov_pol in sd_dbs_cdata).
 */
static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_min_gov_pol.attr,
        &sampling_rate_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
        &ignore_nice_gov_pol.attr,
        &powersave_bias_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        &cpu_score_up_threshold_gov_pol.attr,
        &load_critical_gov_pol.attr,
        &load_hi_gov_pol.attr,
        &load_mid_gov_pol.attr,
        &load_light_gov_pol.attr,
        &load_lo_gov_pol.attr,
        &load_critical_score_gov_pol.attr,
        &load_hi_score_gov_pol.attr,
        &load_mid_score_gov_pol.attr,
        &load_light_score_gov_pol.attr,
        &load_lo_score_gov_pol.attr,
        &cpu_down_threshold_gov_pol.attr,
        &cpu_down_count_gov_pol.attr,
        &cpu_hotplug_disable_gov_pol.attr,
        &cpu_num_limit_gov_pol.attr,
        &cpu_up_mid_threshold_gov_pol.attr,
        &cpu_up_high_threshold_gov_pol.attr,
        &cpu_down_mid_threshold_gov_pol.attr,
        &cpu_down_high_threshold_gov_pol.attr,
        &window_size_gov_pol.attr,
        NULL
};
1781
/* Per-policy sysfs group; appears as a "sprdemand" directory. */
static struct attribute_group sd_attr_group_gov_pol = {
        .attrs = dbs_attributes_gov_pol,
        .name = "sprdemand",
};
1786
1787 /************************** sysfs end ************************/
1788
1789 static int sd_init(struct dbs_data *dbs_data)
1790 {
1791         struct sd_dbs_tuners *tuners;
1792         u64 idle_time;
1793         int cpu, i;
1794
1795         tuners = kzalloc(sizeof(struct sd_dbs_tuners), GFP_KERNEL);
1796
1797         if (!tuners) {
1798                 pr_err("%s: kzalloc failed\n", __func__);
1799                 return -ENOMEM;
1800         }
1801
1802         cpu = get_cpu();
1803         idle_time = get_cpu_idle_time_us(cpu, NULL);
1804         put_cpu();
1805         if (idle_time != -1ULL) {
1806                 /* Idle micro accounting is supported. Use finer thresholds */
1807                 tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
1808                 tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
1809                         MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
1810                 /*
1811                  * In nohz/micro accounting case we set the minimum frequency
1812                  * not depending on HZ, but fixed (very low). The deferred
1813                  * timer might skip some samples if idle/sleeping as needed.
1814                 */
1815                 dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
1816         } else {
1817                 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
1818                 tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
1819                         DEF_FREQUENCY_DOWN_DIFFERENTIAL;
1820
1821                 /* For correct statistics, we need 10 ticks for each measure */
1822                 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
1823                         jiffies_to_usecs(10);
1824         }
1825
1826         tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
1827         tuners->ignore_nice = 0;
1828         tuners->powersave_bias = 0;
1829         tuners->io_is_busy = should_io_be_busy();
1830
1831         tuners->cpu_hotplug_disable = true;
1832         tuners->is_suspend = false;
1833         tuners->cpu_score_up_threshold = DEF_CPU_SCORE_UP_THRESHOLD;
1834         tuners->load_critical = LOAD_CRITICAL;
1835         tuners->load_hi = LOAD_HI;
1836         tuners->load_mid = LOAD_MID;
1837         tuners->load_light = LOAD_LIGHT;
1838         tuners->load_lo = LOAD_LO;
1839         tuners->load_critical_score = LOAD_CRITICAL_SCORE;
1840         tuners->load_hi_score = LOAD_HI_SCORE;
1841         tuners->load_mid_score = LOAD_MID_SCORE;
1842         tuners->load_light_score = LOAD_LIGHT_SCORE;
1843         tuners->load_lo_score = LOAD_LO_SCORE;
1844         tuners->cpu_down_threshold = DEF_CPU_LOAD_DOWN_THRESHOLD;
1845         tuners->cpu_down_count = DEF_CPU_DOWN_COUNT;
1846         tuners->cpu_up_mid_threshold = DEF_CPU_UP_MID_THRESHOLD;
1847         tuners->cpu_up_high_threshold = DEF_CPU_UP_HIGH_THRESHOLD;
1848         tuners->cpu_down_mid_threshold = DEF_CPU_DOWN_MID_THRESHOLD;
1849         tuners->cpu_down_high_threshold = DEF_CPU_DOWN_HIGH_THRESHOLD;
1850         tuners->window_size = LOAD_WINDOW_SIZE;
1851         tuners->cpu_num_limit = nr_cpu_ids;
1852         if (tuners->cpu_num_limit > 1)
1853                 tuners->cpu_hotplug_disable = false;
1854
1855         memcpy(g_sd_tuners,tuners,sizeof(struct sd_dbs_tuners));
1856
1857         dbs_data->tuners = tuners;
1858         mutex_init(&dbs_data->mutex);
1859
1860         INIT_DELAYED_WORK(&plugin_work, sprd_plugin_one_cpu);
1861         INIT_DELAYED_WORK(&unplug_work, sprd_unplug_one_cpu);
1862         INIT_WORK(&thm_unplug_work, sprd_thm_unplug_cpu);
1863         INIT_WORK(&plugin_all_work, sprd_plugin_all_cpu);
1864         INIT_WORK(&unplug_all_work, sprd_unplug_all_cpu);
1865
1866 #if 0
1867         for_each_possible_cpu(i) {
1868                 puwi = &per_cpu(uwi, i);
1869                 puwi->cpuid = i;
1870                 puwi->dbs_data = dbs_data;
1871                 INIT_DELAYED_WORK(&puwi->unplug_work, sprd_unplug_one_cpu);
1872         }
1873 #endif
1874
1875         return 0;
1876 }
1877
1878 static void sd_exit(struct dbs_data *dbs_data)
1879 {
1880         kfree(dbs_data->tuners);
1881 }
1882
/* Expands to get_cpu_cdbs()/get_cpu_dbs_info_s() accessors backed by the
 * per-cpu sd_cpu_dbs_info data. */
define_get_cpu_dbs_routines(sd_cpu_dbs_info);
1884
/* ondemand-style operations, all reused from the common governor code. */
static struct od_ops sd_ops = {
        .powersave_bias_init_cpu = sprdemand_powersave_bias_init_cpu,
        .powersave_bias_target = generic_powersave_bias_target,
        .freq_increase = dbs_freq_increase,
};
1890
/*
 * Glue handed to the common cpufreq_governor framework: sysfs groups,
 * per-cpu accessors, the timer/check callbacks and the init/exit hooks.
 */
static struct common_dbs_data sd_dbs_cdata = {
        /* sprdemand belong to ondemand gov */
        .governor = GOV_ONDEMAND,
        .attr_group_gov_sys = &sd_attr_group_gov_sys,
        .attr_group_gov_pol = &sd_attr_group_gov_pol,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = sd_dbs_timer,
        .gov_check_cpu = sd_check_cpu,
        .gov_ops = &sd_ops,
        .init = sd_init,
        .exit = sd_exit,
};
1904
/* Entry point the cpufreq core calls for governor events; delegates to
 * the shared governor implementation with our sd_dbs_cdata glue. */
static int sd_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                unsigned int event)
{
        return cpufreq_governor_dbs(policy, &sd_dbs_cdata, event);
}
1910
/*
 * Governor descriptor registered with the cpufreq core.  Left non-static
 * when sprdemand is the default governor so the core can reference it.
 */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_sprdemand = {
        .name                   = "sprdemand",
        .governor               = sd_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};
1920
1921 static void sprd_thm_unplug_cpu(struct work_struct *work)
1922 {
1923         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
1924         struct dbs_data *dbs_data = policy->governor_data;
1925         struct sd_dbs_tuners *sd_tuners = NULL;
1926         int cpuid, max_core, cpus, i;
1927
1928         if(NULL == dbs_data)
1929         {
1930                 pr_info("%s return\n", __func__);
1931                 if (g_sd_tuners == NULL)
1932                         return ;
1933                 sd_tuners = g_sd_tuners;
1934         }
1935         else
1936         {
1937                 sd_tuners = dbs_data->tuners;
1938         }
1939
1940 #ifdef CONFIG_HOTPLUG_CPU
1941         cpus = num_online_cpus();
1942         max_core = sd_tuners->cpu_num_limit;
1943         for (i = 0; i < cpus - max_core; ++i){
1944                 if (!sd_tuners->cpu_hotplug_disable) {
1945                         cpuid = cpumask_next(0, cpu_online_mask);
1946                         pr_info("!!  we gonna unplug cpu%d  !!\n", cpuid);
1947                         if (cpu_down(cpuid)){
1948                                 pr_info("unplug cpu%d failed!\n", cpuid);
1949                         }
1950                 }
1951         }
1952 #endif
1953         return;
1954 }
1955
1956 static int sprdemand_gov_pm_notifier_call(struct notifier_block *nb,
1957         unsigned long event, void *dummy)
1958 {
1959         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
1960         struct dbs_data *dbs_data = policy->governor_data;
1961         struct sd_dbs_tuners *sd_tuners = NULL;
1962
1963         if (NULL == dbs_data) {
1964                 pr_info("sprdemand_gov_pm_notifier_call governor %s return\n", policy->governor->name);
1965                 if (g_sd_tuners == NULL)
1966                         return NOTIFY_OK;
1967                 sd_tuners = g_sd_tuners;
1968         } else {
1969                 sd_tuners = dbs_data->tuners;
1970         }
1971
1972         /* in suspend and hibernation process, we need set frequency to the orignal
1973          * one to make sure all things go right */
1974         if (event == PM_SUSPEND_PREPARE || event == PM_HIBERNATION_PREPARE) {
1975                 pr_info(" %s, recv pm suspend notify\n", __func__ );
1976                 cpu_num_limit_temp = sd_tuners->cpu_num_limit;
1977                 sd_tuners->cpu_num_limit = 1;
1978
1979                 if (!sd_tuners->cpu_hotplug_disable)
1980                         schedule_work_on(0, &unplug_all_work);
1981                 cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_H);
1982
1983                 sd_tuners->is_suspend = true;
1984                 g_is_suspend = true;
1985                 pr_info(" %s, recv pm suspend notify done\n", __func__ );
1986         }
1987         if (event == PM_POST_SUSPEND) {
1988                 sd_tuners->is_suspend = false;
1989                 g_is_suspend = false;
1990                 sd_tuners->cpu_num_limit = cpu_num_limit_temp ;
1991         }
1992
1993         return NOTIFY_OK;
1994 }
1995
/* Delivers suspend/resume events to sprdemand_gov_pm_notifier_call(). */
static struct notifier_block sprdemand_gov_pm_notifier = {
        .notifier_call = sprdemand_gov_pm_notifier_call,
};
1999
2000 #ifdef CONFIG_TOUCH_BOOST
/*
 * Touch-boost worker: push the current CPU's policy to its maximum
 * frequency and refresh the idle-time baseline so the next governor
 * sample does not see a bogus load spike from the jump.
 *
 * NOTE(review): invoked from the sprd_tb kthread (see sprd_tb_thread),
 * which does not appear to be pinned to a CPU; smp_processor_id() in a
 * preemptible context can warn and the CPU may change under us --
 * confirm whether get_cpu()/put_cpu() is needed here.
 */
static void dbs_refresh_callback(struct work_struct *work)
{
        unsigned int cpu = smp_processor_id();
        struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(sd_cpu_dbs_info,
                        cpu);
        struct cpufreq_policy *policy;

        policy = core_dbs_info->cdbs.cur_policy;

        /* no policy yet, or suspending: leave the frequency alone */
        if (!policy || g_is_suspend) {
                return;
        }

        if (policy->cur < policy->max) {
                cpufreq_driver_target(policy,
                                policy->max, CPUFREQ_RELATION_H);
                /* non-zero g_atomic_tb_cnt suppresses further input boosts */
                atomic_add(5, &g_atomic_tb_cnt);

                core_dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(cpu,
                                &core_dbs_info->cdbs.prev_cpu_wall,
                                should_io_be_busy());
        }
}
2024
2025 static void dbs_input_event(struct input_handle *handle, unsigned int type,
2026                 unsigned int code, int value)
2027 {
2028         int i;
2029         bool ret;
2030
2031         if (time_before(jiffies, boot_done))
2032                 return;
2033
2034         if (strcmp(handle->dev->name, "focaltech_ts")&&
2035             strcmp(handle->dev->name,"msg2138_ts"))
2036                 return;
2037
2038         if (time_after(jiffies, tp_time) && !atomic_read(&g_atomic_tb_cnt))
2039                 tp_time = jiffies + HZ / 2;
2040         else
2041                 return;
2042
2043         up(&tb_sem);
2044
2045 #if 0
2046         if (!dvfs_plug_select)
2047                 return;
2048
2049         if (jiffies <= (tp_time + 10)) {
2050                 tp_time = jiffies;
2051                 return;
2052         }
2053         tp_time = jiffies;
2054         ret = queue_work_on(0, input_wq, &dbs_refresh_work);
2055         pr_debug("[DVFS] dbs_input_event %d\n",ret);
2056 #endif
2057 }
2058
2059 static int dbs_input_connect(struct input_handler *handler,
2060                 struct input_dev *dev, const struct input_device_id *id)
2061 {
2062         struct input_handle *handle;
2063         int error;
2064
2065         handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
2066         if (!handle)
2067                 return -ENOMEM;
2068
2069         handle->dev = dev;
2070         handle->handler = handler;
2071         handle->name = "cpufreq";
2072
2073         error = input_register_handle(handle);
2074         if (error)
2075                 goto err2;
2076
2077         error = input_open_device(handle);
2078         if (error)
2079                 goto err1;
2080
2081         pr_debug("[DVFS] dbs_input_connect register success\n");
2082         return 0;
2083 err1:
2084         pr_info("[DVFS] dbs_input_connect register fail err1\n");
2085         input_unregister_handle(handle);
2086 err2:
2087         pr_info("[DVFS] dbs_input_connect register fail err2\n");
2088         kfree(handle);
2089         return error;
2090 }
2091
/* Tear down a handle made by dbs_input_connect(): close the device
 * before unregistering the handle, and free the handle last. */
static void dbs_input_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}
2098
/* Catch-all id table (matches every input device); dbs_input_event()
 * filters by device name instead. */
static const struct input_device_id dbs_ids[] = {
        { .driver_info = 1 },
        { },
};
2103
2104 void sprd_tb_thread()
2105 {
2106         while (1) {
2107                 down(&tb_sem);
2108                 dbs_refresh_callback(NULL);
2109                 if (num_online_cpus() < 3)
2110                         schedule_delayed_work_on(0, &plugin_work, 0);
2111         }
2112 }
2113
/* Input handler feeding touch events into the touch-boost machinery. */
struct input_handler dbs_input_handler = {
        .event          = dbs_input_event,
        .connect        = dbs_input_connect,
        .disconnect     = dbs_input_disconnect,
        .name           = "cpufreq_ond",
        .id_table       = dbs_ids,
};
2121 #endif
2122
2123 static struct task_struct *ksprd_tb;
2124 static int __init cpufreq_gov_dbs_init(void)
2125 {
2126         int i = 0;
2127         boot_done = jiffies + GOVERNOR_BOOT_TIME;
2128 #if !(defined(CONFIG_MACH_SP9838AEA_5MOD) || defined(CONFIG_MACH_SP9838AEA_4CORE) || defined(CONFIG_MACH_SP9838AEA_8CORE_LIGHT_SLEEP) || defined(CONFIG_MACH_SS_SHARKLT8))
2129         register_pm_notifier(&sprdemand_gov_pm_notifier);
2130 #endif
2131         g_sd_tuners = kzalloc(sizeof(struct sd_dbs_tuners), GFP_KERNEL);
2132
2133 #ifdef CONFIG_TOUCH_BOOST
2134 #if 0
2135         input_wq = alloc_workqueue("iewq", WQ_MEM_RECLAIM|WQ_SYSFS, 1);
2136
2137         if (!input_wq)
2138         {
2139                 printk(KERN_ERR "Failed to create iewq workqueue\n");
2140                 return -EFAULT;
2141         }
2142
2143         INIT_WORK(&dbs_refresh_work, dbs_refresh_callback);
2144 #endif
2145         tp_time = jiffies;
2146
2147         if(input_register_handler(&dbs_input_handler))
2148         {
2149                 pr_err("[DVFS] input_register_handler failed\n");
2150         }
2151
2152         sema_init(&tb_sem, 0);
2153
2154         ksprd_tb = kthread_create(sprd_tb_thread, NULL, "sprd_tb_thread");
2155
2156         wake_up_process(ksprd_tb);
2157 #endif
2158
2159         return cpufreq_register_governor(&cpufreq_gov_sprdemand);
2160 }
2161
2162 static void __exit cpufreq_gov_dbs_exit(void)
2163 {
2164         cpufreq_unregister_governor(&cpufreq_gov_sprdemand);
2165         unregister_pm_notifier(&sprdemand_gov_pm_notifier);
2166
2167 #ifdef CONFIG_TOUCH_BOOST
2168         input_unregister_handler(&dbs_input_handler);
2169         kthread_stop(ksprd_tb);
2170 #endif
2171 }
2172
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_sprdemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

/*
 * When sprdemand is the default governor it must be available before
 * other subsystems start cpufreq, hence the earlier fs_initcall level;
 * otherwise normal module_init ordering suffices.
 */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);