2 * Copyright (C) 2013 Spreadtrum Communications Inc.
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/sched.h>
20 #include <linux/cpufreq.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/err.h>
24 #include <linux/clk.h>
26 #include <linux/debugfs.h>
27 #include <linux/cpu.h>
28 #include <linux/regulator/consumer.h>
29 //#include <asm/system.h>
30 #include <trace/events/power.h>
32 #include <soc/sprd/hardware.h>
33 #include <soc/sprd/regulator.h>
34 #include <soc/sprd/adi.h>
35 #include <soc/sprd/sci.h>
36 #include <soc/sprd/sci_glb_regs.h>
37 #include <soc/sprd/arch_misc.h>
38 #include <linux/pm_qos.h>
39 #include <linux/of_platform.h>
41 #if defined(CONFIG_ARCH_SC8825)
/*
 * SC8825 MPLL register layout: reference-clock selector (2/4/13 MHz) lives
 * in bits [17:16] of GR_MPLL_MN, the feedback divider N in bits [10:0].
 */
43 #define GR_MPLL_REFIN_2M (2 * MHz)
44 #define GR_MPLL_REFIN_4M (4 * MHz)
45 #define GR_MPLL_REFIN_13M (13 * MHz)
46 #define GR_MPLL_REFIN_SHIFT 16
47 #define GR_MPLL_REFIN_MASK (0x3)
48 #define GR_MPLL_N_MASK (0x7ff)
49 #define GR_MPLL_MN (REG_GLB_M_PLL_CTL0)
50 #define GR_GEN1 (REG_GLB_GEN1)
/* Driver-wide tunables. */
53 #define FREQ_TABLE_SIZE 10 /* max entries per cpufreq_frequency_table */
54 #define DVFS_BOOT_TIME (30 * HZ) /* hold off DVFS for 30 s after boot */
55 #define SHARK_TDPLL_FREQUENCY (768000) /* TD-PLL rate, kHz */
56 #define TRANSITION_LATENCY (50 * 1000) /* ns */
58 static DEFINE_MUTEX(freq_lock)
;
/* Last transition published to cpufreq core; .old tracks current rate. */
59 struct cpufreq_freqs global_freqs;
/* Per-core requested frequency (kHz); all cores share one clock domain. */
60 unsigned int percpu_target[CONFIG_NR_CPUS] = {0};
61 static unsigned long boot_done;
62 static unsigned int sprd_top_frequency; /* khz */
/*
 * NOTE(review): the following four fields belong to struct cpufreq_conf,
 * whose opening declaration is not visible in this chunk.
 */
68 struct regulator *regulator;
69 struct cpufreq_frequency_table *freq_tbl;
70 unsigned int *vddarm_mv;
71 unsigned int max_axi_freq;
/* Frequency table paired with per-step VDDARM voltages (mV). */
74 struct cpufreq_table_data {
75 struct cpufreq_frequency_table freq_tbl[FREQ_TABLE_SIZE];
76 unsigned int vddarm_mv[FREQ_TABLE_SIZE];
/* Active configuration, selected at module init by SoC type. */
79 struct cpufreq_conf *sprd_cpufreq_conf = NULL;
/* Serializes regulator_set_voltage() against thermal table updates. */
80 static struct mutex cpufreq_vddarm_lock;
82 #if defined(CONFIG_ARCH_SC8825)
/* SC8825 frequency table; interior entries are missing from this view. */
83 static struct cpufreq_table_data sc8825_cpufreq_table_data = {
87 {2, CPUFREQ_TABLE_END}
/* SC8825 configuration bound to the table above. */
94 struct cpufreq_conf sc8825_cpufreq_conf = {
97 .freq_tbl = sc8825_cpufreq_table_data.freq_tbl,
98 .vddarm_mv = sc8825_cpufreq_table_data.vddarm_mv,
/*
 * set_mcu_clk_freq() - program the SC8825 MCU clock to @mcu_freq (Hz) by
 * writing the AHB ARM clock divider directly.
 * NOTE(review): the rate->divider mapping between the computation of 'rate'
 * and the panic() is not visible in this chunk; unsupported rates panic.
 */
101 static void set_mcu_clk_freq(u32 mcu_freq)
103 u32 val, rate, arm_clk_div, gr_gen1;
105 rate = mcu_freq / MHz;
/* Unknown rate: halt rather than run the core at an undefined frequency. */
115 panic("set_mcu_clk_freq fault\n");
118 pr_debug("%s --- before, AHB_ARM_CLK: %08x, rate = %d, div = %d\n",
119 __func__, __raw_readl(REG_AHB_ARM_CLK), rate, arm_clk_div);
/* Read-modify-write GR_GEN1 around the divider update (bits not shown). */
121 gr_gen1 = __raw_readl(GR_GEN1);
123 __raw_writel(gr_gen1, GR_GEN1);
125 val = __raw_readl(REG_AHB_ARM_CLK);
128 __raw_writel(val, REG_AHB_ARM_CLK);
131 __raw_writel(gr_gen1, GR_GEN1);
133 pr_debug("%s --- after, AHB_ARM_CLK: %08x, rate = %d, div = %d\n",
134 __func__, __raw_readl(REG_AHB_ARM_CLK), rate, arm_clk_div);
/*
 * get_mcu_clk_freq() - compute the current SC8825 MCU frequency (Hz) from
 * the MPLL reference selector and feedback divider N (freq = refin * N),
 * then read the AHB ARM clock divider field.
 */
139 static unsigned int get_mcu_clk_freq(void)
141 u32 mpll_refin, mpll_n, mpll_cfg = 0, rate, val;
143 mpll_cfg = __raw_readl(GR_MPLL_MN);
145 mpll_refin = (mpll_cfg >> GR_MPLL_REFIN_SHIFT) & GR_MPLL_REFIN_MASK;
/* Selector decodes to one of the three supported reference clocks. */
148 mpll_refin = GR_MPLL_REFIN_2M;
152 mpll_refin = GR_MPLL_REFIN_4M;
155 mpll_refin = GR_MPLL_REFIN_13M;
/* NOTE(review): __FUNCTION__ is a GNU extension; kernel style is __func__. */
158 pr_err("%s mpll_refin: %d\n", __FUNCTION__, mpll_refin);
160 mpll_n = mpll_cfg & GR_MPLL_N_MASK;
161 rate = mpll_refin * mpll_n;
/* Low 3 bits of REG_AHB_ARM_CLK hold the ARM clock divider. */
164 val = __raw_readl(REG_AHB_ARM_CLK) & 0x7;
/*
 * Per-chip frequency/voltage tables. Each table ends with CPUFREQ_TABLE_END;
 * most interior entries (and all vddarm_mv initializers) are missing from
 * this view. Index values are the cpufreq table indices.
 */
170 static struct cpufreq_table_data sc8830_cpufreq_table_data_cs = {
174 {2, SHARK_TDPLL_FREQUENCY},
176 {4, CPUFREQ_TABLE_END},
190 static struct cpufreq_table_data sc7715_cpufreq_table_data = {
193 {1, SHARK_TDPLL_FREQUENCY},
195 {3, SHARK_TDPLL_FREQUENCY/2},
196 {4, CPUFREQ_TABLE_END},
208 static struct cpufreq_table_data sc8830_cpufreq_table_data_es = {
211 {1, SHARK_TDPLL_FREQUENCY},
212 {2, CPUFREQ_TABLE_END},
221 static struct cpufreq_table_data sc8830t_cpufreq_table_data_es = {
226 {3, SHARK_TDPLL_FREQUENCY},
227 {4, CPUFREQ_TABLE_END},
238 static struct cpufreq_table_data sc8830t_cpufreq_table_data_es_1300 = {
243 {3, SHARK_TDPLL_FREQUENCY},
244 {4, CPUFREQ_TABLE_END},
255 static struct cpufreq_table_data sc9630_cpufreq_table_data = {
261 {4, CPUFREQ_TABLE_END},
272 static struct cpufreq_table_data sc7720_cpufreq_table_data = {
276 {2, CPUFREQ_TABLE_END},
285 static struct cpufreq_table_data sc9631l64_cpufreq_table_data_es = {
289 {2, SHARK_TDPLL_FREQUENCY},
290 {3, CPUFREQ_TABLE_END},
299 static struct cpufreq_table_data sc9820_cpufreq_table_data = {
303 {2, CPUFREQ_TABLE_END},
/* SCX35-family configuration; freq_tbl/vddarm_mv filled in at init time. */
311 struct cpufreq_conf sc8830_cpufreq_conf = {
/*
 * cpufreq_table_thermal_update() - let the thermal driver retune the
 * voltage paired with frequency step @freq (kHz) to @voltage (mV).
 * Returns an error when the configuration or tables are absent, or when
 * @freq is not a table entry (exact return values not visible here).
 */
320 int cpufreq_table_thermal_update(unsigned int freq, unsigned int voltage)
322 struct cpufreq_frequency_table *freq_tbl;
323 unsigned int *vddarm;
326 if (NULL == sprd_cpufreq_conf)
328 freq_tbl = sprd_cpufreq_conf->freq_tbl;
329 vddarm = sprd_cpufreq_conf->vddarm_mv;
330 if (NULL == freq_tbl && NULL == vddarm)
/* Linear scan for the table index matching @freq. */
333 for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; ++i) {
334 if (freq_tbl[i].frequency == freq)
/*
 * NOTE(review): KERN_ERR must not be passed to pr_err() (it already adds a
 * level); also "isn't find" is a typo — fix message when the full body is
 * available.
 */
337 pr_err(KERN_ERR "%s cpufreq %dMHz isn't find!\n", __func__, freq);
340 printk(KERN_ERR "%s: %dMHz voltage is %dmV\n",
341 __func__, freq, voltage);
/* No-op when the requested voltage is already in the table. */
342 if (vddarm[i] == voltage)
345 mutex_lock(&cpufreq_vddarm_lock);
347 mutex_unlock(&cpufreq_vddarm_lock);
/*
 * sprd_raw_get_cpufreq() - current CPU frequency in kHz, read from the
 * clock framework on SCX35 or computed from MPLL registers on SC8825.
 */
352 static unsigned int sprd_raw_get_cpufreq(void)
354 #if defined(CONFIG_ARCH_SCX35)
355 return clk_get_rate(sprd_cpufreq_conf->clk) / 1000;
356 #elif defined(CONFIG_ARCH_SC8825)
357 return get_mcu_clk_freq() / 1000;
/* Previous CPU frequency (kHz) seen by the AXI-adjust path; 0 = first call. */
361 unsigned int last_freq = 0;
/*
 * dump_axi_cpu() - debug helper: print the CPU frequency, the CA7 AXI
 * divider read back from hardware, and the resulting AXI frequency.
 * The divider register differs per SoC generation.
 */
362 static void dump_axi_cpu(unsigned int freq)
366 #ifndef CONFIG_ARCH_SCX35L
367 div = sci_glb_read(REG_AP_AHB_CA7_CKG_CFG, -1UL);
/* AXI clock = CPU clock / (div + 1). */
370 axi_freq = freq / (div + 1);
371 #elif defined CONFIG_ARCH_SCX35LT8
372 div = sci_glb_read(REG_AP_AHB_CA7_CKG_DIV_CFG, -1UL);
375 axi_freq = freq / (div + 1);
377 printk("%s(%d): cpu_freq %d, div %d, axi_freq %d\n", __func__, __LINE__,
378 freq, div, axi_freq);
/*
 * get_axi_div() - smallest divider keeping the AXI clock at or below
 * max_axi_freq for CPU frequency @freq (kHz); rounds up on remainder.
 */
381 static inline int get_axi_div(unsigned int freq)
383 if (freq % sprd_cpufreq_conf->max_axi_freq)
384 return (freq / sprd_cpufreq_conf->max_axi_freq) + 1;
386 return freq / sprd_cpufreq_conf->max_axi_freq;
/*
 * cpufreq_adjust_axi_clk() - rescale the CA7 AXI clock after a CPU
 * frequency change so it stays within max_axi_freq. Skipped on the very
 * first call (last_freq == 0) and when the divider is unchanged.
 */
389 static void cpufreq_adjust_axi_clk(unsigned int freq)
391 struct clk *clk_axi = NULL;
392 int div = 0, div_old = 0;
394 char clk_name[50] = {0};
396 if (last_freq == 0) {
401 #ifndef CONFIG_ARCH_SCX35L
402 strcpy(clk_name, "clk_ca7_axi");
404 // for T8, in cpufreq-dt-sprd.c ?
/* Only touch the clock when the required divider actually changes. */
411 div = get_axi_div(freq);
412 div_old = get_axi_div(last_freq);
413 if (!need_adjust && (div != div_old))
422 clk_axi = clk_get_sys(NULL, clk_name);
423 if (IS_ERR_OR_NULL(clk_axi)) {
424 pr_err("%s(%d) err: cannot find clock %s\n", __func__, __LINE__, clk_name);
/* Target AXI rate in Hz: CPU kHz * 1000 / divider. */
428 if (clk_set_rate(clk_axi, freq * 1000 / div))
429 pr_err("%s(%d) err: clk_set_rate failed\n", __func__, __LINE__);
432 //dump_axi_cpu(freq);
/*
 * cpufreq_set_clock() - switch the CPU clock to @freq (kHz).
 * TD-PLL rates (768 MHz / 384 MHz) are served by reparenting to tdpllclk
 * (with a /2 divider for the half rate); any other rate reprograms MPLL
 * and reparents to it. Always ends by rescaling the AXI clock.
 * NOTE(review): error paths only log — the caller is not informed of
 * failed clk operations.
 */
435 static void cpufreq_set_clock(unsigned int freq)
/* Park the core on TD-PLL first so MPLL can be reprogrammed safely. */
439 ret = clk_set_parent(sprd_cpufreq_conf->clk, sprd_cpufreq_conf->tdpllclk);
441 pr_err("Failed to set cpu parent to tdpll\n");
442 if (freq == SHARK_TDPLL_FREQUENCY/2) {
/* Half TD-PLL rate: enable the /2 MCU clock divider. */
444 #ifndef CONFIG_ARCH_SCX35L
445 sci_glb_set(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
447 #ifndef CONFIG_ARCH_SCX35LT8 //TODO
448 sci_glb_set(REG_AP_AHB_CA7_CKG_DIV_CFG, BITS_CA7_MCU_CKG_DIV(1));
451 } else if (freq == SHARK_TDPLL_FREQUENCY) {
/* Full TD-PLL rate: clear the divider. */
452 #ifndef CONFIG_ARCH_SCX35L
453 sci_glb_clr(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
455 #ifndef CONFIG_ARCH_SCX35LT8 //TODO
456 sci_glb_clr(REG_AP_AHB_CA7_CKG_DIV_CFG, BITS_CA7_MCU_CKG_DIV(1));
/* Any other rate: ensure we're on TD-PLL, then retune MPLL and switch. */
461 if (clk_get_parent(sprd_cpufreq_conf->clk) != sprd_cpufreq_conf->tdpllclk) {
462 ret = clk_set_parent(sprd_cpufreq_conf->clk, sprd_cpufreq_conf->tdpllclk);
464 pr_err("Failed to set cpu parent to tdpll\n");
/* Make sure the AP owns MPLL relocation before changing its rate. */
467 if (!(sci_glb_read(REG_PMU_APB_MPLL_REL_CFG, -1) & BIT_MPLL_AP_SEL)) {
468 sci_glb_set(REG_PMU_APB_MPLL_REL_CFG, BIT_MPLL_AP_SEL);
471 ret = clk_set_rate(sprd_cpufreq_conf->mpllclk, (freq * 1000));
473 pr_err("Failed to set mpll rate\n");
474 ret = clk_set_parent(sprd_cpufreq_conf->clk, sprd_cpufreq_conf->mpllclk);
476 pr_err("Failed to set cpu parent to mpll\n");
477 #ifndef CONFIG_ARCH_SCX35L
478 sci_glb_clr(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
480 #ifndef CONFIG_ARCH_SCX35LT8 //TODO
481 sci_glb_clr(REG_AP_AHB_CA7_CKG_DIV_CFG, BITS_CA7_MCU_CKG_DIV(1));
486 cpufreq_adjust_axi_clk(freq);
/*
 * sprd_raw_set_cpufreq() - perform the actual DVFS step for @freq on @cpu.
 * Ordering rule: raise voltage before the clock when scaling up, lower it
 * after the clock when scaling down, so VDDARM always covers the running
 * frequency. @index selects the vddarm_mv[] entry.
 * NOTE(review): the helper macros below use backslash continuations and
 * some continuation lines are missing from this view — do not edit them
 * without the full source.
 */
488 static void sprd_raw_set_cpufreq(int cpu, struct cpufreq_freqs *freq, int index)
490 #if defined(CONFIG_ARCH_SCX35)
493 #define CPUFREQ_SET_VOLTAGE() \
495 mutex_lock(&cpufreq_vddarm_lock); \
496 ret = regulator_set_voltage(sprd_cpufreq_conf->regulator, \
497 sprd_cpufreq_conf->vddarm_mv[index], \
498 sprd_cpufreq_conf->vddarm_mv[index]); \
499 mutex_unlock(&cpufreq_vddarm_lock); \
501 pr_err("Failed to set vdd to %d mv\n", \
502 sprd_cpufreq_conf->vddarm_mv[index]); \
504 #define CPUFREQ_SET_CLOCK() \
506 if (freq->new == SHARK_TDPLL_FREQUENCY) { \
507 ret = clk_set_parent(sprd_cpufreq_conf->clk, sprd_cpufreq_conf->tdpllclk); \
509 pr_err("Failed to set cpu parent to tdpll\n"); \
511 if (clk_get_parent(sprd_cpufreq_conf->clk) != sprd_cpufreq_conf->tdpllclk) { \
512 ret = clk_set_parent(sprd_cpufreq_conf->clk, sprd_cpufreq_conf->tdpllclk); \
514 pr_err("Failed to set cpu parent to tdpll\n"); \
516 ret = clk_set_rate(sprd_cpufreq_conf->mpllclk, (freq->new * 1000)); \
518 pr_err("Failed to set mpll rate\n"); \
519 ret = clk_set_parent(sprd_cpufreq_conf->clk, sprd_cpufreq_conf->mpllclk); \
521 pr_err("Failed to set cpu parent to mpll\n"); \
524 trace_cpu_frequency(freq->new, cpu);
526 if (freq->new >= sprd_raw_get_cpufreq()) {
527 CPUFREQ_SET_VOLTAGE();
528 cpufreq_set_clock(freq->new);
530 cpufreq_set_clock(freq->new);
531 CPUFREQ_SET_VOLTAGE();
534 pr_debug("%u --> %u, real=%u, index=%d\n",
535 freq->old, freq->new, sprd_raw_get_cpufreq(), index);
537 #undef CPUFREQ_SET_VOLTAGE
538 #undef CPUFREQ_SET_CLOCK
/* SC8825 has no voltage scaling here: program the MCU clock directly. */
540 #elif defined(CONFIG_ARCH_SC8825)
541 set_mcu_clk_freq(freq->new * 1000);
/*
 * sprd_real_set_cpufreq() - serialized frequency transition with cpufreq
 * PRE/POSTCHANGE notifications. All cores share one clock, so a single
 * global freq_lock and global_freqs record the domain state.
 */
546 static void sprd_real_set_cpufreq(struct cpufreq_policy *policy, unsigned int new_speed, int index)
548 mutex_lock(&freq_lock);
/* Fast path: already at the requested speed. */
550 if (global_freqs.old == new_speed) {
551 pr_debug("do nothing for cpu%u, new=old=%u\n",
552 policy->cpu, new_speed);
553 mutex_unlock(&freq_lock);
557 pr_info("--xing-- set %u khz for cpu%u\n",
558 new_speed, policy->cpu);
560 global_freqs.cpu = policy->cpu;
561 global_freqs.new = new_speed;
563 cpufreq_notify_transition(policy, &global_freqs, CPUFREQ_PRECHANGE);
565 sprd_raw_set_cpufreq(policy->cpu, &global_freqs, index);
567 cpufreq_notify_transition(policy, &global_freqs, CPUFREQ_POSTCHANGE);
569 global_freqs.old = global_freqs.new;
571 mutex_unlock(&freq_lock);
/*
 * sprd_find_real_index() - map @new_speed (kHz) to its table index;
 * defaults to the first entry's index when no exact match is found.
 */
575 static void sprd_find_real_index(unsigned int new_speed, int *index)
578 struct cpufreq_frequency_table *pfreq = sprd_cpufreq_conf->freq_tbl;
580 *index = pfreq[0].index;
581 for (i = 0; (pfreq[i].frequency != CPUFREQ_TABLE_END); i++) {
582 if (new_speed == pfreq[i].frequency) {
583 *index = pfreq[i].index;
/*
 * sprd_update_cpu_speed() - pick the domain frequency as the max of all
 * online cores' requests (shared voltage/clock domain), clamp it to the
 * top supported frequency, resolve the real table index when it differs
 * from @index, and apply it.
 */
590 static int sprd_update_cpu_speed(struct cpufreq_policy *policy,
591 unsigned int target_speed, int index)
593 int i, real_index = 0;
594 unsigned int new_speed = 0;
597 * CONFIG_NR_CPUS cores are always in the same voltage, at the same
598 * frequency. But, cpu load is calculated individual in each cores,
599 * So we remeber the original target frequency and voltage of core0,
600 * and use the higher one
603 for_each_online_cpu(i) {
604 new_speed = max(new_speed, percpu_target[i]);
607 if (new_speed > sprd_top_frequency)
608 new_speed = sprd_top_frequency;
/* If the winning speed isn't the caller's entry, look up its index. */
610 if (new_speed != sprd_cpufreq_conf->freq_tbl[index].frequency)
611 sprd_find_real_index(new_speed, &real_index);
614 sprd_real_set_cpufreq(policy, new_speed, real_index);
/*
 * sprd_cpufreq_verify_speed() - cpufreq ->verify hook; validates the
 * policy limits against the active frequency table.
 * NOTE(review): the bound check looks off by one — valid cpu ids are
 * 0..CONFIG_NR_CPUS-1, so this should probably be '>='. Confirm before
 * changing, as the full body is not visible here.
 */
618 static int sprd_cpufreq_verify_speed(struct cpufreq_policy *policy)
620 if (policy->cpu > CONFIG_NR_CPUS) {
621 pr_err("%s no such cpu id %d\n", __func__, policy->cpu);
625 return cpufreq_frequency_table_verify(policy, sprd_cpufreq_conf->freq_tbl);
/* User-settable frequency limits (kHz), adjusted via the sysfs stores below.
 * NOTE(review): an unsigned int initialized with ULONG_MAX truncates on
 * 64-bit — presumably UINT_MAX was intended; verify against users. */
628 unsigned int cpufreq_min_limit = ULONG_MAX;
629 unsigned int cpufreq_max_limit = 0;
/* sprdemand governor tuning knobs and per-cpu load windows (defined in the
 * governor; sized for 4 cores). */
630 unsigned int dvfs_score_select = 5;
631 unsigned int dvfs_unplug_select = 2;
632 unsigned int dvfs_plug_select = 0;
633 unsigned int dvfs_score_hi[4] = {0};
634 unsigned int dvfs_score_mid[4] = {0};
635 unsigned int dvfs_score_critical[4] = {0};
636 extern unsigned int percpu_load[4];
637 extern unsigned int cur_window_size[4];
638 extern unsigned int cur_window_index[4];
639 extern unsigned int ga_percpu_total_load[4][8];
/* Protects the min/max limit variables against concurrent sysfs writers. */
641 static DEFINE_SPINLOCK(cpufreq_state_lock);
/*
 * sprd_cpufreq_target() - cpufreq ->target hook. Rejects requests during
 * the boot hold-off and outside the user min/max limits, clamps by PM QoS
 * CPU_FREQ_MIN/MAX, resolves @target_freq to a table entry, records it for
 * this core, and triggers the domain-wide update.
 */
643 static int sprd_cpufreq_target(struct cpufreq_policy *policy,
644 unsigned int target_freq,
645 unsigned int relation)
649 unsigned int new_speed;
650 struct cpufreq_frequency_table *table;
651 int max_freq = cpufreq_max_limit;
652 int min_freq = cpufreq_min_limit;
654 unsigned long irq_flags;
656 /* delay 30s to enable dvfs&dynamic-hotplug,
657 * except requirment from termal-cooling device
659 if(time_before(jiffies, boot_done)){
663 if((target_freq < min_freq) || (target_freq > max_freq))
665 pr_err("invalid target_freq: %d min_freq %d max_freq %d\n", target_freq,min_freq,max_freq);
/* Floor of 1 GHz, then apply PM QoS constraints.
 * NOTE(review): kernel max() requires matching types; the int literal vs
 * unsigned target_freq mix looks suspect — confirm against the full tree. */
669 target_freq = max(1000000, target_freq);
670 target_freq = max((unsigned int)pm_qos_request(PM_QOS_CPU_FREQ_MIN), target_freq);
671 target_freq = min((unsigned int)pm_qos_request(PM_QOS_CPU_FREQ_MAX), target_freq);
673 table = cpufreq_frequency_get_table(policy->cpu);
675 if (cpufreq_frequency_table_target(policy, table,
676 target_freq, relation, &index)) {
677 pr_err("invalid target_freq: %d\n", target_freq);
681 pr_debug("CPU_%d target %d relation %d (%d-%d) selected %d\n",
682 policy->cpu, target_freq, relation,
683 policy->min, policy->max, table[index].frequency);
685 new_speed = table[index].frequency;
687 percpu_target[policy->cpu] = new_speed;
688 pr_debug("%s cpu:%d new_speed:%u on cpu%d\n",
689 __func__, policy->cpu, new_speed, smp_processor_id());
691 ret = sprd_update_cpu_speed(policy, new_speed, index);
/*
 * sprd_cpufreq_getspeed() - cpufreq ->get hook; all cores share one clock
 * so the id is only sanity-checked.
 * NOTE(review): '>' should likely be '>=' (valid ids 0..CONFIG_NR_CPUS-1).
 */
697 static unsigned int sprd_cpufreq_getspeed(unsigned int cpu)
699 if (cpu > CONFIG_NR_CPUS) {
700 pr_err("%s no such cpu id %d\n", __func__, cpu);
704 return sprd_raw_get_cpufreq();
/*
 * sprd_set_cpureq_limit() - initialize cpufreq_min_limit/cpufreq_max_limit
 * from the extremes of the active frequency table.
 */
707 static void sprd_set_cpureq_limit(void)
710 struct cpufreq_frequency_table *tmp = sprd_cpufreq_conf->freq_tbl;
711 for (i = 0; (tmp[i].frequency != CPUFREQ_TABLE_END); i++) {
712 cpufreq_min_limit = min(tmp[i].frequency, cpufreq_min_limit);
713 cpufreq_max_limit = max(tmp[i].frequency, cpufreq_max_limit);
715 pr_info("--xing-- %s max=%u min=%u\n", __func__, cpufreq_max_limit, cpufreq_min_limit);
/* Chip-id register differs on SCX35LT8. */
718 #if defined(CONFIG_ARCH_SCX35LT8)
719 #define AON_APB_CHIP_ID REG_AON_APB_CHIP_ID0
721 #define AON_APB_CHIP_ID REG_AON_APB_CHIP_ID
/*
 * sprd_freq_table_init() - bind the freq/voltage tables (and, for some
 * chips, max_axi_freq) to sprd_cpufreq_conf based on the detected SoC
 * revision; falls back to a chip-id register read for SC9631L64.
 */
723 static int sprd_freq_table_init(void)
725 /* we init freq table here depends on which chip being used */
726 if (soc_is_scx35_v0()) {
727 pr_info("%s es_chip\n", __func__);
728 sprd_cpufreq_conf->freq_tbl =
729 sc8830_cpufreq_table_data_es.freq_tbl;
730 sprd_cpufreq_conf->vddarm_mv =
731 sc8830_cpufreq_table_data_es.vddarm_mv;
732 } else if (soc_is_scx35_v1()) {
733 pr_info("%s cs_chip\n", __func__);
734 sprd_cpufreq_conf->freq_tbl =
735 sc8830_cpufreq_table_data_cs.freq_tbl;
736 sprd_cpufreq_conf->vddarm_mv =
737 sc8830_cpufreq_table_data_cs.vddarm_mv;
738 } else if (soc_is_sc7715()) {
739 sprd_cpufreq_conf->freq_tbl =
740 sc7715_cpufreq_table_data.freq_tbl;
741 sprd_cpufreq_conf->vddarm_mv =
742 sc7715_cpufreq_table_data.vddarm_mv;
743 } else if (soc_is_scx35g_v0() || soc_is_scx30g2_v0()) {
744 sprd_cpufreq_conf->freq_tbl =
745 sc8830t_cpufreq_table_data_es.freq_tbl;
746 sprd_cpufreq_conf->vddarm_mv =
747 sc8830t_cpufreq_table_data_es.vddarm_mv;
748 sprd_cpufreq_conf->max_axi_freq = 500000;
749 } else if (soc_is_scx9630_v0()) {
750 sprd_cpufreq_conf->freq_tbl =
751 sc9630_cpufreq_table_data.freq_tbl;
752 sprd_cpufreq_conf->vddarm_mv =
753 sc9630_cpufreq_table_data.vddarm_mv;
754 } else if (soc_is_scx9820_v0()) {
755 sprd_cpufreq_conf->freq_tbl =
756 sc9820_cpufreq_table_data.freq_tbl;
757 sprd_cpufreq_conf->vddarm_mv =
758 sc9820_cpufreq_table_data.vddarm_mv;
759 } else if (soc_is_scx7720_v0()) {
760 sprd_cpufreq_conf->freq_tbl =
761 sc7720_cpufreq_table_data.freq_tbl;
762 sprd_cpufreq_conf->vddarm_mv =
763 sc7720_cpufreq_table_data.vddarm_mv;
/* SC9631L64 is identified by chip-id value rather than a soc_is_* helper. */
764 #if defined (CONFIG_ARCH_SCX35LT8) //TODO
765 } else if(__raw_readl(REG_AON_APB_CHIP_ID0) == 0x96310000){
766 sprd_cpufreq_conf->freq_tbl =
767 sc9631l64_cpufreq_table_data_es.freq_tbl;
768 sprd_cpufreq_conf->vddarm_mv =
769 sc9631l64_cpufreq_table_data_es.vddarm_mv;
771 } else if (__raw_readl(AON_APB_CHIP_ID) == 0x96310000) {
772 sprd_cpufreq_conf->freq_tbl =
773 sc9631l64_cpufreq_table_data_es.freq_tbl;
774 sprd_cpufreq_conf->vddarm_mv =
775 sc9631l64_cpufreq_table_data_es.vddarm_mv;
778 #if defined(CONFIG_ARCH_SCX35LT8)
779 pr_info("D-die chip id = 0x%08X\n", __raw_readl(REG_AON_APB_CHIP_ID0));
781 pr_err("%s error chip id\n", __func__);
784 pr_info("sprd_freq_table_init \n");
785 sprd_set_cpureq_limit();
/*
 * sprd_cpufreq_init() - cpufreq ->init hook: publish the frequency table,
 * set the current rate and latency, and put all CPUs into one policy
 * (single shared clock domain).
 * NOTE(review): cpufreq_frequency_table_cpuinfo() is called twice (once
 * with the return ignored) — likely redundant; confirm on the full source.
 */
789 static int sprd_cpufreq_init(struct cpufreq_policy *policy)
793 cpufreq_frequency_table_cpuinfo(policy, sprd_cpufreq_conf->freq_tbl);
794 policy->cur = sprd_raw_get_cpufreq(); /* current cpu frequency: KHz*/
796 * transition_latency 5us is enough now
797 * but sampling too often, unbalance and irregular on each online cpu
798 * so we set 500us here.
800 policy->cpuinfo.transition_latency = TRANSITION_LATENCY;
801 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
803 cpufreq_frequency_table_get_attr(sprd_cpufreq_conf->freq_tbl, policy->cpu);
805 percpu_target[policy->cpu] = policy->cur;
807 ret = cpufreq_frequency_table_cpuinfo(policy, sprd_cpufreq_conf->freq_tbl);
809 pr_err("%s Failed to config freq table: %d\n", __func__, ret);
812 pr_info("%s policy->cpu=%d, policy->cur=%u, ret=%d\n",
813 __func__, policy->cpu, policy->cur, ret);
/* All cores are governed together. */
815 cpumask_setall(policy->cpus);
/* cpufreq ->exit hook; body not visible in this chunk. */
820 static int sprd_cpufreq_exit(struct cpufreq_policy *policy)
/* Expose scaling_available_frequencies in sysfs. */
825 static struct freq_attr *sprd_cpufreq_attr[] = {
826 &cpufreq_freq_attr_scaling_available_freqs,
/* Driver registration table wiring the hooks above into the cpufreq core. */
830 static struct cpufreq_driver sprd_cpufreq_driver = {
831 .verify = sprd_cpufreq_verify_speed,
832 .target = sprd_cpufreq_target,
833 .get = sprd_cpufreq_getspeed,
834 .init = sprd_cpufreq_init,
835 .exit = sprd_cpufreq_exit,
837 .attr = sprd_cpufreq_attr,
838 #if defined(CONFIG_ARCH_SCX35)
839 .flags = CPUFREQ_SHARED
/*
 * sysfs show handlers for the user frequency limits.
 * NOTE(review): the first two memcpy a raw int into the sysfs buffer
 * (binary, not text) — intentional for their consumer, presumably; the
 * *_debug variants print human-readable values instead.
 */
843 static ssize_t cpufreq_min_limit_show(struct device *dev, struct device_attribute *attr,char *buf)
845 memcpy(buf,&cpufreq_min_limit,sizeof(int));
849 static ssize_t cpufreq_max_limit_show(struct device *dev, struct device_attribute *attr,char *buf)
851 memcpy(buf,&cpufreq_max_limit,sizeof(int));
855 static ssize_t cpufreq_min_limit_debug_show(struct device *dev, struct device_attribute *attr,char *buf)
857 snprintf(buf,10,"%d\n",cpufreq_min_limit);
858 return strlen(buf) + 1;
861 static ssize_t cpufreq_max_limit_debug_show(struct device *dev, struct device_attribute *attr,char *buf)
863 snprintf(buf,10,"%d\n",cpufreq_max_limit);
864 return strlen(buf) + 1;
/* Show/store the AXI frequency ceiling (kHz) used by get_axi_div(). */
867 static ssize_t cpufreq_max_axi_freq_show(struct device *dev, struct device_attribute *attr,char *buf)
869 snprintf(buf, 10, "%d\n", sprd_cpufreq_conf->max_axi_freq);
870 return strlen(buf) + 1;
/*
 * sysfs store handlers.
 * NOTE(review): strict_strtoul() is the deprecated predecessor of
 * kstrtoul(); casting &value through (long unsigned int *) is an aliasing
 * hazard on 64-bit. Worth migrating when the full file is in hand.
 */
873 static ssize_t cpufreq_max_axi_freq_store(struct device *dev, struct device_attribute *attr,const char *buf, size_t count)
876 int temp,max_freq = 0;
879 ret = strict_strtoul(buf, 16, (long unsigned int *)&value);
885 for(j = 0; j < i; j++)
892 sprd_cpufreq_conf->max_axi_freq = max_freq;
/*
 * Min-limit store: magic prefix 0xabcdeNNN encodes the limit in MHz in the
 * low 12 bits (e.g. 0xabcde258 -> 600 MHz); anything else is taken as a
 * raw binary int from the buffer.
 */
896 static ssize_t cpufreq_min_limit_store(struct device *dev, struct device_attribute *attr,const char *buf, size_t count)
900 unsigned long irq_flags;
902 ret = strict_strtoul(buf,16,(long unsigned int *)&value);
904 spin_lock_irqsave(&cpufreq_state_lock, irq_flags);
907 echo 0xabcde258 > /sys/power/cpufreq_min_limit means set the minimum limit to 600Mhz
909 if((value & 0xfffff000) == 0xabcde000)
911 cpufreq_min_limit = value & 0x00000fff;
912 cpufreq_min_limit *= 1000;
913 printk(KERN_ERR"cpufreq_min_limit value %s %d\n",buf,cpufreq_min_limit);
/* NOTE(review): reinterprets the text buffer as a raw int — fragile. */
917 cpufreq_min_limit = *(int *)buf;
919 spin_unlock_irqrestore(&cpufreq_state_lock, irq_flags);
/* Max-limit store: same 0xabcdeNNN encoding as the min-limit store. */
923 static ssize_t cpufreq_max_limit_store(struct device *dev, struct device_attribute *attr,const char *buf, size_t count)
927 unsigned long irq_flags;
929 ret = strict_strtoul(buf,16,(long unsigned int *)&value);
931 spin_lock_irqsave(&cpufreq_state_lock, irq_flags);
935 echo 0xabcde4b0 > /sys/power/cpufreq_max_limit means set the maximum limit to 1200Mhz
937 if((value & 0xfffff000) == 0xabcde000)
939 cpufreq_max_limit = value & 0x00000fff;
940 cpufreq_max_limit *= 1000;
941 printk(KERN_ERR"cpufreq_max_limit value %s %d\n",buf,cpufreq_max_limit);
945 cpufreq_max_limit = *(int *)buf;
947 spin_unlock_irqrestore(&cpufreq_state_lock, irq_flags);
952 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
/*
 * dvfs_score store: packed hex word — bits [27:24] select the table slot,
 * [23:16] critical threshold, [15:8] hi threshold, [7:0] mid threshold.
 */
953 static ssize_t dvfs_score_store(struct device *dev, struct device_attribute *attr,const char *buf, size_t count)
957 unsigned long irq_flags;
959 ret = strict_strtoul(buf,16,(long unsigned int *)&value);
961 printk(KERN_ERR"dvfs_score_input %x\n",value);
963 dvfs_score_select = (value >> 24) & 0x0f;
964 if(dvfs_score_select < 4)
966 dvfs_score_critical[dvfs_score_select] = (value >> 16) & 0xff;
967 dvfs_score_hi[dvfs_score_select] = (value >> 8) & 0xff;
968 dvfs_score_mid[dvfs_score_select] = value & 0xff;
/* dvfs_score show: dump thresholds and the per-cpu load windows. */
975 static ssize_t dvfs_score_show(struct device *dev, struct device_attribute *attr,char *buf)
979 ret = snprintf(buf + ret,50,"dvfs_score_select %d\n",dvfs_score_select);
980 ret += snprintf(buf + ret,200,"dvfs_score_critical[1] = %d dvfs_score_hi[1] = %d dvfs_score_mid[1] = %d\n",dvfs_score_critical[1],dvfs_score_hi[1],dvfs_score_mid[1]);
981 ret += snprintf(buf + ret,200,"dvfs_score_critical[2] = %d dvfs_score_hi[2] = %d dvfs_score_mid[2] = %d\n",dvfs_score_critical[2],dvfs_score_hi[2],dvfs_score_mid[2]);
982 ret += snprintf(buf + ret,200,"dvfs_score_critical[3] = %d dvfs_score_hi[3] = %d dvfs_score_mid[3] = %d\n",dvfs_score_critical[3],dvfs_score_hi[3],dvfs_score_mid[3]);
984 ret += snprintf(buf + ret,200,"percpu_total_load[0] = %d,%d->%d\n",
985 percpu_load[0],ga_percpu_total_load[0][(cur_window_index[0] - 1 + 10) % 10],ga_percpu_total_load[0][cur_window_index[0]]);
986 ret += snprintf(buf + ret,200,"percpu_total_load[1] = %d,%d->%d\n",
987 percpu_load[1],ga_percpu_total_load[1][(cur_window_index[1] - 1 + 10) % 10],ga_percpu_total_load[1][cur_window_index[1]]);
988 ret += snprintf(buf + ret,200,"percpu_total_load[2] = %d,%d->%d\n",
989 percpu_load[2],ga_percpu_total_load[2][(cur_window_index[2] - 1 + 10) % 10],ga_percpu_total_load[2][cur_window_index[2]]);
990 ret += snprintf(buf + ret,200,"percpu_total_load[3] = %d,%d->%d\n",
991 percpu_load[3],ga_percpu_total_load[3][(cur_window_index[3] - 1 + 10) % 10],ga_percpu_total_load[3][cur_window_index[3]]);
993 return strlen(buf) + 1;
/*
 * dvfs_unplug store: bits [27:24] select the unplug policy; bits [15:8]
 * set all four cores' load-window sizes to the same value.
 */
996 static ssize_t dvfs_unplug_store(struct device *dev, struct device_attribute *attr,const char *buf, size_t count)
1000 unsigned long irq_flags;
1002 ret = strict_strtoul(buf,16,(long unsigned int *)&value);
1004 printk(KERN_ERR"dvfs_score_input %x\n",value);
1006 dvfs_unplug_select = (value >> 24) & 0x0f;
1007 if(dvfs_unplug_select > 7)
1009 cur_window_size[0]= (value >> 8) & 0xff;
1010 cur_window_size[1]= (value >> 8) & 0xff;
1011 cur_window_size[2]= (value >> 8) & 0xff;
1012 cur_window_size[3]= (value >> 8) & 0xff;
1017 static ssize_t dvfs_unplug_show(struct device *dev, struct device_attribute *attr,char *buf)
1021 ret = snprintf(buf + ret,50,"dvfs_unplug_select %d\n",dvfs_unplug_select);
1022 ret += snprintf(buf + ret,100,"cur_window_size[0] = %d\n",cur_window_size[0]);
1023 ret += snprintf(buf + ret,100,"cur_window_size[1] = %d\n",cur_window_size[1]);
1024 ret += snprintf(buf + ret,100,"cur_window_size[2] = %d\n",cur_window_size[2]);
1025 ret += snprintf(buf + ret,100,"cur_window_size[3] = %d\n",cur_window_size[3]);
1027 return strlen(buf) + 1;
/* dvfs_plug: low nibble selects the hotplug (plug-in) policy. */
1031 static ssize_t dvfs_plug_store(struct device *dev, struct device_attribute *attr,const char *buf, size_t count)
1035 unsigned long irq_flags;
1037 ret = strict_strtoul(buf,16,(long unsigned int *)&value);
1039 printk(KERN_ERR"dvfs_plug_select %x\n",value);
1041 dvfs_plug_select = (value ) & 0x0f;
1046 static ssize_t dvfs_plug_show(struct device *dev, struct device_attribute *attr,char *buf)
1050 ret = snprintf(buf + ret,50,"dvfs_plug_select %d\n",dvfs_plug_select);
1052 return strlen(buf) + 1;
/* cpufreq_table show: copies one raw table entry (binary) to the buffer. */
1056 static ssize_t cpufreq_table_show(struct device *dev, struct device_attribute *attr,char *buf)
1058 memcpy(buf,sprd_cpufreq_conf->freq_tbl,sizeof(* sprd_cpufreq_conf->freq_tbl));
1059 return sizeof(* sprd_cpufreq_conf->freq_tbl);
/* NOTE(review): dvfs_prop duplicates dvfs_plug's behavior, including
 * writing dvfs_plug_select — possibly a copy/paste leftover. */
1062 static ssize_t dvfs_prop_store(struct device *dev, struct device_attribute *attr,const char *buf, size_t count)
1066 unsigned long irq_flags;
1068 printk(KERN_ERR"dvfs_status %s\n",buf);
1069 ret = strict_strtoul(buf,16,(long unsigned int *)&value);
1071 printk(KERN_ERR"dvfs_plug_select %x\n",value);
1073 dvfs_plug_select = (value ) & 0x0f;
1077 static ssize_t dvfs_prop_show(struct device *dev, struct device_attribute *attr,char *buf)
1081 ret = snprintf(buf + ret,50,"dvfs_plug_select %d\n",dvfs_plug_select);
1083 return strlen(buf) + 1;
1086 #ifdef CONFIG_SPRD_AVS_DEBUG
/* AVS debug logging flag, defined in the AVS driver. */
1087 extern unsigned int g_avs_log_flag;
/* avs_log store/show: low nibble of the hex input sets the log flag. */
1089 static ssize_t avs_log_store(struct device *dev, struct device_attribute *attr,const char *buf, size_t count)
1093 unsigned long irq_flags;
1095 printk(KERN_ERR"g_avs_log_flag %s\n",buf);
1096 ret = strict_strtoul(buf,16,(long unsigned int *)&value);
1098 printk(KERN_ERR"g_avs_log_flag %x\n",value);
1100 g_avs_log_flag = (value ) & 0x0f;
1104 static ssize_t avs_log_show(struct device *dev, struct device_attribute *attr,char *buf)
1108 ret = snprintf(buf + ret,50,"g_avs_log_flag %d\n",g_avs_log_flag);
1110 return strlen(buf) + 1;
/* Device attribute declarations tying the handlers above to sysfs nodes. */
1113 static DEVICE_ATTR(cpufreq_min_limit, 0660, cpufreq_min_limit_show, cpufreq_min_limit_store);
1114 static DEVICE_ATTR(cpufreq_max_limit, 0660, cpufreq_max_limit_show, cpufreq_max_limit_store);
1115 static DEVICE_ATTR(cpufreq_min_limit_debug, 0440, cpufreq_min_limit_debug_show, NULL);
1116 static DEVICE_ATTR(cpufreq_max_limit_debug, 0440, cpufreq_max_limit_debug_show, NULL);
1117 static DEVICE_ATTR(cpufreq_table, 0440, cpufreq_table_show, NULL);
1118 static DEVICE_ATTR(cpufreq_max_axi_freq, 0660, cpufreq_max_axi_freq_show, cpufreq_max_axi_freq_store);
1120 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
1121 static DEVICE_ATTR(dvfs_score, 0660, dvfs_score_show, dvfs_score_store);
1122 static DEVICE_ATTR(dvfs_unplug, 0660, dvfs_unplug_show, dvfs_unplug_store);
1123 static DEVICE_ATTR(dvfs_plug, 0660, dvfs_plug_show, dvfs_plug_store);
1125 static DEVICE_ATTR(dvfs_prop, 0660, dvfs_prop_show, dvfs_prop_store);
1127 #ifdef CONFIG_SPRD_AVS_DEBUG
1128 static DEVICE_ATTR(avs_log, 0660, avs_log_show, avs_log_store);
/* Attribute group (registration via sysfs_create_group is commented out
 * in modinit as duplicated elsewhere). */
1130 static struct attribute *g[] = {
1131 &dev_attr_cpufreq_min_limit.attr,
1132 &dev_attr_cpufreq_max_limit.attr,
1133 &dev_attr_cpufreq_min_limit_debug.attr,
1134 &dev_attr_cpufreq_max_limit_debug.attr,
1135 &dev_attr_cpufreq_table.attr,
1136 &dev_attr_cpufreq_max_axi_freq.attr,
1137 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SPRDEMAND
1138 &dev_attr_dvfs_score.attr,
1139 &dev_attr_dvfs_unplug.attr,
1140 &dev_attr_dvfs_plug.attr,
1142 &dev_attr_dvfs_prop.attr,
1143 #ifdef CONFIG_SPRD_AVS_DEBUG
1144 &dev_attr_avs_log.attr,
1149 static struct attribute_group attr_group = {
/* cpufreq policy notifier; body not visible in this chunk. */
1153 static int sprd_cpufreq_policy_notifier(
1154 struct notifier_block *nb, unsigned long event, void *data)
1159 static struct notifier_block sprd_cpufreq_policy_nb = {
1160 .notifier_call = sprd_cpufreq_policy_notifier,
/*
 * PM QoS CPU_FREQ_MIN handler: raise the CPU to @val if it currently runs
 * slower; cpu0's policy covers the whole shared domain.
 */
1164 static int sprd_cpufreq_min_qos_handler(struct notifier_block *b, unsigned long val, void *v)
1167 struct cpufreq_policy *policy;
1169 policy = cpufreq_cpu_get(0);
1173 if (policy->cur >= val) {
1174 cpufreq_cpu_put(policy);
1178 ret = __cpufreq_driver_target(policy, val, CPUFREQ_RELATION_L);
1180 cpufreq_cpu_put(policy);
/*
 * PM QoS CPU_FREQ_MAX handler: mirror of the min handler — lower the CPU
 * to @val if it currently runs faster.
 */
1191 static int sprd_cpufreq_max_qos_handler(struct notifier_block *b, unsigned long val, void *v)
1194 struct cpufreq_policy *policy;
1196 policy = cpufreq_cpu_get(0);
1200 if (policy->cur <= val) {
1201 cpufreq_cpu_put(policy);
1205 ret = __cpufreq_driver_target(policy, val, CPUFREQ_RELATION_H);
1207 cpufreq_cpu_put(policy);
1219 static struct notifier_block sprd_cpufreq_min_qos_notifier = {
1220 .notifier_call = sprd_cpufreq_min_qos_handler,
1223 static struct notifier_block sprd_cpufreq_max_qos_notifier = {
1224 .notifier_call = sprd_cpufreq_max_qos_handler,
/*
 * sprd_cpufreq_modinit() - module init: select the per-SoC configuration,
 * build the frequency table, acquire clocks and the vddarm regulator,
 * pre-set the top voltage, switch to MPLL, arm the boot hold-off, and
 * register the cpufreq driver plus policy/QoS notifiers.
 * NOTE(review): clk_get_sys()/regulator_get() handles acquired before a
 * failing step are not released on the early-return error paths.
 */
1227 static int __init sprd_cpufreq_modinit(void)
1230 #if defined(CONFIG_SPRD_CPUFREQ_DT_DRIVER)
/* DT-based variant delegates to the separate cpufreq-dt-sprd driver. */
1231 struct platform_device_info devinfo = { .name = "cpufreq-dt-sprd", };
1233 platform_device_register_full(&devinfo);
1238 #if defined(CONFIG_ARCH_SCX35)
1239 sprd_cpufreq_conf = &sc8830_cpufreq_conf;
1240 #elif defined(CONFIG_ARCH_SC8825)
1241 sprd_cpufreq_conf = &sc8825_cpufreq_conf;
1244 #if defined(CONFIG_ARCH_SCX35)
1245 ret = sprd_freq_table_init();
/* Table is sorted descending: entry 0 is the highest supported rate. */
1249 sprd_top_frequency = sprd_cpufreq_conf->freq_tbl[0].frequency;
1250 /* TODO:need verify for the initialization of limited max freq */
1252 sprd_cpufreq_conf->clk = clk_get_sys(NULL, "clk_mcu");
1253 if (IS_ERR(sprd_cpufreq_conf->clk))
1254 return PTR_ERR(sprd_cpufreq_conf->clk);
1256 sprd_cpufreq_conf->mpllclk = clk_get_sys(NULL, "clk_mpll");
1257 if (IS_ERR(sprd_cpufreq_conf->mpllclk))
1258 return PTR_ERR(sprd_cpufreq_conf->mpllclk);
/* TD-PLL source name differs on SCX35L/SCX20 (fixed 768 MHz clock). */
1260 #if !defined(CONFIG_ARCH_SCX35L) && !defined(CONFIG_ARCH_SCX20)
1261 sprd_cpufreq_conf->tdpllclk = clk_get_sys(NULL, "clk_tdpll");
1262 if (IS_ERR(sprd_cpufreq_conf->tdpllclk))
1263 return PTR_ERR(sprd_cpufreq_conf->tdpllclk);
1265 // sprd_cpufreq_conf->tdpllclk = clk_get_sys(NULL, "clk_twpll");
1266 sprd_cpufreq_conf->tdpllclk = clk_get_sys(NULL, "clk_768m");
1267 if (IS_ERR(sprd_cpufreq_conf->tdpllclk))
1268 return PTR_ERR(sprd_cpufreq_conf->tdpllclk);
1270 mutex_init(&cpufreq_vddarm_lock);
1272 sprd_cpufreq_conf->regulator = regulator_get(NULL, "vddarm");
1274 if (IS_ERR(sprd_cpufreq_conf->regulator))
1275 return PTR_ERR(sprd_cpufreq_conf->regulator);
1277 /* set max voltage first */
1279 regulator_set_voltage(sprd_cpufreq_conf->regulator,
1280 sprd_cpufreq_conf->vddarm_mv[0],
1281 sprd_cpufreq_conf->vddarm_mv[0]);
/* Park on TD-PLL, then move to MPLL as the running parent. */
1283 clk_set_parent(sprd_cpufreq_conf->clk, sprd_cpufreq_conf->tdpllclk);
1285 * clk_set_rate(sprd_cpufreq_conf->mpllclk, (sprd_top_frequency * 1000));
1287 clk_set_parent(sprd_cpufreq_conf->clk, sprd_cpufreq_conf->mpllclk);
1288 global_freqs.old = sprd_raw_get_cpufreq();
/* DVFS requests are rejected until this deadline (see sprd_cpufreq_target). */
1292 boot_done = jiffies + DVFS_BOOT_TIME;
1293 ret = cpufreq_register_notifier(
1294 &sprd_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
1298 ret = cpufreq_register_driver(&sprd_cpufreq_driver);
1300 pm_qos_add_notifier(PM_QOS_CPU_FREQ_MIN, &sprd_cpufreq_min_qos_notifier);
1301 pm_qos_add_notifier(PM_QOS_CPU_FREQ_MAX, &sprd_cpufreq_max_qos_notifier);
1303 /* remove duplicated sysfs files */
1304 //ret = sysfs_create_group(power_kobj, &attr_group);
/*
 * sprd_cpufreq_modexit() - module teardown: release the regulator (SCX35
 * only), unregister the driver and the policy notifier.
 */
1308 static void __exit sprd_cpufreq_modexit(void)
1310 #if defined(CONFIG_ARCH_SCX35)
1311 if (!IS_ERR_OR_NULL(sprd_cpufreq_conf->regulator))
1312 regulator_put(sprd_cpufreq_conf->regulator);
1314 cpufreq_unregister_driver(&sprd_cpufreq_driver);
1315 cpufreq_unregister_notifier(
1316 &sprd_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
1320 module_init(sprd_cpufreq_modinit);
1321 module_exit(sprd_cpufreq_modexit);
1323 MODULE_DESCRIPTION("cpufreq driver for Spreadtrum");
1324 MODULE_LICENSE("GPL");