2 * Copyright (C) 2012 Freescale Semiconductor, Inc.
4 * Copyright (C) 2015 Spreadtrum.
5 * zhaoyang.huang <zhaoyang.huang@spreadtrum.com>
7 * The OPP code in function set_target() is reused from
8 * drivers/cpufreq/omap-cpufreq.c
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/clk.h>
18 #include <linux/cpu.h>
19 #include <linux/cpu_cooling.h>
20 #include <linux/cpufreq.h>
21 #include <linux/cpumask.h>
22 #include <linux/err.h>
23 #include <linux/module.h>
25 #include <linux/opp.h>
26 #include <linux/platform_device.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/slab.h>
29 #include <linux/thermal.h>
30 #include <linux/regulator/driver.h>
31 #include <linux/delay.h>
33 #include <soc/sprd/hardware.h>
34 #include <soc/sprd/regulator.h>
35 #include <soc/sprd/adi.h>
36 #include <soc/sprd/sci.h>
37 #include <soc/sprd/sci_glb_regs.h>
38 #include <soc/sprd/arch_misc.h>
40 #ifndef CONFIG_REGULATOR
/*
 * Fallback stubs compiled in when the regulator framework is disabled,
 * so the driver still builds with voltage scaling effectively inert.
 * NOTE(review): the stub bodies are elided in this excerpt — confirm they
 * return benign defaults (0 / NULL) so callers' IS_ERR()/value checks
 * behave as no-ops.
 */
42 int regulator_get_voltage(struct regulator *reg)
46 int regulator_set_voltage_tol(struct regulator * reg,int volt,int tol)
50 struct regulator *regulator_get(struct device *dev,char *reg)
/* NOTE(review): real regulator_put() returns void and takes the regulator,
 * not the device — signature here diverges from the framework API; verify. */
54 struct regulator *regulator_put(struct device *dev)
59 int regulator_set_voltage_time(struct regulator * reg,int min_uV,int max_uV)
/*
 * Platform data handed to the driver via dev_get_platdata() in probe()
 * and read back with cpufreq_get_driver_data() in cpufreq_init().
 */
64 struct cpufreq_dt_platform_data {
66 * True when each CPU has its own clock to control its
67 * frequency, false when all CPUs are controlled by a single
70 bool independent_clocks;
/*
 * NOTE(review): these appear to be the members of `struct private_data`
 * (set_target()/cpufreq_init() dereference priv->cpu_dev, priv->cpu_reg,
 * priv->cdev and priv->voltage_tolerance) — the struct's opening line is
 * not visible in this excerpt; confirm.
 */
74 struct device *cpu_dev;
75 struct regulator *cpu_reg;
76 struct thermal_cooling_device *cdev;
77 unsigned int voltage_tolerance; /* in percentage */
/*
 * Per-CPU driver bookkeeping, one instance per logical CPU (see the
 * DEFINE_PER_CPU below). cpufreq_init() copies the initiating CPU's
 * instance to every sibling in policy->cpus.
 * NOTE(review): other members (clk, driver_data, opp_initialized) are
 * referenced elsewhere in the file but their declarations are elided here.
 */
80 struct cpufreq_policy_sprd {
82 struct cpufreq_policy *policy; /* see above */
83 struct cpufreq_frequency_table *freq_table;
84 /* For cpufreq driver's internal use */
89 static DEFINE_PER_CPU(struct cpufreq_policy_sprd , cpufreq_cpu_data_sprd);
/*
 * TDPLL-derived CPU frequency. NOTE(review): units look inconsistent —
 * cpufreq tables are in kHz (768000 kHz = 768 MHz), but set_target()
 * passes freq_exact (Hz, from clk_round_rate()) into cpufreq_set_clock(),
 * where it is compared against this constant. Verify which unit is meant.
 */
91 #define SHARK_TDPLL_FREQUENCY (768000)
/*
 * Reprogram the CPU clock tree for @freq.
 *
 * Sequence visible in this excerpt:
 *   1. Look up the MPLL ("clk_mpll0" for CPUs 0-3, "clk_mpll" otherwise)
 *      and the fixed TDPLL/768M clock.
 *   2. Park the CPU mux on the TDPLL so the MPLL can be reprogrammed.
 *   3. For TDPLL/2 set the MCU clock divider; for full TDPLL clear it;
 *      for any other frequency hand the CPU to the MPLL, retune its rate,
 *      and clear the divider.
 *
 * Returns 0 on success or a negative errno from clock lookup.
 * NOTE(review): several closing braces, #else/#endif arms and the final
 * return are elided in this excerpt — the exact branch boundaries below
 * are inferred and should be confirmed against the full file.
 * NOTE(review): clk_get_sys() references are never clk_put() in the
 * visible lines — possible reference leak on every DVFS transition.
 */
93 static int cpufreq_set_clock(struct cpufreq_policy *policy,unsigned int freq)
96 struct cpufreq_policy_sprd * sprd_policy;
97 int reg_mcu_ckg_div_set = 0;
98 int reg_mcu_ckg_div_clr = 0;
103 sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
/* CPUs 0-3 are the "LIT"(tle) cluster, others the "BIG" cluster. */
105 reg_mcu_ckg_div_set = (policy->cpu < 4) ? BITS_APCPU_LIT_MCU_CKG_DIV(1):BITS_APCPU_BIG_MCU_CKG_DIV(1);
106 reg_mcu_ckg_div_clr = (policy->cpu < 4) ? BITS_APCPU_LIT_MCU_CKG_DIV(0):BITS_APCPU_BIG_MCU_CKG_DIV(0);
108 pmpllclk = (policy->cpu < 4) ? "clk_mpll0":"clk_mpll";
109 struct clk *mpllclk = clk_get_sys(NULL, pmpllclk);
110 if (IS_ERR(mpllclk)){
111 pr_err("mpllclk get err\n");
112 return PTR_ERR(mpllclk);
/* SCX35L names the fixed 768 MHz source "clk_768m", older SoCs "clk_tdpll". */
114 #if !defined(CONFIG_ARCH_SCX35L)
115 struct clk *tdpllclk = clk_get_sys(NULL, "clk_tdpll");
117 struct clk *tdpllclk = clk_get_sys(NULL, "clk_768m");
119 if (IS_ERR(tdpllclk)){
120 pr_err("tdpllclk get err\n");
121 return PTR_ERR(tdpllclk);
/* Always detour through the TDPLL first so the MPLL is free to retune. */
123 ret = clk_set_parent(sprd_policy->clk, tdpllclk);
125 pr_err("Failed to set cpu parent to tdpll %d\n",ret);
/* Half TDPLL rate: stay on TDPLL and engage the /2 MCU divider. */
129 if (freq == SHARK_TDPLL_FREQUENCY/2) {
131 #ifndef CONFIG_ARCH_SCX35L
132 sci_glb_set(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
134 sci_glb_set(REG_AP_AHB_CA7_CKG_DIV_CFG,reg_mcu_ckg_div_set);
/* Release the MPLL from AP control while running from TDPLL. */
136 sci_glb_clr(REG_PMU_APB_MPLL_REL_CFG, BIT_MPLL_AP_SEL);
/* Full TDPLL rate: stay on TDPLL with the divider cleared. */
137 } else if (freq == SHARK_TDPLL_FREQUENCY) {
138 #ifndef CONFIG_ARCH_SCX35L
139 sci_glb_clr(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
141 sci_glb_clr(REG_AP_AHB_CA7_CKG_DIV_CFG, reg_mcu_ckg_div_clr);
143 sci_glb_clr(REG_PMU_APB_MPLL_REL_CFG, BIT_MPLL_AP_SEL);
/* Any other rate: claim the MPLL for the AP, retune it, switch to it. */
145 if (!(sci_glb_read(REG_PMU_APB_MPLL_REL_CFG, -1) & BIT_MPLL_AP_SEL)) {
146 sci_glb_set(REG_PMU_APB_MPLL_REL_CFG, BIT_MPLL_AP_SEL);
149 ret = clk_set_rate(mpllclk, freq);
151 pr_err("Failed to set mpll rate %d\n",ret);
152 ret = clk_set_parent(sprd_policy->clk, mpllclk);
154 pr_err("Failed to set cpu parent to mpll %d\n",ret);
155 #ifndef CONFIG_ARCH_SCX35L
156 sci_glb_clr(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
158 sci_glb_clr(REG_AP_AHB_CA7_CKG_DIV_CFG, reg_mcu_ckg_div_clr);
161 pr_info("[DVFS-dt]: Set Freq %dKHz = %dKHz",freq / 1000,clk_get_rate(sprd_policy->clk) / 1000);
/*
 * cpufreq .target hook: move @policy's CPUs to the table entry matching
 * @target_freq / @relation, scaling the supply voltage in the standard
 * order (voltage first when going up, frequency first when going down),
 * bracketed by PRECHANGE/POSTCHANGE notifications. OPP code reused from
 * drivers/cpufreq/omap-cpufreq.c (see file header).
 *
 * NOTE(review): error-path lines (early returns, closing braces, the
 * final return) are elided in this excerpt; comments below describe only
 * the visible happy path.
 */
165 static int set_target(struct cpufreq_policy *policy,unsigned int target_freq, unsigned int relation)
168 struct cpufreq_frequency_table *freq_table;
169 struct cpufreq_policy_sprd * sprd_policy;
171 struct private_data *priv;
172 struct device *cpu_dev;
173 struct regulator *cpu_reg;
174 unsigned long volt = 0, volt_old = 0, tol = 0;
175 unsigned int old_freq, new_freq;
176 long freq_Hz, freq_exact;
179 struct cpufreq_freqs freqs;
184 sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
185 freq_table = sprd_policy->freq_table;
/* Resolve target_freq to a concrete table index honouring @relation. */
187 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
190 pr_err("failed to match target freqency %d: %d\n",
195 cpu_clk = sprd_policy->clk;
196 priv = sprd_policy->driver_data;
197 cpu_dev = priv->cpu_dev;
198 cpu_reg = priv->cpu_reg;
/* Let the clock framework round the rate; fall back to the raw table
 * value (the elided line between 200 and 202 presumably checks <= 0). */
200 freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
202 freq_Hz = freq_table[index].frequency * 1000;
204 freq_exact = freq_Hz;
205 new_freq = freq_Hz / 1000;
206 old_freq = clk_get_rate(cpu_clk) / 1000;
207 freqs.old = old_freq;
208 freqs.new = new_freq;
/* With a real regulator, look up the OPP to learn the target voltage. */
210 if (!IS_ERR(cpu_reg)) {
211 unsigned long opp_freq;
214 opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
217 dev_err(cpu_dev, "failed to find OPP for %ld\n",
221 volt = opp_get_voltage(opp);
222 opp_freq = opp_get_freq(opp);
/* Tolerance is a percentage of the target voltage (see private_data). */
224 tol = volt * priv->voltage_tolerance / 100;
225 volt_old = regulator_get_voltage(cpu_reg);
226 pr_info("DVFS-dt:Found OPP: %ld kHz, %ld uV\n",
227 opp_freq / 1000, volt);
230 pr_info("DVFS-dt:%u MHz, %ld mV --> %u MHz, %ld mV\n",
231 old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
232 new_freq / 1000, volt ? volt / 1000 : -1);
234 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
236 /* scaling up? scale voltage before frequency */
237 if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
238 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
240 pr_info("DVFS-dt:failed to scale voltage %d %d up: %d\n",
/* Two rate-setting paths: plain clk_set_rate() vs. the PLL-juggling
 * cpufreq_set_clock() — the #if/#else selecting between them is elided. */
246 ret = clk_set_rate(cpu_clk, freq_exact);
248 ret = cpufreq_set_clock(policy,freq_exact);
251 pr_info("DVFS-dt:failed to set clock %d rate: %d\n",freq_exact, ret);
/* Clock change failed: roll the voltage back to its previous level. */
252 if (!IS_ERR(cpu_reg) && volt_old > 0)
253 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
258 /* scaling down? scale voltage after frequency */
259 if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
260 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
262 pr_info("DVFS-dt:failed to scale voltage %d %d down: %d\n",
/* Voltage drop failed: restore the previous (old_freq) clock rate.
 * NOTE(review): old_freq is in kHz here but clk_set_rate() elsewhere in
 * this file takes Hz (freq_exact) — looks like a unit bug; confirm. */
265 ret = clk_set_rate(cpu_clk, old_freq);
267 ret = cpufreq_set_clock(policy, old_freq);
272 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
/*
 * cpufreq .verify hook: clamp/validate @policy's limits against this
 * CPU's frequency table. Returns cpufreq_frequency_table_verify()'s
 * result (final return elided in this excerpt).
 */
277 static int verify(struct cpufreq_policy *policy)
279 struct cpufreq_policy_sprd * sprd_policy;
284 sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
286 ret = cpufreq_frequency_table_verify(policy, sprd_policy->freq_table);
289 pr_err("[cpufreq-dt-sprd] verify failed %d\n",ret);
/*
 * Fetch the device, regulator and clock for @cpu, returning them through
 * @cdev/@creg/@cclk. Tries the legacy "cpu0" regulator supply name first,
 * then "cpu". Returns 0 on success, -EPROBE_DEFER when the regulator or
 * clock provider is not yet registered, or another negative errno.
 *
 * NOTE(review): many lines (loop/branch closers, the success return, the
 * out: labels) are elided in this excerpt.
 */
294 static int allocate_resources(int cpu, struct device **cdev,
295 struct regulator **creg, struct clk **cclk)
297 struct device *cpu_dev;
298 struct device *cpu_dev_reg;
299 struct regulator *cpu_reg;
302 char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
303 struct regulator_dev *rdev;
305 cpu_dev = get_cpu_device(cpu);
/* NOTE(review): cpu_dev is dereferenced here before the failure check
 * visible at the pr_err below — NULL deref if the device is missing. */
306 cpu_dev->of_node = of_get_cpu_node(cpu, NULL);
309 pr_err("failed to get cpu%d device\n", cpu);
313 /* Try "cpu0" for older DTs */
316 cpu_dev_reg = cpu_dev;
323 cpu_reg = regulator_get(cpu_dev_reg, reg);
326 if (IS_ERR(cpu_reg)) {
328 * If cpu's regulator supply node is present, but regulator is
329 * not yet registered, we should try defering probe.
331 if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
332 dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
334 pr_err("[allocate_resources] cpu%d failed defered\n",cpu);
335 return -EPROBE_DEFER;
338 /* Try with "cpu-supply" */
339 if (reg == reg_cpu0) {
/* Running without a regulator is tolerated — voltage scaling is skipped
 * (callers guard every regulator use with !IS_ERR(cpu_reg)). */
344 dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
345 cpu, PTR_ERR(cpu_reg));
346 pr_err("[allocate_resources] no regulator for cpu%d\n",cpu) ;
349 cpu_clk = clk_get(cpu_dev, NULL);
350 if (IS_ERR(cpu_clk)) {
/* Clock lookup failed: drop the regulator reference before bailing. */
352 if (!IS_ERR(cpu_reg))
353 regulator_put(cpu_reg);
355 ret = PTR_ERR(cpu_clk);
358 * If cpu's clk node is present, but clock is not yet
359 * registered, we should try defering probe.
361 if (ret == -EPROBE_DEFER){
362 dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
363 pr_err("[allocate_resources] cpu%d clock not ready, retry\n",cpu);
366 dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
368 pr_err("[allocate_resources] failed to get cpu%d clock: %d\n", cpu,ret);
/* NOTE(review): rdev is dereferenced without a check, and when cpu_reg
 * is an error pointer regulator_get_drvdata(cpu_reg) is invalid — this
 * log line looks unsafe on the no-regulator path; confirm. */
376 rdev = regulator_get_drvdata(cpu_reg);
377 pr_info("[cpufreq-dt] cpu %d resource allocated cpu_reg %s\n",cpu,rdev->desc->supply_name);
/*
 * cpufreq .init hook: per-policy setup. Acquires CPU device/regulator/
 * clock, populates the OPP table from DT (once per CPU, guarded by
 * opp_initialized), disables OPPs the regulator cannot deliver, builds
 * the frequency table, derives transition latency, and replicates the
 * finished cpufreq_policy_sprd into every CPU of the cluster.
 * Returns 0 on success or a negative errno via the (partially elided)
 * goto-cleanup chain at the bottom.
 *
 * NOTE(review): numerous lines are elided in this excerpt, including the
 * gotos that connect the error labels; the cleanup ordering below is as
 * visible, not verified complete.
 */
382 static int cpufreq_init(struct cpufreq_policy *policy)
384 //struct cpufreq_dt_platform_data *pd;
385 struct cpufreq_frequency_table *freq_table;
386 struct device_node *np;
387 struct private_data *priv;
388 struct cpufreq_policy_sprd * sprd_policy;
389 struct device *cpu_dev;
390 struct regulator *cpu_reg;
/* min_uV starts at ULONG_MAX so the first OPP voltage always lowers it. */
392 unsigned long min_uV = ~0, max_uV = 0;
393 unsigned int transition_latency;
400 sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
402 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
404 pr_err("[cpufreq-dt] %s: Failed to allocate resources: %d\n", __func__, ret);
408 np = of_node_get(cpu_dev->of_node);
410 pr_err("[cpufreq-dt] failed to find cpu%d node\n", policy->cpu);
412 goto out_put_reg_clk;
415 /*init the opp table of device */
416 if (sprd_policy->opp_initialized != true) {
417 /* OPPs might be populated at runtime, don't check for error here */
418 of_init_opp_table(cpu_dev);
419 sprd_policy->opp_initialized = true;
423 * But we need OPP table to function so if it is not there let's
424 * give platform code chance to provide it for us.
426 ret = opp_get_opp_count(cpu_dev);
428 pr_err("[cpufreq-dt] OPP table is not ready, deferring probe\n");
433 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
/* Optional DT properties; latency falls back to CPUFREQ_ETERNAL. */
439 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
441 if (of_property_read_u32(np, "clock-latency", &transition_latency))
442 transition_latency = CPUFREQ_ETERNAL;
444 if (!IS_ERR(cpu_reg)) {
445 unsigned long opp_freq = 0;
448 * Disable any OPPs where the connected regulator isn't able to
449 * provide the specified voltage and record minimum and maximum
454 unsigned long opp_uV, tol_uV;
457 opp = opp_find_freq_ceil(cpu_dev, &opp_freq);
460 pr_err("[cpufreq-dt]:opp_find_freq_ceil failed\n");
463 opp_uV = opp_get_voltage(opp);
466 tol_uV = opp_uV * priv->voltage_tolerance / 100;
467 if (regulator_is_supported_voltage(cpu_reg, opp_uV,
474 opp_disable(cpu_dev, opp_freq);
/* Add the regulator's worst-case ramp time (us) to the latency (ns). */
480 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
482 transition_latency += ret * 1000;
486 FIXME:set the proper value here according to the HW properties
488 transition_latency = 100 * 1000; /*ns*/
490 ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
492 pr_err("[cpufreq-dt]failed to init cpufreq table: %d\n", ret);
496 priv->cpu_dev = cpu_dev;
497 priv->cpu_reg = cpu_reg;
498 sprd_policy->driver_data = priv;
500 sprd_policy->clk = cpu_clk;
501 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
503 pr_err("[cpufreq-dt]%s: invalid frequency table: %d\n", __func__,
505 goto out_free_cpufreq_table;
508 sprd_policy->freq_table = freq_table;
509 policy->cpuinfo.transition_latency = transition_latency;
512 pd = cpufreq_get_driver_data();
513 if (!pd || !pd->independent_clocks)
514 cpumask_setall(policy->cpus);
516 pr_info("[cpufreq-dt-sprd] before mask policy[%d]->cpus %x",policy->cpu,*policy->cpus);
/* Policy spans the whole core group (cluster shares one clock). */
517 cpumask_or(policy->cpus, policy->cpus, cpu_coregroup_mask(policy->cpu));
518 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
520 FIX ME: it is possible that the sprd_policy will be re-initialized by cpufreq_add_dev->cpufreq-init. However, it shouldn't
521 happen for there is condition judgement for if the policy has been initialized by the reason of they are in the same cluster
/* Mirror the finished per-cpu state into every sibling CPU. */
523 for_each_cpu(j, policy->cpus){
524 memcpy(&per_cpu(cpufreq_cpu_data_sprd, j),sprd_policy,sizeof(*sprd_policy));
526 pr_info("[cpufreq-dt-sprd] after mask policy[%d]->cpus %x",policy->cpu,*policy->cpus);
530 pr_info("[cpufreq-dt]: cpu %d cpufreq table initialized success\n", policy->cpu);
532 policy->cur = clk_get_rate(cpu_clk) / 1000;
534 pr_info("[cpufreq-dt]:policy->cur %d\n",policy->cur);
/* Error unwind labels (goto targets; connecting gotos partly elided). */
537 out_free_cpufreq_table:
538 pr_err("[cpufreq-dt]:out_free_cpufreq_table err return\n");
539 opp_free_cpufreq_table(cpu_dev, &freq_table);
541 pr_err("[cpufreq-dt]:out_free_priv err return\n");
545 of_free_opp_table(cpu_dev);
547 pr_err("[cpufreq-dt]:out_free_opp err return\n");
551 if (!IS_ERR(cpu_reg))
552 regulator_put(cpu_reg);
554 pr_err("[cpufreq-dt]:out_put_reg_clk err return\n");
/*
 * cpufreq .exit hook: release everything cpufreq_init()/cpufreq_ready()
 * acquired — cooling device, frequency table, OPP table, clock and
 * regulator references (final return elided in this excerpt).
 */
558 static int cpufreq_exit(struct cpufreq_policy *policy)
560 struct cpufreq_policy_sprd * sprd_policy;
561 struct private_data *priv;
566 sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
567 priv = sprd_policy->driver_data;
570 cpufreq_cooling_unregister(priv->cdev);
571 opp_free_cpufreq_table(priv->cpu_dev, &sprd_policy->freq_table);
573 of_free_opp_table(priv->cpu_dev);
575 clk_put(sprd_policy->clk);
576 if (!IS_ERR(priv->cpu_reg))
577 regulator_put(priv->cpu_reg);
/*
 * cpufreq .ready hook: once the policy is live, register a CPU cooling
 * device if the CPU's DT node advertises "#cooling-cells". Registration
 * failure is logged but non-fatal — cpufreq keeps running uncooled.
 * NOTE(review): priv->cdev is left as an error pointer on failure; the
 * visible line in cpufreq_exit() unregisters it without an IS_ERR check
 * (a guard may exist in the elided lines) — confirm.
 */
583 static void cpufreq_ready(struct cpufreq_policy *policy)
585 struct private_data *priv;
586 struct device_node *np;
587 struct cpufreq_policy_sprd * sprd_policy;
592 sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
593 priv = sprd_policy->driver_data;
594 np= of_node_get(priv->cpu_dev->of_node);
599 * For now, just loading the cooling device;
600 * thermal DT code takes care of matching them.
602 if (of_find_property(np, "#cooling-cells", NULL)) {
603 priv->cdev = of_cpufreq_cooling_register(np,
604 policy->related_cpus);
605 if (IS_ERR(priv->cdev)) {
606 dev_err(priv->cpu_dev,
607 "running cpufreq without cooling device: %ld\n",
608 PTR_ERR(priv->cdev));
/*
 * cpufreq .get hook: report @cpu's current frequency in kHz from its
 * clock. Returns 0-equivalent on failure (the early-return line is
 * elided in this excerpt).
 * NOTE(review): `cpufreq_generic_get` collides with the core cpufreq
 * helper of the same name in later kernels — consider renaming.
 * NOTE(review): sprd_policy points into per-cpu data and can never be
 * NULL, so the ternary in the error message always prints "clk".
 */
617 unsigned int cpufreq_generic_get(unsigned int cpu)
619 struct cpufreq_policy_sprd *sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, cpu);
621 if (!sprd_policy || IS_ERR(sprd_policy->clk)) {
622 pr_err("%s: No %s associated to cpu: %d\n",
623 __func__, sprd_policy ? "clk" : "policy", cpu);
627 return clk_get_rate(sprd_policy->clk) / 1000;
/*
 * sysfs attributes exposed per policy (scaling_available_frequencies).
 * NOTE(review): the terminating NULL entry is not visible in this
 * excerpt — confirm it exists; freq_attr arrays must be NULL-terminated.
 */
630 static struct freq_attr *cpu0_cpufreq_attr[] = {
631 &cpufreq_freq_attr_scaling_available_freqs,
/*
 * cpufreq driver descriptor wiring the hooks defined above.
 * NOTE(review): named "cpufreq-dt" like the generic upstream driver —
 * the two cannot coexist; presumably intentional as a replacement.
 */
635 static struct cpufreq_driver dt_cpufreq_driver = {
636 .flags = CPUFREQ_STICKY,
638 .target = set_target,
639 .get = cpufreq_generic_get,
640 .init = cpufreq_init,
641 .exit = cpufreq_exit,
642 .name = "cpufreq-dt",
644 .ready = cpufreq_ready,
646 .attr = cpu0_cpufreq_attr,
647 .have_governor_per_policy = true,
/*
 * Platform probe: sanity-check that CPU0's clock and regulator are
 * available (deferring otherwise), release the probe-time references,
 * stash the platform data, and register the cpufreq driver.
 * Per-cluster work happens later in cpufreq_init().
 */
651 static int sprd_dt_cpufreq_probe(struct platform_device *pdev)
653 struct device *cpu_dev;
654 struct regulator *cpu_reg;
659 * All per-cluster (CPUs sharing clock/voltages) initialization is done
660 * from ->init(). In probe(), we just need to make sure that clk and
661 * regulators are available. Else defer probe and retry.
663 * FIXME: Is checking this only for CPU0 sufficient ?
665 ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
667 pr_err("cpufreq-dt-sprd register failed %d\n",ret);
/* Probe only needed to prove availability; drop the references now. */
671 if (!IS_ERR(cpu_reg))
672 regulator_put(cpu_reg);
675 dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
678 ret = cpufreq_register_driver(&dt_cpufreq_driver);
680 pr_err("failed register driver: %d\n", ret);
/* Platform remove: unregister the cpufreq driver (return elided here). */
685 static int sprd_dt_cpufreq_remove(struct platform_device *pdev)
687 cpufreq_unregister_driver(&dt_cpufreq_driver);
/*
 * Platform driver glue; bound by a matching "cpufreq-dt-sprd" platform
 * device registered elsewhere (board/SoC code, not visible here).
 */
691 static struct platform_driver sprd_dt_cpufreq_platdrv = {
693 .name = "cpufreq-dt-sprd",
695 .probe = sprd_dt_cpufreq_probe,
696 .remove = sprd_dt_cpufreq_remove,
698 module_platform_driver(sprd_dt_cpufreq_platdrv);
700 MODULE_AUTHOR("zhaoyang.huang <zhaoyang.huang@spreadtrum.com>");
701 MODULE_DESCRIPTION("bL cpufreq driver");
702 MODULE_LICENSE("GPL");