/* drivers/cpufreq/cpufreq-dt-sprd.c — Spreadtrum cpufreq-dt driver (linux-3.10-sc7730) */
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2015 Spreadtrum.
 * zhaoyang.huang <zhaoyang.huang@spreadtrum.com>
 *
 * The OPP code in function set_target() is reused from
 * drivers/cpufreq/omap-cpufreq.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
14
15 #define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
16
17 #include <linux/clk.h>
18 #include <linux/cpu.h>
19 #include <linux/cpu_cooling.h>
20 #include <linux/cpufreq.h>
21 #include <linux/cpumask.h>
22 #include <linux/err.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/opp.h>
26 #include <linux/platform_device.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/slab.h>
29 #include <linux/thermal.h>
30 #include <linux/regulator/driver.h>
31 #include <linux/delay.h>
32
33 #include <soc/sprd/hardware.h>
34 #include <soc/sprd/regulator.h>
35 #include <soc/sprd/adi.h>
36 #include <soc/sprd/sci.h>
37 #include <soc/sprd/sci_glb_regs.h>
38 #include <soc/sprd/arch_misc.h>
39
#ifndef CONFIG_REGULATOR
/*
 * Stubs kept for builds without the regulator framework. Most were
 * disabled (commented out below); only regulator_set_voltage_time()
 * is still compiled in as a no-op.
 */
/*
int regulator_get_voltage(struct regulator *reg)
{
        return 0;
}
int regulator_set_voltage_tol(struct regulator * reg,int volt,int tol)
{
        return 0;
}
struct regulator *regulator_get(struct device *dev,char *reg)
{
        return 0;
}
struct regulator *regulator_put(struct device *dev)
{
        return 0;
}
*/
/* No-op stub: report zero voltage-ramp latency when CONFIG_REGULATOR is off. */
int regulator_set_voltage_time(struct regulator * reg,int min_uV,int max_uV)
{
        return 0;
}
#endif
/*
 * Platform data for this driver. NOTE(review): only referenced from
 * commented-out code in cpufreq_init()/probe in this file.
 */
struct cpufreq_dt_platform_data {
        /*
         * True when each CPU has its own clock to control its
         * frequency, false when all CPUs are controlled by a single
         * clock.
         */
        bool independent_clocks;
};
72
/* Per-policy private state, stored in cpufreq_policy_sprd.driver_data. */
struct private_data {
        struct device *cpu_dev;                 /* CPU device owning the OPP table */
        struct regulator *cpu_reg;              /* CPU supply; may be an ERR_PTR */
        struct thermal_cooling_device *cdev;    /* cooling device, NULL if absent */
        unsigned int voltage_tolerance; /* in percentage */
};
79
/*
 * Per-CPU shadow of the cpufreq policy; cpufreq_init() copies one
 * instance to every CPU in the policy's cpumask.
 */
struct cpufreq_policy_sprd {
        struct clk              *clk;           /* CPU cluster clock */
        struct cpufreq_policy           *policy; /* see above */
        struct cpufreq_frequency_table  *freq_table;
        /* For cpufreq driver's internal use */
        void                    *driver_data;   /* points to struct private_data */
        bool opp_initialized;   /* guards one-shot of_init_opp_table() */
};

static DEFINE_PER_CPU(struct cpufreq_policy_sprd , cpufreq_cpu_data_sprd);

/* TDPLL-derived frequency used as the parking clock, in kHz. */
#define SHARK_TDPLL_FREQUENCY   (768000)
92
93 static int cpufreq_set_clock(struct cpufreq_policy *policy,unsigned int freq)
94 {
95         int ret;
96         struct cpufreq_policy_sprd * sprd_policy;
97         int reg_mcu_ckg_div_set = 0;
98         int reg_mcu_ckg_div_clr = 0;
99         char *pmpllclk;
100         if(!policy)
101                 return -ENODEV;
102
103         sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
104
105         reg_mcu_ckg_div_set = (policy->cpu < 4) ? BITS_APCPU_LIT_MCU_CKG_DIV(1):BITS_APCPU_BIG_MCU_CKG_DIV(1);
106         reg_mcu_ckg_div_clr = (policy->cpu < 4) ? BITS_APCPU_LIT_MCU_CKG_DIV(0):BITS_APCPU_BIG_MCU_CKG_DIV(0);
107
108         pmpllclk = (policy->cpu < 4) ? "clk_mpll0":"clk_mpll";
109         struct clk *mpllclk = clk_get_sys(NULL, pmpllclk);
110         if (IS_ERR(mpllclk)){
111                 pr_err("mpllclk get err\n");
112                 return PTR_ERR(mpllclk);
113         }
114 #if !defined(CONFIG_ARCH_SCX35L)
115         struct clk *tdpllclk = clk_get_sys(NULL, "clk_tdpll");
116 #else
117         struct clk *tdpllclk = clk_get_sys(NULL, "clk_768m");
118 #endif
119         if (IS_ERR(tdpllclk)){
120                 pr_err("tdpllclk get err\n");
121                 return PTR_ERR(tdpllclk);
122         }
123         ret = clk_set_parent(sprd_policy->clk, tdpllclk);
124         if (ret){
125                 pr_err("Failed to set cpu parent to tdpll %d\n",ret);
126                 return ret;
127         }
128
129         if (freq == SHARK_TDPLL_FREQUENCY/2) {
130                 //ca7 clk div
131                 #ifndef CONFIG_ARCH_SCX35L
132                 sci_glb_set(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
133                 #else
134                 sci_glb_set(REG_AP_AHB_CA7_CKG_DIV_CFG,reg_mcu_ckg_div_set);
135                 #endif
136                 sci_glb_clr(REG_PMU_APB_MPLL_REL_CFG, BIT_MPLL_AP_SEL);
137         } else if (freq == SHARK_TDPLL_FREQUENCY) {
138                 #ifndef CONFIG_ARCH_SCX35L
139                 sci_glb_clr(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
140                 #else
141                 sci_glb_clr(REG_AP_AHB_CA7_CKG_DIV_CFG, reg_mcu_ckg_div_clr);
142                 #endif
143                 sci_glb_clr(REG_PMU_APB_MPLL_REL_CFG, BIT_MPLL_AP_SEL);
144         } else {
145                 if (!(sci_glb_read(REG_PMU_APB_MPLL_REL_CFG, -1) & BIT_MPLL_AP_SEL)) {
146                         sci_glb_set(REG_PMU_APB_MPLL_REL_CFG, BIT_MPLL_AP_SEL);
147                         udelay(500);
148                 }
149                 ret = clk_set_rate(mpllclk, freq);
150                 if (ret)
151                         pr_err("Failed to set mpll rate %d\n",ret);
152                 ret = clk_set_parent(sprd_policy->clk, mpllclk);
153                 if (ret)
154                         pr_err("Failed to set cpu parent to mpll %d\n",ret);
155                 #ifndef CONFIG_ARCH_SCX35L
156                 sci_glb_clr(REG_AP_AHB_CA7_CKG_CFG, BITS_CA7_MCU_CKG_DIV(1));
157                 #else
158                 sci_glb_clr(REG_AP_AHB_CA7_CKG_DIV_CFG, reg_mcu_ckg_div_clr);
159                 #endif
160         }
161         pr_info("[DVFS-dt]: Set Freq %dKHz = %dKHz",freq / 1000,clk_get_rate(sprd_policy->clk) / 1000);
162         return 0;
163 }
164
165 static int set_target(struct cpufreq_policy *policy,unsigned int target_freq, unsigned int relation)
166 {
167         struct opp *opp;
168         struct cpufreq_frequency_table *freq_table;
169         struct cpufreq_policy_sprd * sprd_policy;
170         struct clk *cpu_clk;
171         struct private_data *priv;
172         struct device *cpu_dev;
173         struct regulator *cpu_reg;
174         unsigned long volt = 0, volt_old = 0, tol = 0;
175         unsigned int old_freq, new_freq;
176         long freq_Hz, freq_exact;
177         int ret;
178         int index;
179         struct cpufreq_freqs freqs;
180
181         if(!policy)
182                 return -ENODEV;
183
184         sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
185         freq_table = sprd_policy->freq_table;
186
187         ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
188                                              relation, &index);
189         if (ret) {
190                 pr_err("failed to match target freqency %d: %d\n",
191                        target_freq, ret);
192                 return ret;
193         }
194
195         cpu_clk = sprd_policy->clk;
196         priv = sprd_policy->driver_data;
197         cpu_dev = priv->cpu_dev;
198         cpu_reg = priv->cpu_reg;
199
200         freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
201         if (freq_Hz <= 0)
202                 freq_Hz = freq_table[index].frequency * 1000;
203
204         freq_exact = freq_Hz;
205         new_freq = freq_Hz / 1000;
206         old_freq = clk_get_rate(cpu_clk) / 1000;
207         freqs.old = old_freq;
208         freqs.new = new_freq;
209
210         if (!IS_ERR(cpu_reg)) {
211                 unsigned long opp_freq;
212
213                 rcu_read_lock();
214                 opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
215                 if (IS_ERR(opp)) {
216                         rcu_read_unlock();
217                         dev_err(cpu_dev, "failed to find OPP for %ld\n",
218                                 freq_Hz);
219                         return PTR_ERR(opp);
220                 }
221                 volt = opp_get_voltage(opp);
222                 opp_freq = opp_get_freq(opp);
223                 rcu_read_unlock();
224                 tol = volt * priv->voltage_tolerance / 100;
225                 volt_old = regulator_get_voltage(cpu_reg);
226                 pr_info("DVFS-dt:Found OPP: %ld kHz, %ld uV\n",
227                         opp_freq / 1000, volt);
228         }
229
230         pr_info("DVFS-dt:%u MHz, %ld mV --> %u MHz, %ld mV\n",
231                 old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
232                 new_freq / 1000, volt ? volt / 1000 : -1);
233
234         cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
235
236         /* scaling up?  scale voltage before frequency */
237         if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
238                 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
239                 if (ret) {
240                         pr_info("DVFS-dt:failed to scale voltage %d %d up: %d\n",
241                                 volt,tol,ret);
242                         return ret;
243                 }
244         }
245         /*
246         ret = clk_set_rate(cpu_clk, freq_exact);
247         */
248         ret = cpufreq_set_clock(policy,freq_exact);
249
250         if (ret) {
251                 pr_info("DVFS-dt:failed to set clock %d rate: %d\n",freq_exact, ret);
252                 if (!IS_ERR(cpu_reg) && volt_old > 0)
253                         regulator_set_voltage_tol(cpu_reg, volt_old, tol);
254                 return ret;
255         }
256
257
258         /* scaling down?  scale voltage after frequency */
259         if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
260                 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
261                 if (ret) {
262                         pr_info("DVFS-dt:failed to scale voltage %d %d down: %d\n",
263                                 volt,tol,ret);
264                         /*
265                         ret = clk_set_rate(cpu_clk, old_freq);
266                         */
267                         ret = cpufreq_set_clock(policy, old_freq);
268                         return ret;
269                 }
270         }
271
272         cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
273
274         return ret;
275 }
276
277 static int verify(struct cpufreq_policy *policy)
278 {
279         struct cpufreq_policy_sprd * sprd_policy;
280         int ret = 0;
281         if(!policy)
282                 return -ENODEV;
283
284         sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
285
286         ret = cpufreq_frequency_table_verify(policy, sprd_policy->freq_table);
287
288         if(ret)
289                 pr_err("[cpufreq-dt-sprd] verify failed %d\n",ret);
290
291         return ret;
292 }
293
294 static int allocate_resources(int cpu, struct device **cdev,
295                               struct regulator **creg, struct clk **cclk)
296 {
297         struct device *cpu_dev;
298         struct device *cpu_dev_reg;
299         struct regulator *cpu_reg;
300         struct clk *cpu_clk;
301         int ret = 0;
302         char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
303         struct regulator_dev *rdev;
304
305         cpu_dev = get_cpu_device(cpu);
306         cpu_dev->of_node = of_get_cpu_node(cpu, NULL);
307
308         if (!cpu_dev) {
309                 pr_err("failed to get cpu%d device\n", cpu);
310                 return -ENODEV;
311         }
312
313         /* Try "cpu0" for older DTs */
314         if (!cpu){
315                 reg = reg_cpu0;
316                 cpu_dev_reg = cpu_dev;
317         }
318         else{
319                 reg = "vddbigarm";
320                 cpu_dev_reg = NULL;
321         }
322
323         cpu_reg = regulator_get(cpu_dev_reg, reg);
324
325 try_again:
326         if (IS_ERR(cpu_reg)) {
327                 /*
328                  * If cpu's regulator supply node is present, but regulator is
329                  * not yet registered, we should try defering probe.
330                  */
331                 if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
332                         dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
333                                 cpu);
334                         pr_err("[allocate_resources] cpu%d failed defered\n",cpu);
335                         return -EPROBE_DEFER;
336                 }
337
338                 /* Try with "cpu-supply" */
339                 if (reg == reg_cpu0) {
340                         reg = reg_cpu;
341                         goto try_again;
342                 }
343
344                 dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
345                         cpu, PTR_ERR(cpu_reg));
346                 pr_err("[allocate_resources] no regulator for cpu%d\n",cpu) ;
347         }
348
349         cpu_clk = clk_get(cpu_dev, NULL);
350         if (IS_ERR(cpu_clk)) {
351                 /* put regulator */
352                 if (!IS_ERR(cpu_reg))
353                         regulator_put(cpu_reg);
354
355                 ret = PTR_ERR(cpu_clk);
356
357                 /*
358                  * If cpu's clk node is present, but clock is not yet
359                  * registered, we should try defering probe.
360                  */
361                 if (ret == -EPROBE_DEFER){
362                         dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
363                         pr_err("[allocate_resources] cpu%d clock not ready, retry\n",cpu);
364                 }
365                 else{
366                         dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
367                                 ret);
368                         pr_err("[allocate_resources] failed to get cpu%d clock: %d\n", cpu,ret);
369                 }
370         } else {
371                 *cdev = cpu_dev;
372                 *creg = cpu_reg;
373                 *cclk = cpu_clk;
374         }
375         /*
376         rdev = regulator_get_drvdata(cpu_reg);
377         pr_info("[cpufreq-dt] cpu %d resource allocated cpu_reg %s\n",cpu,rdev->desc->supply_name);
378         */
379         return ret;
380 }
381
/*
 * cpufreq ->init hook: set up the OPP table, frequency table, regulator
 * constraints and per-CPU shadow state for @policy's cluster.
 *
 * Ordering is significant: resources are acquired first, then the OPP
 * table is populated (once per cluster, guarded by opp_initialized),
 * then unsupportable OPPs are disabled before the cpufreq table is
 * built. Error paths unwind via the goto chain at the bottom.
 *
 * Returns 0 on success or a negative errno (-EPROBE_DEFER when the OPP
 * table is not yet available).
 */
static int cpufreq_init(struct cpufreq_policy *policy)
{
        //struct cpufreq_dt_platform_data *pd;
        struct cpufreq_frequency_table *freq_table;
        struct device_node *np;
        struct private_data *priv;
        struct cpufreq_policy_sprd * sprd_policy;
        struct device *cpu_dev;
        struct regulator *cpu_reg;
        struct clk *cpu_clk;
        unsigned long min_uV = ~0, max_uV = 0;  /* ~0 = "no OPP seen yet" sentinel */
        unsigned int transition_latency;
        int ret;
        int j = 0;

        if(!policy)
                return -ENODEV;

        sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);

        ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
        if (ret) {
                pr_err("[cpufreq-dt] %s: Failed to allocate resources: %d\n", __func__, ret);
                return ret;
        }

        np = of_node_get(cpu_dev->of_node);
        if (!np) {
                pr_err("[cpufreq-dt] failed to find cpu%d node\n", policy->cpu);
                ret = -ENOENT;
                goto out_put_reg_clk;
        }

        /*init the opp table of device */
        if (sprd_policy->opp_initialized != true) {
                /* OPPs might be populated at runtime, don't check for error here */
                of_init_opp_table(cpu_dev);
                sprd_policy->opp_initialized = true;
        }

        /*
         * But we need OPP table to function so if it is not there let's
         * give platform code chance to provide it for us.
         */
        ret = opp_get_opp_count(cpu_dev);
        if (ret <= 0) {
                pr_err("[cpufreq-dt] OPP table is not ready, deferring probe\n");
                ret = -EPROBE_DEFER;
                goto out_free_opp;
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto out_free_opp;
        }

        /* tolerance defaults to 0 when the DT property is absent */
        of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);

        if (of_property_read_u32(np, "clock-latency", &transition_latency))
                transition_latency = CPUFREQ_ETERNAL;

        if (!IS_ERR(cpu_reg)) {
                unsigned long opp_freq = 0;

                /*
                 * Disable any OPPs where the connected regulator isn't able to
                 * provide the specified voltage and record minimum and maximum
                 * voltage levels.
                 */
                while (1) {
                        struct opp *opp;
                        unsigned long opp_uV, tol_uV;

                        rcu_read_lock();
                        opp = opp_find_freq_ceil(cpu_dev, &opp_freq);
                        if (IS_ERR(opp)) {
                                rcu_read_unlock();
                                /* no more OPPs above opp_freq: scan complete */
                                pr_err("[cpufreq-dt]:opp_find_freq_ceil failed\n");
                                break;
                        }
                        opp_uV = opp_get_voltage(opp);
                        rcu_read_unlock();

                        tol_uV = opp_uV * priv->voltage_tolerance / 100;
                        if (regulator_is_supported_voltage(cpu_reg, opp_uV,
                                                           opp_uV + tol_uV)) {
                                if (opp_uV < min_uV)
                                        min_uV = opp_uV;
                                if (opp_uV > max_uV)
                                        max_uV = opp_uV;
                        } else {
                                opp_disable(cpu_dev, opp_freq);
                        }

                        /* bump past the current OPP so ceil() finds the next one */
                        opp_freq++;
                }

                ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
                if (ret > 0)
                        transition_latency += ret * 1000;
        }

        /*
        FIXME:set the proper value here according to the HW properties
        */
        /* NOTE(review): overrides the DT/regulator latency computed above */
        transition_latency = 100 * 1000; /*ns*/

        ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                pr_err("[cpufreq-dt]failed to init cpufreq table: %d\n", ret);
                goto out_free_priv;
        }

        priv->cpu_dev = cpu_dev;
        priv->cpu_reg = cpu_reg;
        sprd_policy->driver_data = priv;

        sprd_policy->clk = cpu_clk;
        ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
        if (ret) {
                pr_err("[cpufreq-dt]%s: invalid frequency table: %d\n", __func__,
                        ret);
                goto out_free_cpufreq_table;
        }

        sprd_policy->freq_table = freq_table;
        policy->cpuinfo.transition_latency = transition_latency;

/*
        pd = cpufreq_get_driver_data();
        if (!pd || !pd->independent_clocks)
                cpumask_setall(policy->cpus);
*/
        pr_info("[cpufreq-dt-sprd] before mask policy[%d]->cpus %x",policy->cpu,*policy->cpus);
        /* one policy covers the whole core group (cluster) */
        cpumask_or(policy->cpus, policy->cpus, cpu_coregroup_mask(policy->cpu));
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
        /*
        FIX ME: it is possible that the sprd_policy will be re-initialized by cpufreq_add_dev->cpufreq-init. However, it shouldn't 
        happen for there is condition judgement for if the policy has been initialized by the reason of they are in the same cluster
        */
        /* replicate the shadow state to every CPU sharing this policy */
        for_each_cpu(j, policy->cpus){
                memcpy(&per_cpu(cpufreq_cpu_data_sprd, j),sprd_policy,sizeof(*sprd_policy));
        }
        pr_info("[cpufreq-dt-sprd] after mask policy[%d]->cpus %x",policy->cpu,*policy->cpus);

        of_node_put(np);

        pr_info("[cpufreq-dt]: cpu %d cpufreq table initialized success\n", policy->cpu);

        policy->cur = clk_get_rate(cpu_clk) / 1000;

        pr_info("[cpufreq-dt]:policy->cur %d\n",policy->cur);
        return 0;

out_free_cpufreq_table:
        pr_err("[cpufreq-dt]:out_free_cpufreq_table err return\n");
        opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
        pr_err("[cpufreq-dt]:out_free_priv err return\n");
        kfree(priv);
out_free_opp:
        /*
        of_free_opp_table(cpu_dev);
        */
        pr_err("[cpufreq-dt]:out_free_opp err return\n");
        of_node_put(np);
out_put_reg_clk:
        clk_put(cpu_clk);
        if (!IS_ERR(cpu_reg))
                regulator_put(cpu_reg);

        pr_err("[cpufreq-dt]:out_put_reg_clk err return\n");
        return ret;
}
557
558 static int cpufreq_exit(struct cpufreq_policy *policy)
559 {
560         struct cpufreq_policy_sprd * sprd_policy;
561         struct private_data *priv;
562
563         if(!policy)
564                 return -ENODEV;
565
566         sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
567         priv = sprd_policy->driver_data;
568
569         if (priv->cdev)
570                 cpufreq_cooling_unregister(priv->cdev);
571         opp_free_cpufreq_table(priv->cpu_dev, &sprd_policy->freq_table);
572         /*
573         of_free_opp_table(priv->cpu_dev);
574         */
575         clk_put(sprd_policy->clk);
576         if (!IS_ERR(priv->cpu_reg))
577                 regulator_put(priv->cpu_reg);
578         kfree(priv);
579
580         return 0;
581 }
582
583 static void cpufreq_ready(struct cpufreq_policy *policy)
584 {
585         struct private_data *priv;
586         struct device_node *np;
587         struct cpufreq_policy_sprd * sprd_policy;
588
589         if(!policy)
590                 return -ENODEV;
591
592         sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, policy->cpu);
593         priv = sprd_policy->driver_data;
594         np= of_node_get(priv->cpu_dev->of_node);
595         if (WARN_ON(!np))
596                 return;
597
598         /*
599          * For now, just loading the cooling device;
600          * thermal DT code takes care of matching them.
601          */
602         if (of_find_property(np, "#cooling-cells", NULL)) {
603                 priv->cdev = of_cpufreq_cooling_register(np,
604                                                          policy->related_cpus);
605                 if (IS_ERR(priv->cdev)) {
606                         dev_err(priv->cpu_dev,
607                                 "running cpufreq without cooling device: %ld\n",
608                                 PTR_ERR(priv->cdev));
609
610                         priv->cdev = NULL;
611                 }
612         }
613
614         of_node_put(np);
615 }
616
617 unsigned int cpufreq_generic_get(unsigned int cpu)
618 {
619         struct cpufreq_policy_sprd *sprd_policy = &per_cpu(cpufreq_cpu_data_sprd, cpu);
620
621         if (!sprd_policy || IS_ERR(sprd_policy->clk)) {
622                 pr_err("%s: No %s associated to cpu: %d\n",
623                        __func__, sprd_policy ? "clk" : "policy", cpu);
624                 return 0;
625         }
626
627         return clk_get_rate(sprd_policy->clk) / 1000;
628 }
629
/* sysfs attributes exposed per policy: scaling_available_frequencies. */
static struct freq_attr *cpu0_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};
634
/* cpufreq driver ops; the ->ready hook is intentionally disabled. */
static struct cpufreq_driver dt_cpufreq_driver = {
        .flags = CPUFREQ_STICKY,        /* stay registered even with no managed CPU */
        .verify = verify,
        .target = set_target,
        .get = cpufreq_generic_get,
        .init = cpufreq_init,
        .exit = cpufreq_exit,
        .name = "cpufreq-dt",
        /*
        .ready = cpufreq_ready,
        */
        .attr = cpu0_cpufreq_attr,
        .have_governor_per_policy = true,

};
650
651 static int sprd_dt_cpufreq_probe(struct platform_device *pdev)
652 {
653         struct device *cpu_dev;
654         struct regulator *cpu_reg;
655         struct clk *cpu_clk;
656         int ret;
657
658         /*
659          * All per-cluster (CPUs sharing clock/voltages) initialization is done
660          * from ->init(). In probe(), we just need to make sure that clk and
661          * regulators are available. Else defer probe and retry.
662          *
663          * FIXME: Is checking this only for CPU0 sufficient ?
664          */
665         ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
666         if (ret){
667                 pr_err("cpufreq-dt-sprd register failed %d\n",ret);
668                 return ret;
669         }
670         clk_put(cpu_clk);
671         if (!IS_ERR(cpu_reg))
672                 regulator_put(cpu_reg);
673
674         /*
675         dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
676         */
677
678         ret = cpufreq_register_driver(&dt_cpufreq_driver);
679         if (ret)
680                 pr_err("failed register driver: %d\n", ret);
681
682         return ret;
683 }
684
/* Platform remove: unregister the cpufreq driver; ->exit() cleans up per-policy state. */
static int sprd_dt_cpufreq_remove(struct platform_device *pdev)
{
        cpufreq_unregister_driver(&dt_cpufreq_driver);
        return 0;
}
690
/* Platform driver glue; bound by a matching "cpufreq-dt-sprd" device. */
static struct platform_driver sprd_dt_cpufreq_platdrv = {
        .driver = {
                .name   = "cpufreq-dt-sprd",
        },
        .probe          = sprd_dt_cpufreq_probe,
        .remove         = sprd_dt_cpufreq_remove,
};
module_platform_driver(sprd_dt_cpufreq_platdrv);

MODULE_AUTHOR("zhaoyang.huang <zhaoyang.huang@spreadtrum.com>");
MODULE_DESCRIPTION("bL cpufreq driver");
MODULE_LICENSE("GPL");