/*
 * Copyright (C) 2012 Spreadtrum Communications Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
16 #include <linux/init.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/debugfs.h>
20 #include <linux/err.h>
21 #include <linux/platform_device.h>
22 #include <linux/clk.h>
23 #include <linux/clkdev.h>
24 #include <linux/cpufreq.h>
27 #include <mach/hardware.h>
28 #include <mach/regs_glb.h>
29 #include <mach/regs_ahb.h>
31 #include <ubi_uboot.h>
32 #include <asm/arch/sc8810_reg_base.h>
33 #include <asm/arch/regs_glb.h>
34 #include <asm/arch/__regs_ahb.h>
35 #include <linux/compiler.h>
37 #define __ffs(x) (ffs(x) - 1)
39 #define DEFINE_SPINLOCK(...)
40 #define spin_lock_irqsave(...)
41 #define spin_unlock_irqrestore(...)
44 #include <asm/arch/clock.h>
45 #include <asm/arch/__clock_tree.h>
47 //#ifndef CONFIG_NKERNEL
49 const u32 __clkinit0 __clkinit_begin = 0xeeeebbbb;
50 const u32 __clkinit2 __clkinit_end = 0xddddeeee;
52 const u32 __clkinit0 __clkinit_begin = &CLK_LK_clk_mpll;
53 const u32 __clkinit2 __clkinit_end = &CLK_LK_clk_mpll;
#ifndef CONFIG_NKERNEL
//#define debug0 printf
#endif /* NOTE(review): #endif reconstructed — the matching line was lost
	* in the extract; confirm it closes here and not further down. */

/* Protects clock usage counters and ops callbacks (no-op under U-Boot). */
DEFINE_SPINLOCK(clocks_lock);
62 int clk_enable(struct clk *clk)
65 if (IS_ERR_OR_NULL(clk))
68 clk_enable(clk->parent);
70 spin_lock_irqsave(&clocks_lock, flags);
71 if ((clk->usage++) == 0 && clk->enable)
72 (clk->enable) (clk, 1, &flags);
73 spin_unlock_irqrestore(&clocks_lock, flags);
74 debug0("clk %p, usage %d\n", clk, clk->usage);
78 EXPORT_SYMBOL(clk_enable);
80 void clk_disable(struct clk *clk)
83 if (IS_ERR_OR_NULL(clk))
86 spin_lock_irqsave(&clocks_lock, flags);
87 if ((--clk->usage) == 0 && clk->enable)
88 (clk->enable) (clk, 0, &flags);
89 if (WARN(clk->usage < 0,
90 "warning: clock (%s) usage (%d)\n", clk->regs->name, clk->usage)) {
91 clk->usage = 0; /* force reset clock refcnt */
92 spin_unlock_irqrestore(&clocks_lock, flags);
95 spin_unlock_irqrestore(&clocks_lock, flags);
96 debug0("clk %p, usage %d\n", clk, clk->usage);
97 clk_disable(clk->parent);
100 EXPORT_SYMBOL(clk_disable);
103 * clk_force_disable - force disable clock output
106 * Forcibly disable the clock output.
107 * NOTE: this *will* disable the clock output even if other consumer
108 * devices have it enabled. This should be used for situations when device
109 * suspend or damage will likely occur if the devices is not disabled.
111 void clk_force_disable(struct clk *clk)
113 if (IS_ERR_OR_NULL(clk))
116 debug("clk %p, usage %d\n", clk, clk->usage);
117 while (clk->usage > 0) {
122 EXPORT_SYMBOL(clk_force_disable);
124 unsigned long clk_get_rate(struct clk *clk)
126 debug0("clk %p, rate %lu\n", clk, IS_ERR_OR_NULL(clk) ? -1 : clk->rate);
127 if (IS_ERR_OR_NULL(clk))
135 if (clk->ops != NULL && clk->ops->get_rate != NULL)
136 return (clk->ops->get_rate) (clk);
138 if (clk->parent != NULL)
139 return clk_get_rate(clk->parent);
144 EXPORT_SYMBOL(clk_get_rate);
146 long clk_round_rate(struct clk *clk, unsigned long rate)
148 if (!IS_ERR_OR_NULL(clk) && clk->ops && clk->ops->round_rate)
149 return (clk->ops->round_rate) (clk, rate);
154 EXPORT_SYMBOL(clk_round_rate);
156 int clk_set_rate(struct clk *clk, unsigned long rate)
160 debug0("clk %p, rate %lu\n", clk, rate);
161 if (IS_ERR_OR_NULL(clk) || rate == 0)
164 /* We do not default just do a clk->rate = rate as
165 * the clock may have been made this way by choice.
168 //WARN_ON(clk->ops == NULL);
169 //WARN_ON(clk->ops && clk->ops->set_rate == NULL);
171 if (clk->ops == NULL || clk->ops->set_rate == NULL)
174 spin_lock_irqsave(&clocks_lock, flags);
175 ret = (clk->ops->set_rate) (clk, rate);
176 spin_unlock_irqrestore(&clocks_lock, flags);
180 EXPORT_SYMBOL(clk_set_rate);
182 struct clk *clk_get_parent(struct clk *clk)
187 EXPORT_SYMBOL(clk_get_parent);
189 int clk_set_parent(struct clk *clk, struct clk *parent)
193 struct clk *old_parent = clk_get_parent(clk);
194 debug0("clk %p, parent %p <<< %p\n", clk, parent, old_parent);
195 if (IS_ERR_OR_NULL(clk) || IS_ERR(parent))
198 spin_lock_irqsave(&clocks_lock, flags);
199 if (clk->ops && clk->ops->set_parent)
200 ret = (clk->ops->set_parent) (clk, parent);
201 spin_unlock_irqrestore(&clocks_lock, flags);
203 #if defined(CONFIG_DEBUG_FS)
204 /* FIXME: call debugfs_rename() out of spin lock,
205 * maybe not match with the real parent-child relationship
206 * in some extreme scenes.
208 if (0 == ret && old_parent && old_parent->dent && clk->dent
209 && parent && parent->dent) {
210 debug0("directory dentry move %s to %s\n",
211 old_parent->regs->name, parent->regs->name);
212 debugfs_rename(old_parent->dent, clk->dent,
213 parent->dent, clk->regs->name);
219 EXPORT_SYMBOL(clk_set_parent);
221 static int sci_clk_enable(struct clk *c, int enable, unsigned long *pflags)
223 debug("clk %p (%s) enb %08x, %s\n", c, c->regs->name,
224 c->regs->enb.reg, enable ? "enable" : "disable");
226 BUG_ON(!c->regs->enb.reg);
227 if (c->regs->enb.reg & 1)
230 if (!c->regs->enb.mask) { /* enable matrix clock */
232 spin_unlock_irqrestore(&clocks_lock, *pflags);
234 clk_enable((struct clk *)c->regs->enb.reg);
236 clk_disable((struct clk *)c->regs->enb.reg);
238 spin_lock_irqsave(&clocks_lock, *pflags);
241 sci_glb_set(c->regs->enb.reg & ~1, c->regs->enb.mask);
243 sci_glb_clr(c->regs->enb.reg & ~1, c->regs->enb.mask);
248 static int sci_clk_is_enable(struct clk *c)
252 debug0("clk %p (%s) enb %08x\n", c, c->regs->name, c->regs->enb.reg);
254 BUG_ON(!c->regs->enb.reg);
255 if (!c->regs->enb.mask) { /* check matrix clock */
256 enable = ! !sci_clk_is_enable((struct clk *)c->regs->enb.reg);
259 ! !sci_glb_read(c->regs->enb.reg & ~1, c->regs->enb.mask);
262 if (c->regs->enb.reg & 1)
267 static int sci_clk_set_rate(struct clk *c, unsigned long rate)
270 debug("clk %p (%s) set rate %lu\n", c, c->regs->name, rate);
271 rate = clk_round_rate(c, rate);
272 div = clk_get_rate(c->parent) / rate - 1; //FIXME:
273 div_shift = __ffs(c->regs->div.mask);
274 debug0("clk %p (%s) pll div reg %08x, val %08x mask %08x\n", c,
275 c->regs->name, c->regs->div.reg, div << div_shift,
277 sci_glb_write(c->regs->div.reg, div << div_shift, c->regs->div.mask);
279 c->rate = 0; /* FIXME: auto update all children after new rate if need */
283 static unsigned long sci_clk_get_rate(struct clk *c)
285 u32 div = 0, div_shift;
287 div_shift = __ffs(c->regs->div.mask);
288 debug0("clk %p (%s) div reg %08x, shift %u msk %08x\n", c,
289 c->regs->name, c->regs->div.reg, div_shift, c->regs->div.mask);
290 rate = clk_get_rate(c->parent);
292 if (c->regs->div.reg)
293 div = sci_glb_read(c->regs->div.reg,
294 c->regs->div.mask) >> div_shift;
295 debug0("clk %p (%s) parent rate %lu, div %u\n", c, c->regs->name, rate,
297 c->rate = rate = rate / (div + 1); //FIXME:
298 debug0("clk %p (%s) get real rate %lu\n", c, c->regs->name, rate);
302 #define SHFT_PLL_REFIN ( 16 )
303 #define MASK_PLL_REFIN ( BIT(16)|BIT(17) )
304 static unsigned long sci_pll_get_refin_rate(struct clk *c)
307 const unsigned long refin[4] = { 2, 4, 4, 13 }; /* default refin 4M */
308 i = sci_glb_read(c->regs->div.reg, MASK_PLL_REFIN) >> SHFT_PLL_REFIN;
309 debug0("pll %p (%s) refin %d\n", c, c->regs->name, i);
310 return refin[i] * 1000000;
313 static unsigned long sci_pll_get_rate(struct clk *c)
315 u32 mn = 1, mn_shift;
317 mn_shift = __ffs(c->regs->div.mask);
318 debug0("pll %p (%s) mn reg %08x, shift %u msk %08x\n", c, c->regs->name,
319 c->regs->div.reg, mn_shift, c->regs->div.mask);
320 rate = clk_get_rate(c->parent);
321 if (0 == c->regs->div.reg) ;
322 else if (c->regs->div.reg < MAX_DIV) {
323 mn = c->regs->div.reg;
327 rate = sci_pll_get_refin_rate(c);
328 mn = sci_glb_read(c->regs->div.reg,
329 c->regs->div.mask) >> mn_shift;
334 debug0("pll %p (%s) get real rate %lu\n", c, c->regs->name, rate);
338 static unsigned long sci_clk_round_rate(struct clk *c, unsigned long rate)
340 debug0("clk %p (%s) round rate %lu\n", c, c->regs->name, rate);
344 static int sci_clk_set_parent(struct clk *c, struct clk *parent)
347 debug0("clk %p (%s) parent %p (%s)\n", c, c->regs->name,
348 parent, parent ? parent->regs->name : 0);
350 for (i = 0; i < c->regs->nr_sources; i++) {
351 if (c->regs->sources[i] == parent) {
352 u32 sel_shift = __ffs(c->regs->sel.mask);
353 debug0("pll sel reg %08x, val %08x, msk %08x\n",
354 c->regs->sel.reg, i << sel_shift,
356 if (c->regs->sel.reg)
357 sci_glb_write(c->regs->sel.reg, i << sel_shift,
361 c->rate = 0; /* FIXME: auto update clock rate after new parent */
366 WARN(1, "warning: clock (%s) not support parent (%s)\n",
367 c->regs->name, parent ? parent->regs->name : 0);
371 static int sci_clk_get_parent(struct clk *c)
374 u32 sel_shift = __ffs(c->regs->sel.mask);
375 debug0("pll sel reg %08x, val %08x, msk %08x\n",
376 c->regs->sel.reg, i << sel_shift, c->regs->sel.mask);
377 if (c->regs->sel.reg) {
378 i = sci_glb_read(c->regs->sel.reg,
379 c->regs->sel.mask) >> sel_shift;
384 static struct clk_ops generic_clk_ops = {
385 .set_rate = sci_clk_set_rate,
386 .get_rate = sci_clk_get_rate,
387 .round_rate = sci_clk_round_rate,
388 .set_parent = sci_clk_set_parent,
391 static struct clk_ops generic_pll_ops = {
393 .get_rate = sci_pll_get_rate,
395 .set_parent = sci_clk_set_parent,
/* debugfs support to trace clock tree hierarchy and attributes */
#if defined(CONFIG_DEBUG_FS)
static struct dentry *clk_debugfs_root;

/*
 * Create this clock's debugfs directory (nested under its parent's
 * directory so the tree mirrors the clock hierarchy) exposing read-only
 * "usecount" and "rate" attributes.  On any failure the partially created
 * directory is removed.
 */
static int __init clk_debugfs_register(struct clk *c)
{
	char name[NAME_MAX], *p = name;

	p += sprintf(p, "%s", c->regs->name);

	if (IS_ERR_OR_NULL((c->dent =
			    debugfs_create_dir(name,
					       c->parent ? c->parent->dent :
					       clk_debugfs_root))))
		goto err_exit;
	if (IS_ERR_OR_NULL(debugfs_create_u32
			   ("usecount", S_IRUGO, c->dent, (u32 *) & c->usage)))
		goto err_exit;
	if (IS_ERR_OR_NULL(debugfs_create_u32
			   ("rate", S_IRUGO, c->dent, (u32 *) & c->rate)))
		goto err_exit;
	return 0;

err_exit:
	if (c->dent)
		debugfs_remove_recursive(c->dent);
	return -ENOMEM;
}
#endif
426 static __init int __clk_is_dummy_pll(struct clk *c)
428 return (c->regs->enb.reg & 1) || strstr(c->regs->name, "pll");
431 int __init sci_clk_register(struct clk_lookup *cl)
433 struct clk *c = cl->clk;
435 if (c->ops == NULL) {
436 c->ops = &generic_clk_ops;
437 if (c->rate) /* fixed OSC */
439 else if ((c->regs->div.reg >= 0 && c->regs->div.reg < MAX_DIV)
440 || strstr(c->regs->name, "pll")) {
441 c->ops = &generic_pll_ops;
446 ("clk %p (%s) rate %lu ops %p enb %08x sel %08x div %08x nr_sources %u\n",
447 c, c->regs->name, c->rate, c->ops, c->regs->enb.reg,
448 c->regs->sel.reg, c->regs->div.reg, c->regs->nr_sources);
450 if (c->enable == NULL && c->regs->enb.reg) {
451 c->enable = sci_clk_enable;
452 /* FIXME: dummy update some pll clocks usage */
453 if (sci_clk_is_enable(c) && __clk_is_dummy_pll(c)) {
458 if (!c->rate) { /* FIXME: dummy update clock parent and rate */
459 clk_set_parent(c, c->regs->sources[sci_clk_get_parent(c)]);
460 /* clk_set_rate(c, clk_get_rate(c)); */
465 #if defined(CONFIG_DEBUG_FS)
466 clk_debugfs_register(c);
471 static int __init sci_clock_dump(void)
473 struct clk_lookup *cl = (struct clk_lookup *)(&__clkinit_begin + 1);
474 while (cl < (struct clk_lookup *)&__clkinit_end) {
475 struct clk *c = cl->clk;
476 struct clk *p = clk_get_parent(c);
478 ("@@@clock[%s] is %sactive, usage %d, rate %lu, parent[%s]\n",
480 (c->enable == NULL || sci_clk_is_enable(c)) ? "" : "in",
481 c->usage, clk_get_rate(c), p ? p->regs->name : "none");
487 static int __clk_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
489 struct cpufreq_freqs *freq = data;
491 printk("%s (%u) dump cpu freq (%u %u %u %u)\n",
492 __func__, (unsigned int)val,
493 freq->cpu, freq->old, freq->new, (unsigned int)freq->flags);
498 static struct notifier_block __clk_cpufreq_notifier_block = {
499 .notifier_call = __clk_cpufreq_notifier
502 int __init sci_clock_init(void)
504 #if defined(CONFIG_DEBUG_FS)
505 clk_debugfs_root = debugfs_create_dir("clock", NULL);
506 if (IS_ERR_OR_NULL(clk_debugfs_root))
510 /* register all clock sources */
512 struct clk_lookup *cl =
513 (struct clk_lookup *)(&__clkinit_begin + 1);
514 debug0("%p (%x) -- %p -- %p (%x)\n",
515 &__clkinit_begin, __clkinit_begin, cl, &__clkinit_end,
517 while (cl < (struct clk_lookup *)&__clkinit_end) {
518 sci_clk_register(cl);
523 /* keep track of cpu frequency transitions */
524 cpufreq_register_notifier(&__clk_cpufreq_notifier_block,
525 CPUFREQ_TRANSITION_NOTIFIER);
530 #ifndef CONFIG_NKERNEL
531 arch_initcall(sci_clock_init);
532 late_initcall_sync(sci_clock_dump);
535 MODULE_LICENSE("GPL v2");
536 MODULE_DESCRIPTION("Spreadtrum Clock Driver");
537 MODULE_AUTHOR("robot <zhulin.lian@spreadtrum.com>");