2 * Copyright (C) 2012 Spreadtrum Communications Inc.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
15 #include <ubi_uboot.h>
16 #include <linux/compiler.h>
/* __ffs(): 0-based index of the least-significant set bit (ffs() is 1-based). */
18 #define __ffs(x) (ffs(x) - 1)
/* U-Boot runs single-threaded with interrupts off, so the Linux spinlock
 * primitives are compiled away to nothing. */
20 #define DEFINE_SPINLOCK(...)
21 #define spin_lock_irqsave(...)
22 #define spin_unlock_irqrestore(...)
25 #include <asm/arch/sprd_reg.h>
26 #include <asm/arch/clock.h>
27 #include <asm/arch/__clock_tree.h>
/* Sentinel objects placed by the linker into dedicated sections
 * (__clkinit0 / __clkinit2) so that the clock lookup table, emitted between
 * them, can be walked as [&__clkinit_begin + 1, &__clkinit_end)
 * (see sci_clk_register()/sci_clock_init() below).
 * NOTE(review): two pairs of definitions are visible here — presumably
 * alternative branches of a collapsed #if; confirm against the original file. */
30 const u32 __clkinit0 __clkinit_begin = 0xeeeebbbb;
31 const u32 __clkinit2 __clkinit_end = 0xddddeeee;
33 const u32 __clkinit0 __clkinit_begin = &CLK_LK_clk_mpll;
34 const u32 __clkinit2 __clkinit_end = &CLK_LK_clk_mpll;
37 //#define debug0 printf
/* Expands to nothing in this build (stubbed above); kept for Linux parity. */
39 DEFINE_SPINLOCK(clocks_lock);
/*
 * clk_enable - refcounted clock enable.
 *
 * Recursively enables the parent chain first so the source is running
 * before the child; the hardware hook (clk->enable) fires only on the
 * 0 -> 1 usage transition.  Invalid/NULL clocks are rejected up front.
 * NOTE(review): some lines of this function are not visible in this
 * extraction (e.g. the declaration of `flags` and the return statement).
 */
41 int clk_enable(struct clk *clk)
44 if (IS_ERR_OR_NULL(clk))
/* bring the parent up before touching this clock */
47 clk_enable(clk->parent);
49 spin_lock_irqsave(&clocks_lock, flags);
/* first user turns the hardware on; post-increment keeps the count exact */
50 if ((clk->usage++) == 0 && clk->enable)
51 (clk->enable) (clk, 1, &flags);
52 spin_unlock_irqrestore(&clocks_lock, flags);
53 debug0("clk %p, usage %d\n", clk, clk->usage);
57 EXPORT_SYMBOL(clk_enable);
/*
 * clk_disable - refcounted clock disable, mirror of clk_enable().
 *
 * The hardware hook fires only on the 1 -> 0 usage transition.  A usage
 * underflow (unbalanced disable) triggers WARN and forcibly resets the
 * refcount to 0.  The parent is disabled after this clock, the reverse
 * order of clk_enable().
 * NOTE(review): lines are missing from this extraction (e.g. the early
 * return after the underflow branch is implied by the duplicated unlock).
 */
59 void clk_disable(struct clk *clk)
62 if (IS_ERR_OR_NULL(clk))
65 spin_lock_irqsave(&clocks_lock, flags);
/* last user turns the hardware off */
66 if ((--clk->usage) == 0 && clk->enable)
67 (clk->enable) (clk, 0, &flags);
/* unbalanced clk_disable(): warn, clamp the refcount, and bail out */
68 if (WARN(clk->usage < 0,
69 "warning: clock (%s) usage (%d)\n", clk->regs->name, clk->usage)) {
70 clk->usage = 0; /* force reset clock refcnt */
71 spin_unlock_irqrestore(&clocks_lock, flags);
74 spin_unlock_irqrestore(&clocks_lock, flags);
75 debug0("clk %p, usage %d\n", clk, clk->usage);
/* drop our reference on the parent last */
76 clk_disable(clk->parent);
79 EXPORT_SYMBOL(clk_disable);
82 * clk_force_disable - force disable clock output
85 * Forcibly disable the clock output.
86 * NOTE: this *will* disable the clock output even if other consumer
87 * devices have it enabled. This should be used for situations when device
88 * suspend or damage will likely occur if the devices is not disabled.
90 void clk_force_disable(struct clk *clk)
92 if (IS_ERR_OR_NULL(clk))
95 debug("clk %p, usage %d\n", clk, clk->usage);
/* drain the refcount to zero; loop body not visible in this extraction —
 * presumably calls clk_disable(clk) once per outstanding reference. */
96 while (clk->usage > 0) {
101 EXPORT_SYMBOL(clk_force_disable);
/*
 * clk_get_rate - return the clock's rate in Hz.
 *
 * Resolution order: the ops->get_rate hook if present, otherwise the
 * parent's rate (recursively).  NOTE(review): original lines 107-113 are
 * not visible here — presumably a cached clk->rate fast path; confirm.
 */
103 unsigned long clk_get_rate(struct clk *clk)
105 debug0("clk %p, rate %lu\n", clk, IS_ERR_OR_NULL(clk) ? -1 : clk->rate);
106 if (IS_ERR_OR_NULL(clk))
/* ask the hardware-specific hook when one is installed */
114 if (clk->ops != NULL && clk->ops->get_rate != NULL)
115 return (clk->ops->get_rate) (clk);
/* otherwise inherit the parent's rate */
117 if (clk->parent != NULL)
118 return clk_get_rate(clk->parent);
123 EXPORT_SYMBOL(clk_get_rate);
/*
 * clk_round_rate - adjust a requested rate to one the clock can produce.
 * Delegates to ops->round_rate when available; the fallback path
 * (original lines 129-132) is not visible in this extraction.
 */
125 long clk_round_rate(struct clk *clk, unsigned long rate)
127 if (!IS_ERR_OR_NULL(clk) && clk->ops && clk->ops->round_rate)
128 return (clk->ops->round_rate) (clk, rate);
133 EXPORT_SYMBOL(clk_round_rate);
/*
 * clk_set_rate - program a new rate through the clock's ops->set_rate hook.
 *
 * Rejects bad pointers and a zero rate; clocks without a set_rate hook are
 * treated as fixed (the early-return body is on lines not visible here).
 * The hook runs under the (stubbed) clocks_lock.
 */
135 int clk_set_rate(struct clk *clk, unsigned long rate)
139 debug0("clk %p, rate %lu\n", clk, rate);
140 if (IS_ERR_OR_NULL(clk) || rate == 0)
143 /* We do not default just do a clk->rate = rate as
144 * the clock may have been made this way by choice.
147 //WARN_ON(clk->ops == NULL);
148 //WARN_ON(clk->ops && clk->ops->set_rate == NULL);
/* fixed clocks (no hook) cannot be re-rated */
150 if (clk->ops == NULL || clk->ops->set_rate == NULL)
153 spin_lock_irqsave(&clocks_lock, flags);
154 ret = (clk->ops->set_rate) (clk, rate);
155 spin_unlock_irqrestore(&clocks_lock, flags);
159 EXPORT_SYMBOL(clk_set_rate);
/* clk_get_parent - return the clock's parent.
 * NOTE(review): the body (original lines 162-165) is not visible in this
 * extraction; presumably returns clk->parent with a NULL/error guard. */
161 struct clk *clk_get_parent(struct clk *clk)
166 EXPORT_SYMBOL(clk_get_parent);
/*
 * clk_set_parent - reparent a clock onto another source.
 *
 * Delegates to ops->set_parent under the (stubbed) clocks_lock.  When
 * debugfs is enabled, the clock's debugfs directory is moved under the
 * new parent's directory on success — deliberately outside the lock
 * (see the in-code FIXME about a possible transient mismatch).
 */
168 int clk_set_parent(struct clk *clk, struct clk *parent)
172 #if defined(CONFIG_DEBUG_FS)
/* remember the old parent so its debugfs dentry can be relocated below */
173 struct clk *old_parent = clk_get_parent(clk);
174 debug0("clk %p, parent %p <<< %p\n", clk, parent, old_parent);
/* note: a NULL parent is allowed (only IS_ERR is rejected for parent) */
176 if (IS_ERR_OR_NULL(clk) || IS_ERR(parent))
179 spin_lock_irqsave(&clocks_lock, flags);
180 if (clk->ops && clk->ops->set_parent)
181 ret = (clk->ops->set_parent) (clk, parent);
182 spin_unlock_irqrestore(&clocks_lock, flags);
184 #if defined(CONFIG_DEBUG_FS)
185 /* FIXME: call debugfs_rename() out of spin lock,
186 * maybe not match with the real parent-child relationship
187 * in some extreme scenes.
189 if (0 == ret && old_parent && old_parent->dent && clk->dent
190 && parent && parent->dent) {
191 debug0("directory dentry move %s to %s\n",
192 old_parent->regs->name, parent->regs->name);
193 debugfs_rename(old_parent->dent, clk->dent,
194 parent->dent, clk->regs->name);
200 EXPORT_SYMBOL(clk_set_parent);
/*
 * sci_clk_enable - hardware enable/disable hook installed as clk->enable.
 * @enable: nonzero to enable, zero to disable.
 * @pflags: caller's saved IRQ flags, dropped/retaken around nested calls.
 *
 * Encoding of c->regs->enb: when enb.mask is zero, enb.reg is actually a
 * pointer to another ("matrix") struct clk that must be enabled/disabled
 * instead; otherwise enb.reg is a global register address whose low bit
 * is a flag (masked off with & ~1 before the register access — exact
 * meaning of bit 0 not visible here; the branch body at line 208 is
 * missing from this extraction).
 */
202 static int sci_clk_enable(struct clk *c, int enable, unsigned long *pflags)
204 debug("clk %p (%s) enb %08x, %s\n", c, c->regs->name,
205 c->regs->enb.reg, enable ? "enable" : "disable");
207 BUG_ON(!c->regs->enb.reg);
208 if (c->regs->enb.reg & 1)
211 if (!c->regs->enb.mask) { /* enable matrix clock */
/* drop the caller's lock before recursing into clk_enable/clk_disable,
 * which take clocks_lock themselves */
213 spin_unlock_irqrestore(&clocks_lock, *pflags);
215 clk_enable((struct clk *)c->regs->enb.reg);
217 clk_disable((struct clk *)c->regs->enb.reg);
219 spin_lock_irqsave(&clocks_lock, *pflags);
/* plain gate: set or clear the enable bits (low flag bit stripped) */
222 sci_glb_set(c->regs->enb.reg & ~1, c->regs->enb.mask);
224 sci_glb_clr(c->regs->enb.reg & ~1, c->regs->enb.mask);
/*
 * sci_clk_is_enable - query whether the clock's gate is currently open.
 * Mirrors sci_clk_enable()'s encoding: zero enb.mask means enb.reg is a
 * pointer to a "matrix" clock to query recursively; otherwise the enable
 * bits are read from the global register (low flag bit stripped).
 * The `! !` double negation normalizes the result to 0/1.
 * NOTE(review): the branch body for (enb.reg & 1) at line 243 is missing
 * from this extraction.
 */
229 static int sci_clk_is_enable(struct clk *c)
233 debug0("clk %p (%s) enb %08x\n", c, c->regs->name, c->regs->enb.reg);
235 BUG_ON(!c->regs->enb.reg);
236 if (!c->regs->enb.mask) { /* check matrix clock */
237 enable = ! !sci_clk_is_enable((struct clk *)c->regs->enb.reg);
240 ! !sci_glb_read(c->regs->enb.reg & ~1, c->regs->enb.mask);
243 if (c->regs->enb.reg & 1)
/*
 * sci_clk_set_rate - ops->set_rate hook for generic divided clocks.
 *
 * Computes an integer divider from the parent rate (hardware stores
 * divider-1, hence the -1) and writes it into the div field of the
 * control register.  Integer truncation in the division is flagged by
 * the in-code FIXME.  Clearing c->rate forces the next clk_get_rate()
 * to recompute from hardware.
 */
248 static int sci_clk_set_rate(struct clk *c, unsigned long rate)
251 debug("clk %p (%s) set rate %lu\n", c, c->regs->name, rate);
/* clamp the request to something achievable first */
252 rate = clk_round_rate(c, rate);
253 div = clk_get_rate(c->parent) / rate - 1; //FIXME:
/* position of the div field within the register */
254 div_shift = __ffs(c->regs->div.mask);
255 debug0("clk %p (%s) pll div reg %08x, val %08x mask %08x\n", c,
256 c->regs->name, c->regs->div.reg, div << div_shift,
258 sci_glb_write(c->regs->div.reg, div << div_shift, c->regs->div.mask);
260 c->rate = 0; /* FIXME: auto update all children after new rate if need */
/*
 * sci_clk_get_rate - ops->get_rate hook for generic divided clocks.
 *
 * Reads the divider field from hardware (div defaults to 0, i.e.
 * divide-by-1, when no div register exists) and derives the rate as
 * parent_rate / (div + 1).  The result is cached in c->rate.
 */
264 static unsigned long sci_clk_get_rate(struct clk *c)
266 u32 div = 0, div_shift;
268 div_shift = __ffs(c->regs->div.mask);
269 debug0("clk %p (%s) div reg %08x, shift %u msk %08x\n", c,
270 c->regs->name, c->regs->div.reg, div_shift, c->regs->div.mask);
271 rate = clk_get_rate(c->parent);
/* only read hardware when a divider register is actually defined */
273 if (c->regs->div.reg)
274 div = sci_glb_read(c->regs->div.reg,
275 c->regs->div.mask) >> div_shift;
276 debug0("clk %p (%s) parent rate %lu, div %u\n", c, c->regs->name, rate,
/* hardware stores divider-1 */
278 c->rate = rate = rate / (div + 1); //FIXME:
279 debug0("clk %p (%s) get real rate %lu\n", c, c->regs->name, rate);
/* PLL reference-input selector: a 2-bit field at bits [17:16] of the PLL
 * control register indexes the refin[] table below (values in MHz). */
283 #define SHFT_PLL_REFIN ( 16 )
284 #define MASK_PLL_REFIN ( BIT(16)|BIT(17) )
/* Return the PLL reference input frequency in Hz.
 * NOTE(review): the declaration of `i` (original line 287) is not visible
 * in this extraction. */
285 static unsigned long sci_pll_get_refin_rate(struct clk *c)
288 const unsigned long refin[4] = { 2, 4, 4, 13 }; /* default refin 4M */
289 i = sci_glb_read(c->regs->div.reg, MASK_PLL_REFIN) >> SHFT_PLL_REFIN;
290 debug0("pll %p (%s) refin %d\n", c, c->regs->name, i);
/* table holds MHz; scale to Hz */
291 return refin[i] * 1000000;
/*
 * sci_pll_get_rate - ops->get_rate hook for PLL clocks.
 *
 * Three encodings of c->regs->div.reg:
 *   0              -> no multiplier, parent rate passes through (mn stays 1);
 *   < MAX_DIV      -> div.reg itself is a literal divider/multiplier value;
 *   otherwise      -> div.reg is a register address: rate is derived from
 *                     the refin frequency times the MN field read from it.
 * NOTE(review): the final rate combination (original lines ~311-314) is
 * not visible in this extraction.
 */
294 static unsigned long sci_pll_get_rate(struct clk *c)
296 u32 mn = 1, mn_shift;
298 mn_shift = __ffs(c->regs->div.mask);
299 debug0("pll %p (%s) mn reg %08x, shift %u msk %08x\n", c, c->regs->name,
300 c->regs->div.reg, mn_shift, c->regs->div.mask);
301 rate = clk_get_rate(c->parent);
/* empty statement on purpose: reg == 0 means keep parent rate as-is */
302 if (0 == c->regs->div.reg) ;
303 else if (c->regs->div.reg < MAX_DIV) {
/* small values are literal constants, not register addresses */
304 mn = c->regs->div.reg;
308 rate = sci_pll_get_refin_rate(c);
309 mn = sci_glb_read(c->regs->div.reg,
310 c->regs->div.mask) >> mn_shift;
315 debug0("pll %p (%s) get real rate %lu\n", c, c->regs->name, rate);
/* ops->round_rate hook. NOTE(review): the body beyond the trace (original
 * lines 320-324) is not visible; presumably returns `rate` unchanged. */
319 static unsigned long sci_clk_round_rate(struct clk *c, unsigned long rate)
321 debug0("clk %p (%s) round rate %lu\n", c, c->regs->name, rate);
/*
 * sci_clk_set_parent - ops->set_parent hook.
 *
 * Looks the requested parent up in the clock's sources[] table; the
 * matching index is programmed into the sel field of the mux register
 * (when one exists).  An unknown parent triggers WARN; c->rate is
 * cleared so the rate is recomputed lazily for the new parent.
 */
325 static int sci_clk_set_parent(struct clk *c, struct clk *parent)
328 debug0("clk %p (%s) parent %p (%s)\n", c, c->regs->name,
329 parent, parent ? parent->regs->name : 0);
/* linear scan: source index doubles as the mux selector value */
331 for (i = 0; i < c->regs->nr_sources; i++) {
332 if (c->regs->sources[i] == parent) {
333 u32 sel_shift = __ffs(c->regs->sel.mask);
334 debug0("pll sel reg %08x, val %08x, msk %08x\n",
335 c->regs->sel.reg, i << sel_shift,
/* some clocks have a fixed parent and no mux register */
337 if (c->regs->sel.reg)
338 sci_glb_write(c->regs->sel.reg, i << sel_shift,
342 c->rate = 0; /* FIXME: auto update clock rate after new parent */
/* fell through the table: the requested parent is not a valid source */
347 WARN(1, "warning: clock (%s) not support parent (%s)\n",
348 c->regs->name, parent ? parent->regs->name : 0);
/*
 * sci_clk_get_parent - read the current mux selector from hardware and
 * return it as an index into c->regs->sources[] (used by
 * sci_clk_register() to discover the boot-time parent).  Clocks without
 * a mux register keep the default index (declaration of `i` is on a line
 * not visible in this extraction).
 */
352 static int sci_clk_get_parent(struct clk *c)
355 u32 sel_shift = __ffs(c->regs->sel.mask);
356 debug0("pll sel reg %08x, val %08x, msk %08x\n",
357 c->regs->sel.reg, i << sel_shift, c->regs->sel.mask);
358 if (c->regs->sel.reg) {
359 i = sci_glb_read(c->regs->sel.reg,
360 c->regs->sel.mask) >> sel_shift;
/* Default ops for ordinary divided/muxed clocks. */
365 static struct clk_ops generic_clk_ops = {
366 .set_rate = sci_clk_set_rate,
367 .get_rate = sci_clk_get_rate,
368 .round_rate = sci_clk_round_rate,
369 .set_parent = sci_clk_set_parent,
/* Ops for PLLs: rate comes from the MN/refin logic; no set_rate or
 * round_rate hooks, so PLL rates are effectively read-only here. */
372 static struct clk_ops generic_pll_ops = {
374 .get_rate = sci_pll_get_rate,
376 .set_parent = sci_clk_set_parent,
379 /* debugfs support to trace clock tree hierarchy and attributes */
380 #if defined(CONFIG_DEBUG_FS)
381 static struct dentry *clk_debugfs_root;
/*
 * clk_debugfs_register - expose one clock in debugfs.
 *
 * Creates a directory named after the clock, nested under its parent's
 * directory so the filesystem mirrors the clock tree, and adds read-only
 * u32 views of the usage refcount and cached rate.  On any creation
 * failure the partially-built subtree is torn down (error-path structure
 * spans lines not visible in this extraction).
 */
382 static int __init clk_debugfs_register(struct clk *c)
384 char name[NAME_MAX], *p = name;
385 p += sprintf(p, "%s", c->regs->name);
/* parentless clocks land under the root "clock" directory */
387 if (IS_ERR_OR_NULL((c->dent =
388 debugfs_create_dir(name,
389 c->parent ? c->parent->dent :
/* NOTE(review): casting int/ulong fields to u32* assumes matching width
 * and endianness — fine on 32-bit ARM, verify elsewhere */
392 if (IS_ERR_OR_NULL(debugfs_create_u32
393 ("usecount", S_IRUGO, c->dent, (u32 *) & c->usage)))
395 if (IS_ERR_OR_NULL(debugfs_create_u32
396 ("rate", S_IRUGO, c->dent, (u32 *) & c->rate)))
/* error path: remove whatever was created for this clock */
401 debugfs_remove_recursive(c->dent);
407 static __init int __clk_is_dummy_pll(struct clk *c)
409 return (c->regs->enb.reg & 1) || strstr(c->regs->name, "pll");
/*
 * sci_clk_register - register one entry of the linker-built clock table.
 *
 * Fills in defaults for fields the static table left NULL:
 *   - ops: fixed-rate OSCs keep no hook (branch body not visible),
 *     PLL-like clocks (small literal div.reg, or "pll" in the name) get
 *     generic_pll_ops, everything else generic_clk_ops;
 *   - enable: sci_clk_enable when an enable register exists, with a dummy
 *     usage bump for PLLs already running at boot;
 *   - parent/rate: discovered from hardware via sci_clk_get_parent().
 * Finally registers the clock in debugfs when enabled.
 */
412 int __init sci_clk_register(struct clk_lookup *cl)
414 struct clk *c = cl->clk;
416 if (c->ops == NULL) {
417 c->ops = &generic_clk_ops;
418 if (c->rate) /* fixed OSC */
/* NOTE(review): div.reg >= 0 is vacuous if div.reg is unsigned — the
 * effective test is div.reg < MAX_DIV; confirm the field's type */
420 else if ((c->regs->div.reg >= 0 && c->regs->div.reg < MAX_DIV)
421 || strstr(c->regs->name, "pll")) {
422 c->ops = &generic_pll_ops;
427 ("clk %p (%s) rate %lu ops %p enb %08x sel %08x div %08x nr_sources %u\n",
428 c, c->regs->name, c->rate, c->ops, c->regs->enb.reg,
429 c->regs->sel.reg, c->regs->div.reg, c->regs->nr_sources);
431 if (c->enable == NULL && c->regs->enb.reg) {
432 c->enable = sci_clk_enable;
433 /* FIXME: dummy update some pll clocks usage */
434 if (sci_clk_is_enable(c) && __clk_is_dummy_pll(c)) {
439 if (!c->rate) { /* FIXME: dummy update clock parent and rate */
/* adopt whatever parent the boot ROM/loader left selected */
440 clk_set_parent(c, c->regs->sources[sci_clk_get_parent(c)]);
441 /* clk_set_rate(c, clk_get_rate(c)); */
446 #if defined(CONFIG_DEBUG_FS)
447 clk_debugfs_register(c);
/*
 * sci_clock_dump - walk the linker-built clock table and print each
 * clock's state (active/inactive, usage, rate, parent) for debugging.
 * Clocks with no enable hook are reported as always active.
 * NOTE(review): the cl advance and return (original lines 463-466) are
 * not visible in this extraction.
 */
452 static int __init sci_clock_dump(void)
/* table lives between the two linker sentinels; skip the begin marker */
454 struct clk_lookup *cl = (struct clk_lookup *)(&__clkinit_begin + 1);
455 while (cl < (struct clk_lookup *)&__clkinit_end) {
456 struct clk *c = cl->clk;
457 struct clk *p = clk_get_parent(c);
459 ("@@@clock[%s] is %sactive, usage %d, rate %lu, parent[%s]\n",
461 (c->enable == NULL || sci_clk_is_enable(c)) ? "" : "in",
462 c->usage, clk_get_rate(c), p ? p->regs->name : "none");
/*
 * __clk_cpufreq_notifier - cpufreq transition callback; only logs the
 * event (cpu, old/new frequency, flags).  Return value line is not
 * visible in this extraction (notifiers conventionally return NOTIFY_OK
 * or 0 — confirm against the original).
 */
468 static int __clk_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
470 struct cpufreq_freqs *freq = data;
472 printk("%s (%u) dump cpu freq (%u %u %u %u)\n",
473 __func__, (unsigned int)val,
474 freq->cpu, freq->old, freq->new, (unsigned int)freq->flags);
/* registered with CPUFREQ_TRANSITION_NOTIFIER in sci_clock_init() */
479 static struct notifier_block __clk_cpufreq_notifier_block = {
480 .notifier_call = __clk_cpufreq_notifier
/*
 * sci_clock_init - driver entry point (arch_initcall when !CONFIG_NKERNEL).
 *
 * 1. Clears the force-shutdown bits for the MM and GPU power domains so
 *    their clocks can be touched.
 * 2. Creates the debugfs "clock" root (when CONFIG_DEBUG_FS).
 * 3. Walks the linker-built lookup table between __clkinit_begin and
 *    __clkinit_end, registering every clock via sci_clk_register().
 * 4. Subscribes to cpufreq transition notifications for tracing.
 */
483 int __init sci_clock_init(void)
485 __raw_writel(__raw_readl(REG_PMU_APB_PD_MM_TOP_CFG)
486 & ~(BIT_PD_MM_TOP_FORCE_SHUTDOWN), REG_PMU_APB_PD_MM_TOP_CFG);
488 __raw_writel(__raw_readl(REG_PMU_APB_PD_GPU_TOP_CFG)
489 & ~(BIT_PD_GPU_TOP_FORCE_SHUTDOWN), REG_PMU_APB_PD_GPU_TOP_CFG);
491 #if defined(CONFIG_DEBUG_FS)
492 clk_debugfs_root = debugfs_create_dir("clock", NULL);
493 if (IS_ERR_OR_NULL(clk_debugfs_root))
497 /* register all clock sources */
/* first table entry sits just past the begin sentinel */
499 struct clk_lookup *cl =
500 (struct clk_lookup *)(&__clkinit_begin + 1);
501 debug0("%p (%x) -- %p -- %p (%x)\n",
502 &__clkinit_begin, __clkinit_begin, cl, &__clkinit_end,
504 while (cl < (struct clk_lookup *)&__clkinit_end) {
505 sci_clk_register(cl);
510 /* keep track of cpu frequency transitions */
511 cpufreq_register_notifier(&__clk_cpufreq_notifier_block,
512 CPUFREQ_TRANSITION_NOTIFIER);
/* Hook into the init sequence: register clocks early (arch_initcall),
 * dump the resulting tree late, after all consumers have probed. */
517 #ifndef CONFIG_NKERNEL
518 arch_initcall(sci_clock_init);
519 late_initcall_sync(sci_clock_dump);
522 MODULE_LICENSE("GPL v2");
523 MODULE_DESCRIPTION("Spreadtrum Clock Driver");
524 MODULE_AUTHOR("robot <zhulin.lian@spreadtrum.com>");