/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010  Magnus Damm
 * Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <linux/slab.h>
17 #define CPG_CKSTP_BIT BIT(8)
19 static unsigned int sh_clk_read(struct clk *clk)
21 if (clk->flags & CLK_ENABLE_REG_8BIT)
22 return ioread8(clk->mapped_reg);
23 else if (clk->flags & CLK_ENABLE_REG_16BIT)
24 return ioread16(clk->mapped_reg);
26 return ioread32(clk->mapped_reg);
29 static void sh_clk_write(int value, struct clk *clk)
31 if (clk->flags & CLK_ENABLE_REG_8BIT)
32 iowrite8(value, clk->mapped_reg);
33 else if (clk->flags & CLK_ENABLE_REG_16BIT)
34 iowrite16(value, clk->mapped_reg);
36 iowrite32(value, clk->mapped_reg);
39 static unsigned int r8(const void __iomem *addr)
44 static unsigned int r16(const void __iomem *addr)
46 return ioread16(addr);
49 static unsigned int r32(const void __iomem *addr)
51 return ioread32(addr);
54 static int sh_clk_mstp_enable(struct clk *clk)
56 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
57 if (clk->status_reg) {
58 unsigned int (*read)(const void __iomem *addr);
60 void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
61 (phys_addr_t)clk->enable_reg + clk->mapped_reg;
63 if (clk->flags & CLK_ENABLE_REG_8BIT)
65 else if (clk->flags & CLK_ENABLE_REG_16BIT)
71 (read(mapped_status) & (1 << clk->enable_bit)) && i;
75 pr_err("cpg: failed to enable %p[%d]\n",
76 clk->enable_reg, clk->enable_bit);
83 static void sh_clk_mstp_disable(struct clk *clk)
85 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
88 static struct sh_clk_ops sh_clk_mstp_clk_ops = {
89 .enable = sh_clk_mstp_enable,
90 .disable = sh_clk_mstp_disable,
91 .recalc = followparent_recalc,
94 int __init sh_clk_mstp_register(struct clk *clks, int nr)
100 for (k = 0; !ret && (k < nr); k++) {
102 clkp->ops = &sh_clk_mstp_clk_ops;
103 ret |= clk_register(clkp);
/*
 * Div/mult table lookup helpers
 */
112 static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
117 static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
119 return clk_to_div_table(clk)->div_mult_table;
125 static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
127 return clk_rate_table_round(clk, clk->freq_table, rate);
130 static unsigned long sh_clk_div_recalc(struct clk *clk)
132 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
135 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
136 table, clk->arch_flags ? &clk->arch_flags : NULL);
138 idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
140 return clk->freq_table[idx].frequency;
143 static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
145 struct clk_div_table *dt = clk_to_div_table(clk);
149 idx = clk_rate_table_find(clk, clk->freq_table, rate);
153 value = sh_clk_read(clk);
154 value &= ~(clk->div_mask << clk->enable_bit);
155 value |= (idx << clk->enable_bit);
156 sh_clk_write(value, clk);
158 /* XXX: Should use a post-change notifier */
165 static int sh_clk_div_enable(struct clk *clk)
167 if (clk->div_mask == SH_CLK_DIV6_MSK) {
168 int ret = sh_clk_div_set_rate(clk, clk->rate);
173 sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
177 static void sh_clk_div_disable(struct clk *clk)
181 val = sh_clk_read(clk);
182 val |= CPG_CKSTP_BIT;
185 * div6 clocks require the divisor field to be non-zero or the
186 * above CKSTP toggle silently fails. Ensure that the divisor
187 * array is reset to its initial state on disable.
189 if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
190 val |= clk->div_mask;
192 sh_clk_write(val, clk);
195 static struct sh_clk_ops sh_clk_div_clk_ops = {
196 .recalc = sh_clk_div_recalc,
197 .set_rate = sh_clk_div_set_rate,
198 .round_rate = sh_clk_div_round_rate,
201 static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
202 .recalc = sh_clk_div_recalc,
203 .set_rate = sh_clk_div_set_rate,
204 .round_rate = sh_clk_div_round_rate,
205 .enable = sh_clk_div_enable,
206 .disable = sh_clk_div_disable,
209 static int __init sh_clk_init_parent(struct clk *clk)
216 if (!clk->parent_table || !clk->parent_num)
219 if (!clk->src_width) {
220 pr_err("sh_clk_init_parent: cannot select parent clock\n");
224 val = (sh_clk_read(clk) >> clk->src_shift);
225 val &= (1 << clk->src_width) - 1;
227 if (val >= clk->parent_num) {
228 pr_err("sh_clk_init_parent: parent table size failed\n");
232 clk_reparent(clk, clk->parent_table[val]);
234 pr_err("sh_clk_init_parent: unable to set parent");
241 static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
242 struct clk_div_table *table, struct sh_clk_ops *ops)
246 int nr_divs = table->div_mult_table->nr_divisors;
247 int freq_table_size = sizeof(struct cpufreq_frequency_table);
251 freq_table_size *= (nr_divs + 1);
252 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
254 pr_err("%s: unable to alloc memory\n", __func__);
258 for (k = 0; !ret && (k < nr); k++) {
264 clkp->freq_table = freq_table + (k * freq_table_size);
265 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
267 ret = clk_register(clkp);
269 ret = sh_clk_init_parent(clkp);
/* div6 clocks divide by 1..64; index i maps to divisor i + 1. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
285 static struct clk_div_mult_table div6_div_mult_table = {
286 .divisors = sh_clk_div6_divisors,
287 .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
290 static struct clk_div_table sh_clk_div6_table = {
291 .div_mult_table = &div6_div_mult_table,
294 static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
296 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
300 if (!clk->parent_table || !clk->parent_num)
303 /* Search the parent */
304 for (i = 0; i < clk->parent_num; i++)
305 if (clk->parent_table[i] == parent)
308 if (i == clk->parent_num)
311 ret = clk_reparent(clk, parent);
315 value = sh_clk_read(clk) &
316 ~(((1 << clk->src_width) - 1) << clk->src_shift);
318 sh_clk_write(value | (i << clk->src_shift), clk);
320 /* Rebuild the frequency table */
321 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
327 static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
328 .recalc = sh_clk_div_recalc,
329 .round_rate = sh_clk_div_round_rate,
330 .set_rate = sh_clk_div_set_rate,
331 .enable = sh_clk_div_enable,
332 .disable = sh_clk_div_disable,
333 .set_parent = sh_clk_div6_set_parent,
336 int __init sh_clk_div6_register(struct clk *clks, int nr)
338 return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
339 &sh_clk_div_enable_clk_ops);
342 int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
344 return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
345 &sh_clk_div6_reparent_clk_ops);
351 static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
353 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
357 /* we really need a better way to determine parent index, but for
358 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
359 * no CLK_ENABLE_ON_INIT means external clock...
362 if (parent->flags & CLK_ENABLE_ON_INIT)
363 value = sh_clk_read(clk) & ~(1 << 7);
365 value = sh_clk_read(clk) | (1 << 7);
367 ret = clk_reparent(clk, parent);
371 sh_clk_write(value, clk);
373 /* Rebiuld the frequency table */
374 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
375 table, &clk->arch_flags);
380 static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
381 .recalc = sh_clk_div_recalc,
382 .set_rate = sh_clk_div_set_rate,
383 .round_rate = sh_clk_div_round_rate,
384 .enable = sh_clk_div_enable,
385 .disable = sh_clk_div_disable,
386 .set_parent = sh_clk_div4_set_parent,
389 int __init sh_clk_div4_register(struct clk *clks, int nr,
390 struct clk_div4_table *table)
392 return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
395 int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
396 struct clk_div4_table *table)
398 return sh_clk_div_register_ops(clks, nr, table,
399 &sh_clk_div_enable_clk_ops);
402 int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
403 struct clk_div4_table *table)
405 return sh_clk_div_register_ops(clks, nr, table,
406 &sh_clk_div4_reparent_clk_ops);
410 static unsigned long fsidiv_recalc(struct clk *clk)
414 value = __raw_readl(clk->mapping->base);
418 return clk->parent->rate;
420 return clk->parent->rate / value;
/* Round @rate to the nearest achievable divisor in the range 1..0xffff. */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
428 static void fsidiv_disable(struct clk *clk)
430 __raw_writel(0, clk->mapping->base);
433 static int fsidiv_enable(struct clk *clk)
437 value = __raw_readl(clk->mapping->base) >> 16;
441 __raw_writel((value << 16) | 0x3, clk->mapping->base);
446 static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
450 idx = (clk->parent->rate / rate) & 0xffff;
452 __raw_writel(0, clk->mapping->base);
454 __raw_writel(idx << 16, clk->mapping->base);
459 static struct sh_clk_ops fsidiv_clk_ops = {
460 .recalc = fsidiv_recalc,
461 .round_rate = fsidiv_round_rate,
462 .set_rate = fsidiv_set_rate,
463 .enable = fsidiv_enable,
464 .disable = fsidiv_disable,
467 int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
469 struct clk_mapping *map;
472 for (i = 0; i < nr; i++) {
474 map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
476 pr_err("%s: unable to alloc memory\n", __func__);
480 /* clks[i].enable_reg came from SH_CLK_FSIDIV() */
481 map->phys = (phys_addr_t)clks[i].enable_reg;
484 clks[i].enable_reg = 0; /* remove .enable_reg */
485 clks[i].ops = &fsidiv_clk_ops;
486 clks[i].mapping = map;
488 clk_register(&clks[i]);