// SPDX-License-Identifier: GPL-2.0
/*
 * StarFive JH7110 Clock Generator Driver
 *
 * Copyright (C) 2022 Xingyu Wu <xingyu.wu@starfivetech.com>
 */
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

#include <dt-bindings/clock/starfive-jh7110-clkgen.h>

#include "clk-starfive-jh7110.h"
#include "clk-starfive-jh7110-pll.h"
25 static struct jh7110_clk * __init jh7110_clk_from(struct clk_hw *hw)
27 return container_of(hw, struct jh7110_clk, hw);
30 static struct jh7110_clk_priv *jh7110_priv_from(struct jh7110_clk *clk)
32 return container_of(clk, struct jh7110_clk_priv, reg[clk->idx]);
35 void __iomem *jh7110_clk_reg_addr_get(struct jh7110_clk *clk)
38 struct jh7110_clk_priv *priv = jh7110_priv_from(clk);
40 if (clk->reg_flags == JH7110_CLK_SYS_FLAG)
41 reg = priv->sys_base + 4 * clk->idx;
42 else if (clk->reg_flags == JH7110_CLK_STG_FLAG)
43 reg = priv->stg_base + 4 * (clk->idx - JH7110_CLK_SYS_REG_END);
44 else if (clk->reg_flags == JH7110_CLK_AON_FLAG)
45 reg = priv->aon_base + 4 * (clk->idx - JH7110_CLK_STG_REG_END);
46 else if (clk->reg_flags == JH7110_CLK_VOUT_FLAG)
47 reg = priv->vout_base + 4 * clk->idx;
48 else if (clk->reg_flags == JH7110_CLK_ISP_FLAG)
49 reg = priv->isp_base + 4 * clk->idx;
54 static u32 jh7110_clk_reg_get(struct jh7110_clk *clk)
56 void __iomem *reg = jh7110_clk_reg_addr_get(clk);
58 if ((clk->reg_flags == JH7110_CLK_ISP_FLAG) || (clk->reg_flags == JH7110_CLK_VOUT_FLAG)) {
60 struct jh7110_clk_priv *priv = jh7110_priv_from(clk);
62 if (pm_runtime_suspended(priv->dev)) {
63 ret = pm_runtime_get_sync(priv->dev);
65 dev_err(priv->dev, "cannot resume device :%d.\n", ret);
68 pm_runtime_put(priv->dev);
72 return readl_relaxed(reg);
75 static void jh7110_clk_reg_rmw(struct jh7110_clk *clk, u32 mask, u32 value)
77 struct jh7110_clk_priv *priv = jh7110_priv_from(clk);
78 void __iomem *reg = jh7110_clk_reg_addr_get(clk);
81 spin_lock_irqsave(&priv->rmw_lock, flags);
82 if ((clk->idx == JH7110_UART3_CLK_CORE
83 || clk->idx == JH7110_UART4_CLK_CORE
84 || clk->idx == JH7110_UART5_CLK_CORE)
85 && (value != JH7110_CLK_ENABLE))
87 value |= jh7110_clk_reg_get(clk) & ~mask;
88 writel_relaxed(value, reg);
89 spin_unlock_irqrestore(&priv->rmw_lock, flags);
92 static int jh7110_clk_enable(struct clk_hw *hw)
94 struct jh7110_clk *clk = jh7110_clk_from(hw);
96 jh7110_clk_reg_rmw(clk, JH7110_CLK_ENABLE, JH7110_CLK_ENABLE);
100 static void jh7110_clk_disable(struct clk_hw *hw)
102 struct jh7110_clk *clk = jh7110_clk_from(hw);
104 jh7110_clk_reg_rmw(clk, JH7110_CLK_ENABLE, 0);
107 static int jh7110_clk_is_enabled(struct clk_hw *hw)
109 struct jh7110_clk *clk = jh7110_clk_from(hw);
111 return !!(jh7110_clk_reg_get(clk) & JH7110_CLK_ENABLE);
114 static unsigned long jh7110_clk_recalc_rate(struct clk_hw *hw,
115 unsigned long parent_rate)
117 struct jh7110_clk *clk = jh7110_clk_from(hw);
118 u32 div = jh7110_clk_reg_get(clk) & JH7110_CLK_DIV_MASK;
120 if (clk->idx == JH7110_UART3_CLK_CORE
121 || clk->idx == JH7110_UART4_CLK_CORE
122 || clk->idx == JH7110_UART5_CLK_CORE)
125 return div ? parent_rate / div : 0;
128 static int jh7110_clk_determine_rate(struct clk_hw *hw,
129 struct clk_rate_request *req)
131 struct jh7110_clk *clk = jh7110_clk_from(hw);
132 unsigned long parent = req->best_parent_rate;
133 unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
134 unsigned long div = min_t(unsigned long,
135 DIV_ROUND_UP(parent, rate), clk->max_div);
136 unsigned long result = parent / div;
139 * we want the result clamped by min_rate and max_rate if possible:
140 * case 1: div hits the max divider value, which means it's less than
141 * parent / rate, so the result is greater than rate and min_rate in
142 * particular. we can't do anything about result > max_rate because the
143 * divider doesn't go any further.
144 * case 2: div = DIV_ROUND_UP(parent, rate) which means the result is
145 * always lower or equal to rate and max_rate. however the result may
146 * turn out lower than min_rate, but then the next higher rate is fine:
147 * div - 1 = ceil(parent / rate) - 1 < parent / rate
149 * min_rate <= rate < parent / (div - 1)
151 if (result < req->min_rate && div > 1)
152 result = parent / (div - 1);
158 static int jh7110_clk_set_rate(struct clk_hw *hw,
160 unsigned long parent_rate)
162 struct jh7110_clk *clk = jh7110_clk_from(hw);
163 unsigned long div = clamp(DIV_ROUND_CLOSEST(parent_rate, rate),
164 1UL, (unsigned long)clk->max_div);
166 jh7110_clk_reg_rmw(clk, JH7110_CLK_DIV_MASK, div);
170 static u8 jh7110_clk_get_parent(struct clk_hw *hw)
172 struct jh7110_clk *clk = jh7110_clk_from(hw);
173 u32 value = jh7110_clk_reg_get(clk);
175 return (value & JH7110_CLK_MUX_MASK) >> JH7110_CLK_MUX_SHIFT;
178 static int jh7110_clk_set_parent(struct clk_hw *hw, u8 index)
180 struct jh7110_clk *clk = jh7110_clk_from(hw);
181 u32 value = (u32)index << JH7110_CLK_MUX_SHIFT;
183 jh7110_clk_reg_rmw(clk, JH7110_CLK_MUX_MASK, value);
/* clk_ops.determine_rate for muxes: defer to the generic helper. */
static int jh7110_clk_mux_determine_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
193 static int jh7110_clk_get_phase(struct clk_hw *hw)
195 struct jh7110_clk *clk = jh7110_clk_from(hw);
196 u32 value = jh7110_clk_reg_get(clk);
198 return (value & JH7110_CLK_INVERT) ? 180 : 0;
201 static int jh7110_clk_set_phase(struct clk_hw *hw, int degrees)
203 struct jh7110_clk *clk = jh7110_clk_from(hw);
208 else if (degrees == 180)
209 value = JH7110_CLK_INVERT;
213 jh7110_clk_reg_rmw(clk, JH7110_CLK_INVERT, value);
#ifdef CONFIG_DEBUG_FS
/*
 * clk_ops.debug_init: expose the clock's single control register under
 * the clock's debugfs directory. Allocation failure is silently ignored
 * (debugfs is best-effort).
 */
static void jh7110_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	static const struct debugfs_reg32 jh7110_clk_reg = {
		.name = "CTRL",
		.offset = 0,
	};
	struct jh7110_clk *clk = jh7110_clk_from(hw);
	struct jh7110_clk_priv *priv = jh7110_priv_from(clk);
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(priv->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	regset->regs = &jh7110_clk_reg;
	regset->nregs = 1;
	regset->base = jh7110_clk_reg_addr_get(clk);

	debugfs_create_regset32("registers", 0400, dentry, regset);
}
#else
#define jh7110_clk_debug_init NULL
#endif
242 #ifdef CONFIG_PM_SLEEP
243 static int jh7110_clk_save_context(struct clk_hw *hw)
245 struct jh7110_clk *clk = jh7110_clk_from(hw);
246 void __iomem *reg = jh7110_clk_reg_addr_get(clk);
247 struct jh7110_clk_priv *priv = jh7110_priv_from(clk);
252 if ((clk->reg_flags == JH7110_CLK_ISP_FLAG) || (clk->reg_flags == JH7110_CLK_VOUT_FLAG))
255 if (clk->idx >= JH7110_CLK_REG_END)
258 spin_lock(&priv->rmw_lock);
259 clk->saved_reg_value = readl_relaxed(reg);
260 spin_unlock(&priv->rmw_lock);
265 static void jh7110_clk_gate_restore_context(struct clk_hw *hw)
267 struct jh7110_clk *clk = jh7110_clk_from(hw);
272 if ((clk->reg_flags == JH7110_CLK_ISP_FLAG) || (clk->reg_flags == JH7110_CLK_VOUT_FLAG))
275 if (clk->idx >= JH7110_CLK_REG_END)
278 jh7110_clk_reg_rmw(clk, JH7110_CLK_ENABLE, clk->saved_reg_value);
283 static void jh7110_clk_div_restore_context(struct clk_hw *hw)
285 struct jh7110_clk *clk = jh7110_clk_from(hw);
290 if ((clk->reg_flags == JH7110_CLK_ISP_FLAG) || (clk->reg_flags == JH7110_CLK_VOUT_FLAG))
293 if (clk->idx >= JH7110_CLK_REG_END)
296 jh7110_clk_reg_rmw(clk, JH7110_CLK_DIV_MASK, clk->saved_reg_value);
301 static void jh7110_clk_mux_restore_context(struct clk_hw *hw)
303 struct jh7110_clk *clk = jh7110_clk_from(hw);
308 if ((clk->reg_flags == JH7110_CLK_ISP_FLAG) || (clk->reg_flags == JH7110_CLK_VOUT_FLAG))
311 if (clk->idx >= JH7110_CLK_REG_END)
314 jh7110_clk_reg_rmw(clk, JH7110_CLK_MUX_MASK, clk->saved_reg_value);
319 static void jh7110_clk_inv_restore_context(struct clk_hw *hw)
321 struct jh7110_clk *clk = jh7110_clk_from(hw);
326 if ((clk->reg_flags == JH7110_CLK_ISP_FLAG) || (clk->reg_flags == JH7110_CLK_VOUT_FLAG))
329 if (clk->idx >= JH7110_CLK_REG_END)
332 jh7110_clk_reg_rmw(clk, JH7110_CLK_INVERT, clk->saved_reg_value);
/* gate + divider clocks: restore both fields. */
static void jh7110_clk_gdiv_restore_context(struct clk_hw *hw)
{
	jh7110_clk_div_restore_context(hw);
	jh7110_clk_gate_restore_context(hw);
}
/* gate + mux clocks: restore both fields. */
static void jh7110_clk_gmux_restore_context(struct clk_hw *hw)
{
	jh7110_clk_mux_restore_context(hw);
	jh7110_clk_gate_restore_context(hw);
}
/* mux + divider clocks: restore both fields. */
static void jh7110_clk_mdiv_restore_context(struct clk_hw *hw)
{
	jh7110_clk_mux_restore_context(hw);
	jh7110_clk_div_restore_context(hw);
}
361 static void jh7110_clk_gmd_restore_context(struct clk_hw *hw)
363 jh7110_clk_mux_restore_context(hw);
364 jh7110_clk_div_restore_context(hw);
365 jh7110_clk_gate_restore_context(hw);
372 static const struct clk_ops jh7110_clk_gate_ops = {
373 .enable = jh7110_clk_enable,
374 .disable = jh7110_clk_disable,
375 .is_enabled = jh7110_clk_is_enabled,
376 .debug_init = jh7110_clk_debug_init,
377 #ifdef CONFIG_PM_SLEEP
378 .save_context = jh7110_clk_save_context,
379 .restore_context = jh7110_clk_gate_restore_context,
383 static const struct clk_ops jh7110_clk_div_ops = {
384 .recalc_rate = jh7110_clk_recalc_rate,
385 .determine_rate = jh7110_clk_determine_rate,
386 .set_rate = jh7110_clk_set_rate,
387 .debug_init = jh7110_clk_debug_init,
388 #ifdef CONFIG_PM_SLEEP
389 .save_context = jh7110_clk_save_context,
390 .restore_context = jh7110_clk_div_restore_context,
394 static const struct clk_ops jh7110_clk_gdiv_ops = {
395 .enable = jh7110_clk_enable,
396 .disable = jh7110_clk_disable,
397 .is_enabled = jh7110_clk_is_enabled,
398 .recalc_rate = jh7110_clk_recalc_rate,
399 .determine_rate = jh7110_clk_determine_rate,
400 .set_rate = jh7110_clk_set_rate,
401 .debug_init = jh7110_clk_debug_init,
402 #ifdef CONFIG_PM_SLEEP
403 .save_context = jh7110_clk_save_context,
404 .restore_context = jh7110_clk_gdiv_restore_context,
408 static const struct clk_ops jh7110_clk_mux_ops = {
409 .determine_rate = jh7110_clk_mux_determine_rate,
410 .set_parent = jh7110_clk_set_parent,
411 .get_parent = jh7110_clk_get_parent,
412 .debug_init = jh7110_clk_debug_init,
413 #ifdef CONFIG_PM_SLEEP
414 .save_context = jh7110_clk_save_context,
415 .restore_context = jh7110_clk_mux_restore_context,
419 static const struct clk_ops jh7110_clk_gmux_ops = {
420 .enable = jh7110_clk_enable,
421 .disable = jh7110_clk_disable,
422 .is_enabled = jh7110_clk_is_enabled,
423 .determine_rate = jh7110_clk_mux_determine_rate,
424 .set_parent = jh7110_clk_set_parent,
425 .get_parent = jh7110_clk_get_parent,
426 .debug_init = jh7110_clk_debug_init,
427 #ifdef CONFIG_PM_SLEEP
428 .save_context = jh7110_clk_save_context,
429 .restore_context = jh7110_clk_gmux_restore_context,
433 static const struct clk_ops jh7110_clk_mdiv_ops = {
434 .recalc_rate = jh7110_clk_recalc_rate,
435 .determine_rate = jh7110_clk_determine_rate,
436 .get_parent = jh7110_clk_get_parent,
437 .set_parent = jh7110_clk_set_parent,
438 .set_rate = jh7110_clk_set_rate,
439 .debug_init = jh7110_clk_debug_init,
440 #ifdef CONFIG_PM_SLEEP
441 .save_context = jh7110_clk_save_context,
442 .restore_context = jh7110_clk_mdiv_restore_context,
446 static const struct clk_ops jh7110_clk_gmd_ops = {
447 .enable = jh7110_clk_enable,
448 .disable = jh7110_clk_disable,
449 .is_enabled = jh7110_clk_is_enabled,
450 .recalc_rate = jh7110_clk_recalc_rate,
451 .determine_rate = jh7110_clk_determine_rate,
452 .get_parent = jh7110_clk_get_parent,
453 .set_parent = jh7110_clk_set_parent,
454 .set_rate = jh7110_clk_set_rate,
455 .debug_init = jh7110_clk_debug_init,
456 #ifdef CONFIG_PM_SLEEP
457 .save_context = jh7110_clk_save_context,
458 .restore_context = jh7110_clk_gmd_restore_context,
462 static const struct clk_ops jh7110_clk_inv_ops = {
463 .get_phase = jh7110_clk_get_phase,
464 .set_phase = jh7110_clk_set_phase,
465 .debug_init = jh7110_clk_debug_init,
466 #ifdef CONFIG_PM_SLEEP
467 .save_context = jh7110_clk_save_context,
468 .restore_context = jh7110_clk_inv_restore_context,
472 const struct clk_ops *starfive_jh7110_clk_ops(u32 max)
474 const struct clk_ops *ops;
476 if (max & JH7110_CLK_DIV_MASK) {
477 if (max & JH7110_CLK_MUX_MASK) {
478 if (max & JH7110_CLK_ENABLE)
479 ops = &jh7110_clk_gmd_ops;
481 ops = &jh7110_clk_mdiv_ops;
482 } else if (max & JH7110_CLK_ENABLE)
483 ops = &jh7110_clk_gdiv_ops;
485 ops = &jh7110_clk_div_ops;
486 } else if (max & JH7110_CLK_MUX_MASK) {
487 if (max & JH7110_CLK_ENABLE)
488 ops = &jh7110_clk_gmux_ops;
490 ops = &jh7110_clk_mux_ops;
491 } else if (max & JH7110_CLK_ENABLE)
492 ops = &jh7110_clk_gate_ops;
494 ops = &jh7110_clk_inv_ops;
498 EXPORT_SYMBOL_GPL(starfive_jh7110_clk_ops);
500 #ifdef CONFIG_PM_SLEEP
501 static int clk_starfive_jh7110_gen_system_suspend(struct device *dev)
503 return clk_save_context();
506 static int clk_starfive_jh7110_gen_system_resume(struct device *dev)
508 clk_restore_context();
514 static const struct dev_pm_ops clk_starfive_jh7110_gen_pm_ops = {
515 SET_LATE_SYSTEM_SLEEP_PM_OPS(clk_starfive_jh7110_gen_system_suspend,
516 clk_starfive_jh7110_gen_system_resume)
519 static struct clk_hw *jh7110_clk_get(struct of_phandle_args *clkspec,
522 struct jh7110_clk_priv *priv = data;
523 unsigned int idx = clkspec->args[0];
525 if (idx < JH7110_PLL0_OUT)
526 return &priv->reg[idx].hw;
528 if (idx < JH7110_CLK_END) {
529 #ifdef CONFIG_CLK_STARFIVE_JH7110_PLL
530 if ((idx == JH7110_PLL0_OUT) || (idx == JH7110_PLL2_OUT))
531 return &priv->pll_priv[PLL_OF(idx)].hw;
533 return priv->pll[PLL_OF(idx)];
536 return ERR_PTR(-EINVAL);
540 static int __init clk_starfive_jh7110_probe(struct platform_device *pdev)
542 struct jh7110_clk_priv *priv;
545 priv = devm_kzalloc(&pdev->dev, struct_size(priv, reg, JH7110_PLL0_OUT),
550 spin_lock_init(&priv->rmw_lock);
551 priv->dev = &pdev->dev;
553 pm_runtime_enable(priv->dev);
555 #ifdef CONFIG_CLK_STARFIVE_JH7110_PLL
556 ret = clk_starfive_jh7110_pll_init(pdev, priv->pll_priv);
561 ret = clk_starfive_jh7110_sys_init(pdev, priv);
565 /* set PLL0 default rate */
566 #ifdef CONFIG_CLK_STARFIVE_JH7110_PLL
567 if (PLL0_DEFAULT_FREQ) {
568 struct clk *pll0_clk = priv->pll_priv[PLL0_INDEX].hw.clk;
569 struct clk *cpu_root = priv->reg[JH7110_CPU_ROOT].hw.clk;
570 struct clk *osc_clk = clk_get(&pdev->dev, "osc");
573 dev_err(&pdev->dev, "get osc_clk failed\n");
575 if (PLL0_DEFAULT_FREQ >= PLL0_FREQ_1500_VALUE) {
576 struct clk *cpu_core = priv->reg[JH7110_CPU_CORE].hw.clk;
578 if (clk_set_rate(cpu_core, clk_get_rate(pll0_clk) / 2)) {
579 dev_err(&pdev->dev, "set cpu_core rate failed\n");
584 if (clk_set_parent(cpu_root, osc_clk)) {
585 dev_err(&pdev->dev, "set parent to osc_clk failed\n");
589 if (clk_set_rate(pll0_clk, PLL0_DEFAULT_FREQ))
590 dev_err(&pdev->dev, "set pll0 rate failed\n");
592 if (clk_set_parent(cpu_root, pll0_clk))
593 dev_err(&pdev->dev, "set parent to pll0_clk failed\n");
600 ret = clk_starfive_jh7110_stg_init(pdev, priv);
604 ret = clk_starfive_jh7110_aon_init(pdev, priv);
608 ret = devm_of_clk_add_hw_provider(priv->dev, jh7110_clk_get, priv);
612 dev_info(&pdev->dev, "starfive JH7110 clkgen init successfully.");
616 static const struct of_device_id clk_starfive_jh7110_match[] = {
617 {.compatible = "starfive,jh7110-clkgen"},
621 static struct platform_driver clk_starfive_jh7110_driver = {
623 .name = "clk-starfive-jh7110",
624 .of_match_table = clk_starfive_jh7110_match,
625 .pm = &clk_starfive_jh7110_gen_pm_ops,
628 builtin_platform_driver_probe(clk_starfive_jh7110_driver,
629 clk_starfive_jh7110_probe);
631 MODULE_AUTHOR("Xingyu Wu <xingyu.wu@starfivetech.com>");
632 MODULE_DESCRIPTION("StarFive JH7110 sysgen clock driver");
633 MODULE_LICENSE("GPL");