clk: starfive: Add StarFive JH7110 system clock driver
[platform/kernel/linux-starfive.git] / drivers / clk / starfive / clk-starfive-jh71x0.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * StarFive JH71X0 Clock Generator Driver
4  *
5  * Copyright (C) 2021-2022 Emil Renner Berthing <kernel@esmil.dk>
6  */
7
8 #include <linux/auxiliary_bus.h>
9 #include <linux/clk-provider.h>
10 #include <linux/debugfs.h>
11 #include <linux/device.h>
12 #include <linux/io.h>
13
14 #include "clk-starfive-jh71x0.h"
15
16 static struct jh71x0_clk *jh71x0_clk_from(struct clk_hw *hw)
17 {
18         return container_of(hw, struct jh71x0_clk, hw);
19 }
20
/*
 * Recover the driver private data from a clock.  Every clock carries its
 * register index in clk->idx and lives at priv->reg[clk->idx], so
 * container_of() on that array member yields the enclosing
 * struct jh71x0_clk_priv.
 */
static struct jh71x0_clk_priv *jh71x0_priv_from(struct jh71x0_clk *clk)
{
	return container_of(clk, struct jh71x0_clk_priv, reg[clk->idx]);
}
25
26 static u32 jh71x0_clk_reg_get(struct jh71x0_clk *clk)
27 {
28         struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
29         void __iomem *reg = priv->base + 4 * clk->idx;
30
31         return readl_relaxed(reg);
32 }
33
34 static void jh71x0_clk_reg_rmw(struct jh71x0_clk *clk, u32 mask, u32 value)
35 {
36         struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
37         void __iomem *reg = priv->base + 4 * clk->idx;
38         unsigned long flags;
39
40         spin_lock_irqsave(&priv->rmw_lock, flags);
41         value |= readl_relaxed(reg) & ~mask;
42         writel_relaxed(value, reg);
43         spin_unlock_irqrestore(&priv->rmw_lock, flags);
44 }
45
46 static int jh71x0_clk_enable(struct clk_hw *hw)
47 {
48         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
49
50         jh71x0_clk_reg_rmw(clk, JH71X0_CLK_ENABLE, JH71X0_CLK_ENABLE);
51         return 0;
52 }
53
54 static void jh71x0_clk_disable(struct clk_hw *hw)
55 {
56         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
57
58         jh71x0_clk_reg_rmw(clk, JH71X0_CLK_ENABLE, 0);
59 }
60
61 static int jh71x0_clk_is_enabled(struct clk_hw *hw)
62 {
63         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
64
65         return !!(jh71x0_clk_reg_get(clk) & JH71X0_CLK_ENABLE);
66 }
67
68 static unsigned long jh71x0_clk_recalc_rate(struct clk_hw *hw,
69                                             unsigned long parent_rate)
70 {
71         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
72         u32 div = jh71x0_clk_reg_get(clk) & JH71X0_CLK_DIV_MASK;
73
74         return div ? parent_rate / div : 0;
75 }
76
77 static int jh71x0_clk_determine_rate(struct clk_hw *hw,
78                                      struct clk_rate_request *req)
79 {
80         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
81         unsigned long parent = req->best_parent_rate;
82         unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
83         unsigned long div = min_t(unsigned long, DIV_ROUND_UP(parent, rate), clk->max_div);
84         unsigned long result = parent / div;
85
86         /*
87          * we want the result clamped by min_rate and max_rate if possible:
88          * case 1: div hits the max divider value, which means it's less than
89          * parent / rate, so the result is greater than rate and min_rate in
90          * particular. we can't do anything about result > max_rate because the
91          * divider doesn't go any further.
92          * case 2: div = DIV_ROUND_UP(parent, rate) which means the result is
93          * always lower or equal to rate and max_rate. however the result may
94          * turn out lower than min_rate, but then the next higher rate is fine:
95          *   div - 1 = ceil(parent / rate) - 1 < parent / rate
96          * and thus
97          *   min_rate <= rate < parent / (div - 1)
98          */
99         if (result < req->min_rate && div > 1)
100                 result = parent / (div - 1);
101
102         req->rate = result;
103         return 0;
104 }
105
106 static int jh71x0_clk_set_rate(struct clk_hw *hw,
107                                unsigned long rate,
108                                unsigned long parent_rate)
109 {
110         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
111         unsigned long div = clamp(DIV_ROUND_CLOSEST(parent_rate, rate),
112                                   1UL, (unsigned long)clk->max_div);
113
114         jh71x0_clk_reg_rmw(clk, JH71X0_CLK_DIV_MASK, div);
115         return 0;
116 }
117
118 static unsigned long jh71x0_clk_frac_recalc_rate(struct clk_hw *hw,
119                                                  unsigned long parent_rate)
120 {
121         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
122         u32 reg = jh71x0_clk_reg_get(clk);
123         unsigned long div100 = 100 * (reg & JH71X0_CLK_INT_MASK) +
124                                ((reg & JH71X0_CLK_FRAC_MASK) >> JH71X0_CLK_FRAC_SHIFT);
125
126         return (div100 >= JH71X0_CLK_FRAC_MIN) ? 100 * parent_rate / div100 : 0;
127 }
128
129 static int jh71x0_clk_frac_determine_rate(struct clk_hw *hw,
130                                           struct clk_rate_request *req)
131 {
132         unsigned long parent100 = 100 * req->best_parent_rate;
133         unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
134         unsigned long div100 = clamp(DIV_ROUND_CLOSEST(parent100, rate),
135                                      JH71X0_CLK_FRAC_MIN, JH71X0_CLK_FRAC_MAX);
136         unsigned long result = parent100 / div100;
137
138         /* clamp the result as in jh71x0_clk_determine_rate() above */
139         if (result > req->max_rate && div100 < JH71X0_CLK_FRAC_MAX)
140                 result = parent100 / (div100 + 1);
141         if (result < req->min_rate && div100 > JH71X0_CLK_FRAC_MIN)
142                 result = parent100 / (div100 - 1);
143
144         req->rate = result;
145         return 0;
146 }
147
148 static int jh71x0_clk_frac_set_rate(struct clk_hw *hw,
149                                     unsigned long rate,
150                                     unsigned long parent_rate)
151 {
152         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
153         unsigned long div100 = clamp(DIV_ROUND_CLOSEST(100 * parent_rate, rate),
154                                      JH71X0_CLK_FRAC_MIN, JH71X0_CLK_FRAC_MAX);
155         u32 value = ((div100 % 100) << JH71X0_CLK_FRAC_SHIFT) | (div100 / 100);
156
157         jh71x0_clk_reg_rmw(clk, JH71X0_CLK_DIV_MASK, value);
158         return 0;
159 }
160
161 static u8 jh71x0_clk_get_parent(struct clk_hw *hw)
162 {
163         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
164         u32 value = jh71x0_clk_reg_get(clk);
165
166         return (value & JH71X0_CLK_MUX_MASK) >> JH71X0_CLK_MUX_SHIFT;
167 }
168
169 static int jh71x0_clk_set_parent(struct clk_hw *hw, u8 index)
170 {
171         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
172         u32 value = (u32)index << JH71X0_CLK_MUX_SHIFT;
173
174         jh71x0_clk_reg_rmw(clk, JH71X0_CLK_MUX_MASK, value);
175         return 0;
176 }
177
/*
 * clk_ops .determine_rate callback for muxes: defer to the generic mux
 * rate selection with no special flags.
 */
static int jh71x0_clk_mux_determine_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
183
184 static int jh71x0_clk_get_phase(struct clk_hw *hw)
185 {
186         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
187         u32 value = jh71x0_clk_reg_get(clk);
188
189         return (value & JH71X0_CLK_INVERT) ? 180 : 0;
190 }
191
192 static int jh71x0_clk_set_phase(struct clk_hw *hw, int degrees)
193 {
194         struct jh71x0_clk *clk = jh71x0_clk_from(hw);
195         u32 value;
196
197         if (degrees == 0)
198                 value = 0;
199         else if (degrees == 180)
200                 value = JH71X0_CLK_INVERT;
201         else
202                 return -EINVAL;
203
204         jh71x0_clk_reg_rmw(clk, JH71X0_CLK_INVERT, value);
205         return 0;
206 }
207
#ifdef CONFIG_DEBUG_FS
/* Expose the clock's control register read-only in its debugfs directory. */
static void jh71x0_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	static const struct debugfs_reg32 jh71x0_clk_reg = {
		.name = "CTRL",
		.offset = 0,
	};
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(priv->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;	/* debugfs is best effort; skip on allocation failure */

	regset->nregs = 1;
	regset->regs = &jh71x0_clk_reg;
	regset->base = priv->base + 4 * clk->idx;

	debugfs_create_regset32("registers", 0400, dentry, regset);
}
#else
#define jh71x0_clk_debug_init NULL
#endif
232
/* Gate only. */
static const struct clk_ops jh71x0_clk_gate_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.debug_init = jh71x0_clk_debug_init,
};

/* Integer divider only. */
static const struct clk_ops jh71x0_clk_div_ops = {
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* Fractional divider only. */
static const struct clk_ops jh71x0_clk_fdiv_ops = {
	.recalc_rate = jh71x0_clk_frac_recalc_rate,
	.determine_rate = jh71x0_clk_frac_determine_rate,
	.set_rate = jh71x0_clk_frac_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* Gate + integer divider. */
static const struct clk_ops jh71x0_clk_gdiv_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* Mux only. */
static const struct clk_ops jh71x0_clk_mux_ops = {
	.determine_rate = jh71x0_clk_mux_determine_rate,
	.set_parent = jh71x0_clk_set_parent,
	.get_parent = jh71x0_clk_get_parent,
	.debug_init = jh71x0_clk_debug_init,
};

/* Gate + mux. */
static const struct clk_ops jh71x0_clk_gmux_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.determine_rate = jh71x0_clk_mux_determine_rate,
	.set_parent = jh71x0_clk_set_parent,
	.get_parent = jh71x0_clk_get_parent,
	.debug_init = jh71x0_clk_debug_init,
};

/* Mux + integer divider. */
static const struct clk_ops jh71x0_clk_mdiv_ops = {
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.get_parent = jh71x0_clk_get_parent,
	.set_parent = jh71x0_clk_set_parent,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* Gate + mux + integer divider. */
static const struct clk_ops jh71x0_clk_gmd_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.get_parent = jh71x0_clk_get_parent,
	.set_parent = jh71x0_clk_set_parent,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* Inverter (phase control) only. */
static const struct clk_ops jh71x0_clk_inv_ops = {
	.get_phase = jh71x0_clk_get_phase,
	.set_phase = jh71x0_clk_set_phase,
	.debug_init = jh71x0_clk_debug_init,
};
307
308 const struct clk_ops *starfive_jh71x0_clk_ops(u32 max)
309 {
310         if (max & JH71X0_CLK_DIV_MASK) {
311                 if (max & JH71X0_CLK_MUX_MASK) {
312                         if (max & JH71X0_CLK_ENABLE)
313                                 return &jh71x0_clk_gmd_ops;
314                         return &jh71x0_clk_mdiv_ops;
315                 }
316                 if (max & JH71X0_CLK_ENABLE)
317                         return &jh71x0_clk_gdiv_ops;
318                 if (max == JH71X0_CLK_FRAC_MAX)
319                         return &jh71x0_clk_fdiv_ops;
320                 return &jh71x0_clk_div_ops;
321         }
322
323         if (max & JH71X0_CLK_MUX_MASK) {
324                 if (max & JH71X0_CLK_ENABLE)
325                         return &jh71x0_clk_gmux_ops;
326                 return &jh71x0_clk_mux_ops;
327         }
328
329         if (max & JH71X0_CLK_ENABLE)
330                 return &jh71x0_clk_gate_ops;
331
332         return &jh71x0_clk_inv_ops;
333 }
334 EXPORT_SYMBOL_GPL(starfive_jh71x0_clk_ops);
335
336 #if IS_ENABLED(CONFIG_CLK_STARFIVE_JH7110_SYS)
337
/* devm action: remove the reset auxiliary device from the bus on teardown. */
static void jh7110_reset_unregister_adev(void *_adev)
{
	auxiliary_device_delete((struct auxiliary_device *)_adev);
}
344
/* Device release callback: drop the init reference taken in auxiliary_device_init(). */
static void jh7110_reset_adev_release(struct device *dev)
{
	auxiliary_device_uninit(to_auxiliary_dev(dev));
}
351
352 int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
353                                      const char *adev_name,
354                                      u32 adev_id)
355 {
356         struct auxiliary_device *adev;
357         int ret;
358
359         adev = devm_kzalloc(priv->dev, sizeof(*adev), GFP_KERNEL);
360         if (!adev)
361                 return -ENOMEM;
362
363         adev->name = adev_name;
364         adev->dev.parent = priv->dev;
365         adev->dev.release = jh7110_reset_adev_release;
366         adev->id = adev_id;
367
368         ret = auxiliary_device_init(adev);
369         if (ret)
370                 return ret;
371
372         ret = auxiliary_device_add(adev);
373         if (ret) {
374                 auxiliary_device_uninit(adev);
375                 return ret;
376         }
377
378         return devm_add_action_or_reset(priv->dev,
379                                         jh7110_reset_unregister_adev, adev);
380 }
381 EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
382
383 #endif