drivers/clk/starfive/clk-starfive-jh71x0.c
// SPDX-License-Identifier: GPL-2.0
/*
 * StarFive JH71X0 Clock Generator Driver
 *
 * Copyright (C) 2021-2022 Emil Renner Berthing <kernel@esmil.dk>
 */

#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/io.h>

#include "clk-starfive-jh71x0.h"

static struct jh71x0_clk *jh71x0_clk_from(struct clk_hw *hw)
{
	return container_of(hw, struct jh71x0_clk, hw);
}

static struct jh71x0_clk_priv *jh71x0_priv_from(struct jh71x0_clk *clk)
{
	return container_of(clk, struct jh71x0_clk_priv, reg[clk->idx]);
}

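/*
 * Each clock is controlled by one 32-bit register at offset 4 * clk->idx
 * from the shared register base. Depending on the clock type the register
 * holds an enable bit, a (possibly fractional) divider, a parent mux field
 * and/or an invert bit.
 */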
static u32 jh71x0_clk_reg_get(struct jh71x0_clk *clk)
{
	struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
	void __iomem *reg = priv->base + 4 * clk->idx;

	return readl_relaxed(reg);
}

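/* Read-modify-write a control register under the shared rmw_lock. */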
static void jh71x0_clk_reg_rmw(struct jh71x0_clk *clk, u32 mask, u32 value)
{
	struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
	void __iomem *reg = priv->base + 4 * clk->idx;
	unsigned long flags;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	value |= readl_relaxed(reg) & ~mask;
	writel_relaxed(value, reg);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static int jh71x0_clk_enable(struct clk_hw *hw)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);

	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_ENABLE, JH71X0_CLK_ENABLE);
	return 0;
}

static void jh71x0_clk_disable(struct clk_hw *hw)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);

	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_ENABLE, 0);
}

static int jh71x0_clk_is_enabled(struct clk_hw *hw)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);

	return !!(jh71x0_clk_reg_get(clk) & JH71X0_CLK_ENABLE);
}

static unsigned long jh71x0_clk_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	u32 div = jh71x0_clk_reg_get(clk) & JH71X0_CLK_DIV_MASK;

	return div ? parent_rate / div : 0;
}

static int jh71x0_clk_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	unsigned long parent = req->best_parent_rate;
	unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
	unsigned long div = min_t(unsigned long, DIV_ROUND_UP(parent, rate), clk->max_div);
	unsigned long result = parent / div;

	/*
	 * we want the result clamped by min_rate and max_rate if possible:
	 * case 1: div hits the max divider value, which means it's less than
	 * parent / rate, so the result is greater than rate and min_rate in
	 * particular. we can't do anything about result > max_rate because the
	 * divider doesn't go any further.
	 * case 2: div = DIV_ROUND_UP(parent, rate) which means the result is
	 * always lower or equal to rate and max_rate. however the result may
	 * turn out lower than min_rate, but then the next higher rate is fine:
	 *   div - 1 = ceil(parent / rate) - 1 < parent / rate
	 * and thus
	 *   min_rate <= rate < parent / (div - 1)
	 */
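	/*
	 * Hypothetical example (assuming clk->max_div >= 15): with
	 * parent = 100 MHz and a request of rate = min_rate = 7 MHz,
	 * max_rate = 8 MHz, we get div = DIV_ROUND_UP(100 MHz, 7 MHz) = 15
	 * and result = 6.67 MHz, which is below min_rate, so the next
	 * higher rate 100 MHz / 14 = 7.14 MHz is returned instead.
	 */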
	if (result < req->min_rate && div > 1)
		result = parent / (div - 1);

	req->rate = result;
	return 0;
}

static int jh71x0_clk_set_rate(struct clk_hw *hw,
			       unsigned long rate,
			       unsigned long parent_rate)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	unsigned long div = clamp(DIV_ROUND_CLOSEST(parent_rate, rate),
				  1UL, (unsigned long)clk->max_div);

	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_DIV_MASK, div);
	return 0;
}

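/*
 * Fractional divider clocks encode the divider as an integer part
 * (JH71X0_CLK_INT_MASK) plus a fractional part in hundredths
 * (JH71X0_CLK_FRAC_MASK), so the effective divider is div100 / 100.
 */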
static unsigned long jh71x0_clk_frac_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	u32 reg = jh71x0_clk_reg_get(clk);
	unsigned long div100 = 100 * (reg & JH71X0_CLK_INT_MASK) +
			       ((reg & JH71X0_CLK_FRAC_MASK) >> JH71X0_CLK_FRAC_SHIFT);

	return (div100 >= JH71X0_CLK_FRAC_MIN) ? 100 * parent_rate / div100 : 0;
}

static int jh71x0_clk_frac_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
{
	unsigned long parent100 = 100 * req->best_parent_rate;
	unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
	unsigned long div100 = clamp(DIV_ROUND_CLOSEST(parent100, rate),
				     JH71X0_CLK_FRAC_MIN, JH71X0_CLK_FRAC_MAX);
	unsigned long result = parent100 / div100;

	/* clamp the result as in jh71x0_clk_determine_rate() above */
	if (result > req->max_rate && div100 < JH71X0_CLK_FRAC_MAX)
		result = parent100 / (div100 + 1);
	if (result < req->min_rate && div100 > JH71X0_CLK_FRAC_MIN)
		result = parent100 / (div100 - 1);

	req->rate = result;
	return 0;
}

static int jh71x0_clk_frac_set_rate(struct clk_hw *hw,
				    unsigned long rate,
				    unsigned long parent_rate)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	unsigned long div100 = clamp(DIV_ROUND_CLOSEST(100 * parent_rate, rate),
				     JH71X0_CLK_FRAC_MIN, JH71X0_CLK_FRAC_MAX);
	u32 value = ((div100 % 100) << JH71X0_CLK_FRAC_SHIFT) | (div100 / 100);

	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_DIV_MASK, value);
	return 0;
}

static u8 jh71x0_clk_get_parent(struct clk_hw *hw)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	u32 value = jh71x0_clk_reg_get(clk);

	return (value & JH71X0_CLK_MUX_MASK) >> JH71X0_CLK_MUX_SHIFT;
}

static int jh71x0_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	u32 value = (u32)index << JH71X0_CLK_MUX_SHIFT;

	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_MUX_MASK, value);
	return 0;
}

static int jh71x0_clk_mux_determine_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}

static int jh71x0_clk_get_phase(struct clk_hw *hw)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	u32 value = jh71x0_clk_reg_get(clk);

	return (value & JH71X0_CLK_INVERT) ? 180 : 0;
}

static int jh71x0_clk_set_phase(struct clk_hw *hw, int degrees)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	u32 value;

	if (degrees == 0)
		value = 0;
	else if (degrees == 180)
		value = JH71X0_CLK_INVERT;
	else
		return -EINVAL;

	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_INVERT, value);
	return 0;
}

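/*
 * With CONFIG_DEBUG_FS each clock exposes its control register as a
 * read-only "registers" file in its debugfs directory; without it the
 * .debug_init callback is simply left NULL.
 */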
#ifdef CONFIG_DEBUG_FS
static void jh71x0_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	static const struct debugfs_reg32 jh71x0_clk_reg = {
		.name = "CTRL",
		.offset = 0,
	};
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(priv->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	regset->regs = &jh71x0_clk_reg;
	regset->nregs = 1;
	regset->base = priv->base + 4 * clk->idx;

	debugfs_create_regset32("registers", 0400, dentry, regset);
}
#else
#define jh71x0_clk_debug_init NULL
#endif

static const struct clk_ops jh71x0_clk_gate_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.debug_init = jh71x0_clk_debug_init,
};

static const struct clk_ops jh71x0_clk_div_ops = {
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

static const struct clk_ops jh71x0_clk_fdiv_ops = {
	.recalc_rate = jh71x0_clk_frac_recalc_rate,
	.determine_rate = jh71x0_clk_frac_determine_rate,
	.set_rate = jh71x0_clk_frac_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

static const struct clk_ops jh71x0_clk_gdiv_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

static const struct clk_ops jh71x0_clk_mux_ops = {
	.determine_rate = jh71x0_clk_mux_determine_rate,
	.set_parent = jh71x0_clk_set_parent,
	.get_parent = jh71x0_clk_get_parent,
	.debug_init = jh71x0_clk_debug_init,
};

static const struct clk_ops jh71x0_clk_gmux_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.determine_rate = jh71x0_clk_mux_determine_rate,
	.set_parent = jh71x0_clk_set_parent,
	.get_parent = jh71x0_clk_get_parent,
	.debug_init = jh71x0_clk_debug_init,
};

static const struct clk_ops jh71x0_clk_mdiv_ops = {
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.get_parent = jh71x0_clk_get_parent,
	.set_parent = jh71x0_clk_set_parent,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

static const struct clk_ops jh71x0_clk_gmd_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.get_parent = jh71x0_clk_get_parent,
	.set_parent = jh71x0_clk_set_parent,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

static const struct clk_ops jh71x0_clk_inv_ops = {
	.get_phase = jh71x0_clk_get_phase,
	.set_phase = jh71x0_clk_set_phase,
	.debug_init = jh71x0_clk_debug_init,
};

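/*
 * Select the clk_ops matching the control bits a clock implements: the max
 * value (as passed by the SoC-specific StarFive clock drivers) encodes
 * which of the divider, mux and enable fields are present. A max equal to
 * JH71X0_CLK_FRAC_MAX selects the fractional divider ops, and a clock with
 * none of these fields is treated as an inverter.
 */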
const struct clk_ops *starfive_jh71x0_clk_ops(u32 max)
{
	if (max & JH71X0_CLK_DIV_MASK) {
		if (max & JH71X0_CLK_MUX_MASK) {
			if (max & JH71X0_CLK_ENABLE)
				return &jh71x0_clk_gmd_ops;
			return &jh71x0_clk_mdiv_ops;
		}
		if (max & JH71X0_CLK_ENABLE)
			return &jh71x0_clk_gdiv_ops;
		if (max == JH71X0_CLK_FRAC_MAX)
			return &jh71x0_clk_fdiv_ops;
		return &jh71x0_clk_div_ops;
	}

	if (max & JH71X0_CLK_MUX_MASK) {
		if (max & JH71X0_CLK_ENABLE)
			return &jh71x0_clk_gmux_ops;
		return &jh71x0_clk_mux_ops;
	}

	if (max & JH71X0_CLK_ENABLE)
		return &jh71x0_clk_gate_ops;

	return &jh71x0_clk_inv_ops;
}
EXPORT_SYMBOL_GPL(starfive_jh71x0_clk_ops);