clk: mediatek: add support to configure clock driver parent
platform/kernel/u-boot.git: drivers/clk/clk-uclass.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#define LOG_CATEGORY UCLASS_CLK

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <asm/global_data.h>
#include <dm/device_compat.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
        return (const struct clk_ops *)dev->driver->ops;
}

struct clk *dev_get_clk_ptr(struct udevice *dev)
{
        return (struct clk *)dev_get_uclass_priv(dev);
}

#if CONFIG_IS_ENABLED(OF_PLATDATA)
int clk_get_by_phandle(struct udevice *dev, const struct phandle_1_arg *cells,
                       struct clk *clk)
{
        int ret;

        ret = device_get_by_ofplat_idx(cells->idx, &clk->dev);
        if (ret)
                return ret;
        clk->id = cells->arg[0];

        return 0;
}
#endif

#if CONFIG_IS_ENABLED(OF_REAL)
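/*
 * Default translation of a device-tree clock specifier: the first cell, if
 * present, becomes clk->id and clk->data is cleared. Used whenever the clock
 * provider does not implement its own of_xlate operation.
 */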
static int clk_of_xlate_default(struct clk *clk,
                                struct ofnode_phandle_args *args)
{
        debug("%s(clk=%p)\n", __func__, clk);

        if (args->args_count > 1) {
                debug("Invalid args_count: %d\n", args->args_count);
                return -EINVAL;
        }

        if (args->args_count)
                clk->id = args->args[0];
        else
                clk->id = 0;

        clk->data = 0;

        return 0;
}

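/*
 * Common tail of the clk_get_by_index*() lookups: bind the parsed phandle to
 * its provider device, translate the specifier via the provider's of_xlate
 * (or the default translation) and finally request the clock.
 */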
static int clk_get_by_index_tail(int ret, ofnode node,
                                 struct ofnode_phandle_args *args,
                                 const char *list_name, int index,
                                 struct clk *clk)
{
        struct udevice *dev_clk;
        const struct clk_ops *ops;

        assert(clk);
        clk->dev = NULL;
        if (ret)
                goto err;

        ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
        if (ret) {
                debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
                      __func__, ret);
                return log_msg_ret("get", ret);
        }

        clk->dev = dev_clk;

        ops = clk_dev_ops(dev_clk);

        if (ops->of_xlate)
                ret = ops->of_xlate(clk, args);
        else
                ret = clk_of_xlate_default(clk, args);
        if (ret) {
                debug("of_xlate() failed: %d\n", ret);
                return log_msg_ret("xlate", ret);
        }

        return clk_request(dev_clk, clk);
err:
        debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
              __func__, ofnode_get_name(node), list_name, index, ret);

        return log_msg_ret("prop", ret);
}

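/*
 * Fetch entry @index of the phandle list @prop_name (e.g. "assigned-clocks"
 * or "assigned-clock-parents") of @dev and fill @clk from it.
 */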
static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
                                   int index, struct clk *clk)
{
        int ret;
        struct ofnode_phandle_args args;

        debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

        assert(clk);
        clk->dev = NULL;

        ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
                                         index, &args);
        if (ret) {
                debug("%s: dev_read_phandle_with_args failed: err=%d\n",
                      __func__, ret);
                return log_ret(ret);
        }

        return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, prop_name,
                                     index, clk);
}

int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
        return clk_get_by_index_nodev(dev_ofnode(dev), index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
        struct ofnode_phandle_args args;
        int ret;

        ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
                                             index, &args);

        return clk_get_by_index_tail(ret, node, &args, "clocks",
                                     index, clk);
}

int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
        int i, ret, err, count;

        bulk->count = 0;

        count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
        if (count < 1)
                return count;

        bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
        if (!bulk->clks)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                ret = clk_get_by_index(dev, i, &bulk->clks[i]);
                if (ret < 0)
                        goto bulk_get_err;

                ++bulk->count;
        }

        return 0;

bulk_get_err:
        err = clk_release_all(bulk->clks, bulk->count);
        if (err)
                debug("%s: could not release all clocks for %p\n",
                      __func__, dev);

        return ret;
}

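/*
 * With CLK_CCF enabled, resolve @clk to the clock registered under the same
 * ID in the CLK uclass, so that subsequent operations act on the provider's
 * own clk instance; without CCF the clock is returned unchanged.
 */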
static struct clk *clk_set_default_get_by_id(struct clk *clk)
{
        struct clk *c = clk;

        if (CONFIG_IS_ENABLED(CLK_CCF)) {
                int ret = clk_get_by_id(clk->id, &c);

                if (ret) {
                        debug("%s(): could not get parent clock pointer, id %lu\n",
                              __func__, clk->id);
                        return ERR_PTR(ret);
                }
        }

        return c;
}

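/*
 * Apply the "assigned-clock-parents" property of @dev: for each entry, look
 * up the requested parent and the matching "assigned-clocks" entry, then
 * reparent the latter. Depending on @stage, a provider reparenting itself is
 * deferred until after it has been probed.
 */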
static int clk_set_default_parents(struct udevice *dev,
                                   enum clk_defaults_stage stage)
{
        struct clk clk, parent_clk, *c, *p;
        int index;
        int num_parents;
        int ret;

        num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
                                                  "#clock-cells", 0);
        if (num_parents < 0) {
                debug("%s: could not read assigned-clock-parents for %p\n",
                      __func__, dev);
                return 0;
        }

        for (index = 0; index < num_parents; index++) {
                ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
                                              index, &parent_clk);
                /* If -ENOENT, this is a no-op entry */
                if (ret == -ENOENT)
                        continue;

                if (ret) {
                        debug("%s: could not get parent clock %d for %s\n",
                              __func__, index, dev_read_name(dev));
                        return ret;
                }

                p = clk_set_default_get_by_id(&parent_clk);
                if (IS_ERR(p))
                        return PTR_ERR(p);

                ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
                                              index, &clk);
                /*
                 * If the clock provider is not ready yet, let it handle
                 * the re-programming later.
                 */
                if (ret == -EPROBE_DEFER) {
                        ret = 0;
                        continue;
                }

                if (ret) {
                        debug("%s: could not get assigned clock %d for %s\n",
                              __func__, index, dev_read_name(dev));
                        return ret;
                }

                /*
                 * This is the clk provider device trying to reparent itself.
                 * It cannot be done right now; it has to wait until the
                 * device has been probed.
                 */
                if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
                        continue;

                if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
                        /* do not set up the parent clocks twice */
                        continue;

                c = clk_set_default_get_by_id(&clk);
                if (IS_ERR(c))
                        return PTR_ERR(c);

                ret = clk_set_parent(c, p);
                /*
                 * Not all drivers may support clock-reparenting (as of now).
                 * Ignore errors due to this.
                 */
                if (ret == -ENOSYS)
                        continue;

                if (ret < 0) {
                        debug("%s: failed to reparent clock %d for %s\n",
                              __func__, index, dev_read_name(dev));
                        return ret;
                }
        }

        return 0;
}

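/*
 * Apply the "assigned-clock-rates" property of @dev: set each clock listed
 * in "assigned-clocks" to the requested rate, skipping zero entries. As with
 * parents, a provider programming itself is handled after it has been probed.
 */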
static int clk_set_default_rates(struct udevice *dev,
                                 enum clk_defaults_stage stage)
{
        struct clk clk, *c;
        int index;
        int num_rates;
        int size;
        int ret = 0;
        u32 *rates = NULL;

        size = dev_read_size(dev, "assigned-clock-rates");
        if (size < 0)
                return 0;

        num_rates = size / sizeof(u32);
        rates = calloc(num_rates, sizeof(u32));
        if (!rates)
                return -ENOMEM;

        ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
        if (ret)
                goto fail;

        for (index = 0; index < num_rates; index++) {
                /* If 0 is passed, this is a no-op */
                if (!rates[index])
                        continue;

                ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
                                              index, &clk);
                /*
                 * If the clock provider is not ready yet, let it handle
                 * the re-programming later.
                 */
                if (ret == -EPROBE_DEFER) {
                        ret = 0;
                        continue;
                }

                if (ret) {
                        dev_dbg(dev,
                                "could not get assigned clock %d (err = %d)\n",
                                index, ret);
                        continue;
                }

                /*
                 * This is the clk provider device trying to program itself.
                 * It cannot be done right now; it has to wait until the
                 * device has been probed.
                 */
                if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
                        continue;

                if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
                        /* do not set the clock rates twice */
                        continue;

                c = clk_set_default_get_by_id(&clk);
                if (IS_ERR(c)) {
                        ret = PTR_ERR(c);
                        goto fail;
                }

                ret = clk_set_rate(c, rates[index]);

                if (ret < 0) {
                        dev_warn(dev,
                                 "failed to set rate on clock index %d (%lu) (error = %d)\n",
                                 index, clk.id, ret);
                        break;
                }
        }

fail:
        free(rates);
        return ret;
}

int clk_set_defaults(struct udevice *dev, enum clk_defaults_stage stage)
{
        int ret;

        if (!dev_has_ofnode(dev))
                return 0;

        /*
         * To avoid setting defaults twice, don't set them before relocation.
         * However, still set them for SPL. And still set them if explicitly
         * asked.
         */
        if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
                if (stage != CLK_DEFAULTS_POST_FORCE)
                        return 0;

        debug("%s(%s)\n", __func__, dev_read_name(dev));

        ret = clk_set_default_parents(dev, stage);
        if (ret)
                return ret;

        ret = clk_set_default_rates(dev, stage);
        if (ret < 0)
                return ret;

        return 0;
}

int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
        return clk_get_by_name_nodev(dev_ofnode(dev), name, clk);
}
#endif /* OF_REAL */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
        int index;

        debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
              ofnode_get_name(node), name, clk);
        clk->dev = NULL;

        index = ofnode_stringlist_search(node, "clock-names", name);
        if (index < 0) {
                debug("ofnode_stringlist_search() failed: %d\n", index);
                return index;
        }

        return clk_get_by_index_nodev(node, index, clk);
}

int clk_release_all(struct clk *clk, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

                /* check if clock has been previously requested */
                if (!clk[i].dev)
                        continue;

                ret = clk_disable(&clk[i]);
                if (ret && ret != -ENOSYS)
                        return ret;

                clk_free(&clk[i]);
        }

        return 0;
}

int clk_request(struct udevice *dev, struct clk *clk)
{
        const struct clk_ops *ops;

        debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
        if (!clk)
                return 0;
        ops = clk_dev_ops(dev);

        clk->dev = dev;

        if (!ops->request)
                return 0;

        return ops->request(clk);
}

void clk_free(struct clk *clk)
{
        const struct clk_ops *ops;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return;
        ops = clk_dev_ops(clk->dev);

        if (ops->rfree)
                ops->rfree(clk);
}

ulong clk_get_rate(struct clk *clk)
{
        const struct clk_ops *ops;
        int ret;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (!ops->get_rate)
                return -ENOSYS;

        ret = ops->get_rate(clk);
        if (ret)
                return log_ret(ret);

        return 0;
}

struct clk *clk_get_parent(struct clk *clk)
{
        struct udevice *pdev;
        struct clk *pclk;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return NULL;

        pdev = dev_get_parent(clk->dev);
        if (!pdev)
                return ERR_PTR(-ENODEV);
        pclk = dev_get_clk_ptr(pdev);
        if (!pclk)
                return ERR_PTR(-ENODEV);

        return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
        const struct clk_ops *ops;
        struct clk *pclk;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;

        pclk = clk_get_parent(clk);
        if (IS_ERR(pclk))
                return -ENODEV;

        ops = clk_dev_ops(pclk->dev);
        if (!ops->get_rate)
                return -ENOSYS;

        /* Read the 'rate' if not already set or if the nocache flag is set */
        if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
                pclk->rate = clk_get_rate(pclk);

        return pclk->rate;
}

ulong clk_round_rate(struct clk *clk, ulong rate)
{
        const struct clk_ops *ops;

        debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
        if (!clk_valid(clk))
                return 0;

        ops = clk_dev_ops(clk->dev);
        if (!ops->round_rate)
                return -ENOSYS;

        return ops->round_rate(clk, rate);
}

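/*
 * Clear the cached rate of @clk and, recursively, of the clocks on all child
 * devices, so that the next clk_get_parent_rate() call reads a fresh value.
 */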
static void clk_clean_rate_cache(struct clk *clk)
{
        struct udevice *child_dev;
        struct clk *clkp;

        if (!clk)
                return;

        clk->rate = 0;

        list_for_each_entry(child_dev, &clk->dev->child_head, sibling_node) {
                clkp = dev_get_clk_ptr(child_dev);
                clk_clean_rate_cache(clkp);
        }
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
        const struct clk_ops *ops;

        debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (!ops->set_rate)
                return -ENOSYS;

        /* Clean up cached rates for us and all child clocks */
        clk_clean_rate_cache(clk);

        return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        const struct clk_ops *ops;
        int ret;

        debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (!ops->set_parent)
                return -ENOSYS;

        ret = ops->set_parent(clk, parent);
        if (ret)
                return ret;

        if (CONFIG_IS_ENABLED(CLK_CCF))
                ret = device_reparent(clk->dev, parent->dev);

        return ret;
}

int clk_enable(struct clk *clk)
{
        const struct clk_ops *ops;
        struct clk *clkp = NULL;
        int ret;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (CONFIG_IS_ENABLED(CLK_CCF)) {
                /* Treat id 0 as an invalid clk, e.g. the dummy clock */
                if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
                        if (clkp->enable_count) {
                                clkp->enable_count++;
                                return 0;
                        }
                        if (clkp->dev->parent &&
                            device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
                                ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
                                if (ret) {
                                        printf("Enable %s failed\n",
                                               clkp->dev->parent->name);
                                        return ret;
                                }
                        }
                }

                if (ops->enable) {
                        ret = ops->enable(clk);
                        if (ret) {
                                printf("Enable %s failed\n", clk->dev->name);
                                return ret;
                        }
                }
                if (clkp)
                        clkp->enable_count++;
        } else {
                if (!ops->enable)
                        return -ENOSYS;
                return ops->enable(clk);
        }

        return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
        int i, ret;

        for (i = 0; i < bulk->count; i++) {
                ret = clk_enable(&bulk->clks[i]);
                if (ret < 0 && ret != -ENOSYS)
                        return ret;
        }

        return 0;
}

int clk_disable(struct clk *clk)
{
        const struct clk_ops *ops;
        struct clk *clkp = NULL;
        int ret;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (CONFIG_IS_ENABLED(CLK_CCF)) {
                if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
                        if (clkp->flags & CLK_IS_CRITICAL)
                                return 0;

                        if (clkp->enable_count == 0) {
                                printf("clk %s already disabled\n",
                                       clkp->dev->name);
                                return 0;
                        }

                        if (--clkp->enable_count > 0)
                                return 0;
                }

                if (ops->disable) {
                        ret = ops->disable(clk);
                        if (ret)
                                return ret;
                }

                if (clkp && clkp->dev->parent &&
                    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
                        ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
                        if (ret) {
                                printf("Disable %s failed\n",
                                       clkp->dev->parent->name);
                                return ret;
                        }
                }
        } else {
                if (!ops->disable)
                        return -ENOSYS;

                return ops->disable(clk);
        }

        return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
        int i, ret;

        for (i = 0; i < bulk->count; i++) {
                ret = clk_disable(&bulk->clks[i]);
                if (ret < 0 && ret != -ENOSYS)
                        return ret;
        }

        return 0;
}

int clk_get_by_id(ulong id, struct clk **clkp)
{
        struct udevice *dev;
        struct uclass *uc;
        int ret;

        ret = uclass_get(UCLASS_CLK, &uc);
        if (ret)
                return ret;

        uclass_foreach_dev(dev, uc) {
                struct clk *clk = dev_get_clk_ptr(dev);

                if (clk && clk->id == id) {
                        *clkp = clk;
                        return 0;
                }
        }

        return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
        /* trivial case: identical struct clk's or both NULL */
        if (p == q)
                return true;

        /* trivial case #2: one of the clk pointers is NULL */
        if (!p || !q)
                return false;

        /* same device, id and data */
        if (p->dev == q->dev && p->id == q->id && p->data == q->data)
                return true;

        return false;
}

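/* devres callbacks: release frees the clock, match compares by pointer */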
static void devm_clk_release(struct udevice *dev, void *res)
{
        clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
        return res == data;
}

struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
        int rc;
        struct clk *clk;

        clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
        if (unlikely(!clk))
                return ERR_PTR(-ENOMEM);

        rc = clk_get_by_name(dev, id, clk);
        if (rc) {
                devres_free(clk);
                return ERR_PTR(rc);
        }

        devres_add(dev, clk);
        return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
        int rc;

        if (!clk)
                return;

        rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
        WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
        /*
         * Call clk_set_defaults() again after a clock provider has been
         * probed. This takes care of cases where the DT uses
         * assigned-clocks to set up default parents and rates.
         */
        clk_set_defaults(dev, CLK_DEFAULTS_POST);

        return 0;
}

UCLASS_DRIVER(clk) = {
        .id             = UCLASS_CLK,
        .name           = "clk",
        .post_probe     = clk_uclass_post_probe,
};