drivers/opp/of.c
1 /*
2  * Generic OPP OF helpers
3  *
4  * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5  *      Nishanth Menon
6  *      Romit Dasgupta
7  *      Kevin Hilman
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/cpu.h>
17 #include <linux/errno.h>
18 #include <linux/device.h>
19 #include <linux/of_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/slab.h>
22 #include <linux/export.h>
23 #include <linux/energy_model.h>
24
25 #include "opp.h"
26
27 /*
28  * Returns opp descriptor node for a device node, caller must
29  * do of_node_put().
30  */
31 static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
32                                                      int index)
33 {
34         /* "operating-points-v2" can be an array for power domain providers */
35         return of_parse_phandle(np, "operating-points-v2", index);
36 }
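
/*
 * Illustrative sketch (not taken from a real board) of how consumers point at
 * their OPP tables; a power-domain provider with several domains may list one
 * phandle per domain, which is what the @index argument above selects:
 *
 *	cpu0: cpu@0 {
 *		operating-points-v2 = <&cpu_opp_table>;
 *	};
 *
 *	power-controller {
 *		#power-domain-cells = <1>;
 *		operating-points-v2 = <&domain0_opp_table>, <&domain1_opp_table>;
 *	};
 */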
37
38 /* Returns opp descriptor node for a device, caller must do of_node_put() */
39 struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
40 {
41         return _opp_of_get_opp_desc_node(dev->of_node, 0);
42 }
43 EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
44
45 struct opp_table *_managed_opp(struct device *dev, int index)
46 {
47         struct opp_table *opp_table, *managed_table = NULL;
48         struct device_node *np;
49
50         np = _opp_of_get_opp_desc_node(dev->of_node, index);
51         if (!np)
52                 return NULL;
53
54         list_for_each_entry(opp_table, &opp_tables, node) {
55                 if (opp_table->np == np) {
56                         /*
57                          * Multiple devices can point to the same OPP table and
58                          * so will have the same node pointer, np.
59                          *
60                          * But the OPPs will be considered shared only if the
61                          * OPP table contains an "opp-shared" property.
62                          */
63                         if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
64                                 _get_opp_table_kref(opp_table);
65                                 managed_table = opp_table;
66                         }
67
68                         break;
69                 }
70         }
71
72         of_node_put(np);
73
74         return managed_table;
75 }
76
77 /* The caller must call dev_pm_opp_put() after the OPP is used */
78 static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
79                                           struct device_node *opp_np)
80 {
81         struct dev_pm_opp *opp;
82
83         lockdep_assert_held(&opp_table_lock);
84
85         mutex_lock(&opp_table->lock);
86
87         list_for_each_entry(opp, &opp_table->opp_list, node) {
88                 if (opp->np == opp_np) {
89                         dev_pm_opp_get(opp);
90                         mutex_unlock(&opp_table->lock);
91                         return opp;
92                 }
93         }
94
95         mutex_unlock(&opp_table->lock);
96
97         return NULL;
98 }
99
100 static struct device_node *of_parse_required_opp(struct device_node *np,
101                                                  int index)
102 {
103         struct device_node *required_np;
104
105         required_np = of_parse_phandle(np, "required-opps", index);
106         if (unlikely(!required_np)) {
107                 pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
108                        __func__, np, index);
109         }
110
111         return required_np;
112 }
113
114 /* The caller must call dev_pm_opp_put_opp_table() after the table is used */
115 static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
116 {
117         struct opp_table *opp_table;
118         struct device_node *opp_table_np;
119
120         lockdep_assert_held(&opp_table_lock);
121
122         opp_table_np = of_get_parent(opp_np);
123         if (!opp_table_np)
124                 goto err;
125
126         /* It is safe to put the node now as all we need is its address */
127         of_node_put(opp_table_np);
128
129         list_for_each_entry(opp_table, &opp_tables, node) {
130                 if (opp_table_np == opp_table->np) {
131                         _get_opp_table_kref(opp_table);
132                         return opp_table;
133                 }
134         }
135
136 err:
137         return ERR_PTR(-ENODEV);
138 }
139
140 /* Free resources previously acquired by _opp_table_alloc_required_tables() */
141 static void _opp_table_free_required_tables(struct opp_table *opp_table)
142 {
143         struct opp_table **required_opp_tables = opp_table->required_opp_tables;
144         struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
145         int i;
146
147         if (!required_opp_tables)
148                 return;
149
150         for (i = 0; i < opp_table->required_opp_count; i++) {
151                 if (IS_ERR_OR_NULL(required_opp_tables[i]))
152                         break;
153
154                 dev_pm_opp_put_opp_table(required_opp_tables[i]);
155         }
156
157         kfree(required_opp_tables);
158         kfree(genpd_virt_devs);
159
160         opp_table->required_opp_count = 0;
161         opp_table->genpd_virt_devs = NULL;
162         opp_table->required_opp_tables = NULL;
163 }
164
165 /*
166  * Populate all devices and opp tables which are part of the "required-opps" list.
167  * Checking only the first OPP node should be enough.
168  */
169 static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
170                                              struct device *dev,
171                                              struct device_node *opp_np)
172 {
173         struct opp_table **required_opp_tables;
174         struct device **genpd_virt_devs = NULL;
175         struct device_node *required_np, *np;
176         int count, i;
177
178         /* Traversing the first OPP node is all we need */
179         np = of_get_next_available_child(opp_np, NULL);
180         if (!np) {
181                 dev_err(dev, "Empty OPP table\n");
182                 return;
183         }
184
185         count = of_count_phandle_with_args(np, "required-opps", NULL);
186         if (count <= 0)
187                 goto put_np;
188
189         if (count > 1) {
190                 genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs),
191                                         GFP_KERNEL);
192                 if (!genpd_virt_devs)
193                         goto put_np;
194         }
195
196         required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
197                                       GFP_KERNEL);
198         if (!required_opp_tables) {
199                 kfree(genpd_virt_devs);
200                 goto put_np;
201         }
202
203         opp_table->genpd_virt_devs = genpd_virt_devs;
204         opp_table->required_opp_tables = required_opp_tables;
205         opp_table->required_opp_count = count;
206
207         for (i = 0; i < count; i++) {
208                 required_np = of_parse_required_opp(np, i);
209                 if (!required_np)
210                         goto free_required_tables;
211
212                 required_opp_tables[i] = _find_table_of_opp_np(required_np);
213                 of_node_put(required_np);
214
215                 if (IS_ERR(required_opp_tables[i]))
216                         goto free_required_tables;
217
218                 /*
219                  * We only support genpd's OPPs in the "required-opps" for now,
220                  * as we don't know much about other cases. Error out if the
221                  * required OPP doesn't belong to a genpd.
222                  */
223                 if (!required_opp_tables[i]->is_genpd) {
224                         dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
225                                 required_np);
226                         goto free_required_tables;
227                 }
228         }
229
230         goto put_np;
231
232 free_required_tables:
233         _opp_table_free_required_tables(opp_table);
234 put_np:
235         of_node_put(np);
236 }
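
/*
 * Hedged example (node names invented) of the "required-opps" layout handled
 * above: each OPP of the device points at the genpd OPP it depends on, and
 * inspecting the first OPP node is enough to size the per-table arrays:
 *
 *	domain_opp_1: opp-1 {
 *		opp-level = <1>;
 *	};
 *
 *	cpu_opp_table {
 *		opp-800000000 {
 *			opp-hz = /bits/ 64 <800000000>;
 *			required-opps = <&domain_opp_1>;
 *		};
 *	};
 */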
237
238 void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
239                         int index)
240 {
241         struct device_node *np, *opp_np;
242         u32 val;
243
244         /*
245          * Only required for backward compatibility with v1 bindings, but isn't
246          * harmful for other cases, so we do it unconditionally.
247          */
248         np = of_node_get(dev->of_node);
249         if (!np)
250                 return;
251
252         if (!of_property_read_u32(np, "clock-latency", &val))
253                 opp_table->clock_latency_ns_max = val;
254         of_property_read_u32(np, "voltage-tolerance",
255                              &opp_table->voltage_tolerance_v1);
256
257         if (of_find_property(np, "#power-domain-cells", NULL))
258                 opp_table->is_genpd = true;
259
260         /* Get OPP table node */
261         opp_np = _opp_of_get_opp_desc_node(np, index);
262         of_node_put(np);
263
264         if (!opp_np)
265                 return;
266
267         if (of_property_read_bool(opp_np, "opp-shared"))
268                 opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
269         else
270                 opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
271
272         opp_table->np = opp_np;
273
274         _opp_table_alloc_required_tables(opp_table, dev, opp_np);
275         of_node_put(opp_np);
276 }
277
278 void _of_clear_opp_table(struct opp_table *opp_table)
279 {
280         _opp_table_free_required_tables(opp_table);
281 }
282
283 /*
284  * Release all resources previously acquired with a call to
285  * _of_opp_alloc_required_opps().
286  */
287 void _of_opp_free_required_opps(struct opp_table *opp_table,
288                                 struct dev_pm_opp *opp)
289 {
290         struct dev_pm_opp **required_opps = opp->required_opps;
291         int i;
292
293         if (!required_opps)
294                 return;
295
296         for (i = 0; i < opp_table->required_opp_count; i++) {
297                 if (!required_opps[i])
298                         break;
299
300                 /* Put the reference back */
301                 dev_pm_opp_put(required_opps[i]);
302         }
303
304         kfree(required_opps);
305         opp->required_opps = NULL;
306 }
307
308 /* Populate all required OPPs which are part of "required-opps" list */
309 static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
310                                        struct dev_pm_opp *opp)
311 {
312         struct dev_pm_opp **required_opps;
313         struct opp_table *required_table;
314         struct device_node *np;
315         int i, ret, count = opp_table->required_opp_count;
316
317         if (!count)
318                 return 0;
319
320         required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
321         if (!required_opps)
322                 return -ENOMEM;
323
324         opp->required_opps = required_opps;
325
326         for (i = 0; i < count; i++) {
327                 required_table = opp_table->required_opp_tables[i];
328
329                 np = of_parse_required_opp(opp->np, i);
330                 if (unlikely(!np)) {
331                         ret = -ENODEV;
332                         goto free_required_opps;
333                 }
334
335                 required_opps[i] = _find_opp_of_np(required_table, np);
336                 of_node_put(np);
337
338                 if (!required_opps[i]) {
339                         pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
340                                __func__, opp->np, i);
341                         ret = -ENODEV;
342                         goto free_required_opps;
343                 }
344         }
345
346         return 0;
347
348 free_required_opps:
349         _of_opp_free_required_opps(opp_table, opp);
350
351         return ret;
352 }
353
354 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
355                               struct device_node *np)
356 {
357         unsigned int count = opp_table->supported_hw_count;
358         u32 version;
359         int ret;
360
361         if (!opp_table->supported_hw) {
362                 /*
363                  * If no supported_hw has been set by the platform but an
364                  * opp-supported-hw value is set for an OPP, then the OPP
365                  * should not be enabled, as there is no way to tell whether
366                  * the hardware supports it.
367                  */
368                 if (of_find_property(np, "opp-supported-hw", NULL))
369                         return false;
370                 else
371                         return true;
372         }
373
374         while (count--) {
375                 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
376                                                  &version);
377                 if (ret) {
378                         dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
379                                  __func__, count, ret);
380                         return false;
381                 }
382
383                 /* Both of these are bitwise masks of the versions */
384                 if (!(version & opp_table->supported_hw[count]))
385                         return false;
386         }
387
388         return true;
389 }
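
/*
 * Worked example (values invented) of the check above: if the platform set
 * supported_hw[0] = 0x3 via dev_pm_opp_set_supported_hw() and an OPP node
 * carries "opp-supported-hw = <0x1>", then 0x1 & 0x3 is non-zero and the OPP
 * is kept; "opp-supported-hw = <0x4>" would be rejected.
 */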
390
391 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
392                               struct opp_table *opp_table)
393 {
394         u32 *microvolt, *microamp = NULL;
395         int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
396         struct property *prop = NULL;
397         char name[NAME_MAX];
398
399         /* Search for "opp-microvolt-<name>" */
400         if (opp_table->prop_name) {
401                 snprintf(name, sizeof(name), "opp-microvolt-%s",
402                          opp_table->prop_name);
403                 prop = of_find_property(opp->np, name, NULL);
404         }
405
406         if (!prop) {
407                 /* Search for "opp-microvolt" */
408                 sprintf(name, "opp-microvolt");
409                 prop = of_find_property(opp->np, name, NULL);
410
411                 /* Missing property isn't a problem, but an invalid entry is */
412                 if (!prop) {
413                         if (unlikely(supplies == -1)) {
414                                 /* Initialize regulator_count */
415                                 opp_table->regulator_count = 0;
416                                 return 0;
417                         }
418
419                         if (!supplies)
420                                 return 0;
421
422                         dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
423                                 __func__);
424                         return -EINVAL;
425                 }
426         }
427
428         if (unlikely(supplies == -1)) {
429                 /* Initialize regulator_count */
430                 supplies = opp_table->regulator_count = 1;
431         } else if (unlikely(!supplies)) {
432                 dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
433                 return -EINVAL;
434         }
435
436         vcount = of_property_count_u32_elems(opp->np, name);
437         if (vcount < 0) {
438                 dev_err(dev, "%s: Invalid %s property (%d)\n",
439                         __func__, name, vcount);
440                 return vcount;
441         }
442
443         /* There can be one or three elements per supply */
444         if (vcount != supplies && vcount != supplies * 3) {
445                 dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
446                         __func__, name, vcount, supplies);
447                 return -EINVAL;
448         }
449
450         microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
451         if (!microvolt)
452                 return -ENOMEM;
453
454         ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
455         if (ret) {
456                 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
457                 ret = -EINVAL;
458                 goto free_microvolt;
459         }
460
461         /* Search for "opp-microamp-<name>" */
462         prop = NULL;
463         if (opp_table->prop_name) {
464                 snprintf(name, sizeof(name), "opp-microamp-%s",
465                          opp_table->prop_name);
466                 prop = of_find_property(opp->np, name, NULL);
467         }
468
469         if (!prop) {
470                 /* Search for "opp-microamp" */
471                 sprintf(name, "opp-microamp");
472                 prop = of_find_property(opp->np, name, NULL);
473         }
474
475         if (prop) {
476                 icount = of_property_count_u32_elems(opp->np, name);
477                 if (icount < 0) {
478                         dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
479                                 name, icount);
480                         ret = icount;
481                         goto free_microvolt;
482                 }
483
484                 if (icount != supplies) {
485                         dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
486                                 __func__, name, icount, supplies);
487                         ret = -EINVAL;
488                         goto free_microvolt;
489                 }
490
491                 microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
492                 if (!microamp) {
493                         ret = -EINVAL;
494                         ret = -ENOMEM;
495                 }
496
497                 ret = of_property_read_u32_array(opp->np, name, microamp,
498                                                  icount);
499                 if (ret) {
500                         dev_err(dev, "%s: error parsing %s: %d\n", __func__,
501                                 name, ret);
502                         ret = -EINVAL;
503                         goto free_microamp;
504                 }
505         }
506
507         for (i = 0, j = 0; i < supplies; i++) {
508                 opp->supplies[i].u_volt = microvolt[j++];
509
510                 if (vcount == supplies) {
511                         opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
512                         opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
513                 } else {
514                         opp->supplies[i].u_volt_min = microvolt[j++];
515                         opp->supplies[i].u_volt_max = microvolt[j++];
516                 }
517
518                 if (microamp)
519                         opp->supplies[i].u_amp = microamp[i];
520         }
521
522 free_microamp:
523         kfree(microamp);
524 free_microvolt:
525         kfree(microvolt);
526
527         return ret;
528 }
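
/*
 * Illustrative supply entries (values invented) as parsed above: either one
 * value per supply or a <target min max> triplet per supply for
 * opp-microvolt, plus an optional opp-microamp with one value per supply:
 *
 *	opp-microvolt = <970000 960000 975000>;
 *	opp-microamp = <70000>;
 */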
529
530 /**
531  * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
532  *                                entries
533  * @dev:        device pointer used to lookup OPP table.
534  *
535  * Free OPPs created using static entries present in DT.
536  */
537 void dev_pm_opp_of_remove_table(struct device *dev)
538 {
539         _dev_pm_opp_find_and_remove_table(dev);
540 }
541 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
542
543 /**
544  * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
545  * @opp_table:  OPP table
546  * @dev:        device for which we do this operation
547  * @np:         device node
548  *
549  * This function adds an opp definition to the opp table and returns it. The
550  * opp can be controlled using dev_pm_opp_enable/disable functions and may be
551  * removed by dev_pm_opp_remove.
552  *
553  * Return:
554  * Valid OPP pointer:
555  *              On success
556  * NULL:
557  *              Duplicate OPPs (both freq and volt are same) and opp->available
558  *              OR if the OPP is not supported by hardware.
559  * ERR_PTR(-EEXIST):
560  *              Freq are same and volt are different OR
561  *              Duplicate OPPs (both freq and volt are same) and !opp->available
562  * ERR_PTR(-ENOMEM):
563  *              Memory allocation failure
564  * ERR_PTR(-EINVAL):
565  *              Failed parsing the OPP node
566  */
567 static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
568                 struct device *dev, struct device_node *np)
569 {
570         struct dev_pm_opp *new_opp;
571         u64 rate = 0;
572         u32 val;
573         int ret;
574         bool rate_not_available = false;
575
576         new_opp = _opp_allocate(opp_table);
577         if (!new_opp)
578                 return ERR_PTR(-ENOMEM);
579
580         ret = of_property_read_u64(np, "opp-hz", &rate);
581         if (ret < 0) {
582                 /* "opp-hz" is optional for devices like power domains. */
583                 if (!opp_table->is_genpd) {
584                         dev_err(dev, "%s: opp-hz not found\n", __func__);
585                         goto free_opp;
586                 }
587
588                 rate_not_available = true;
589         } else {
590                 /*
591                  * Rate is defined as an unsigned long in the clk API, so we
592                  * cast it explicitly to that type. This must be fixed once the
593                  * clk API guarantees a 64-bit rate.
594                  */
595                 new_opp->rate = (unsigned long)rate;
596         }
597
598         of_property_read_u32(np, "opp-level", &new_opp->level);
599
600         /* Check whether this OPP is supported by the hardware's version hierarchy */
601         if (!_opp_is_supported(dev, opp_table, np)) {
602                 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
603                 goto free_opp;
604         }
605
606         new_opp->turbo = of_property_read_bool(np, "turbo-mode");
607
608         new_opp->np = np;
609         new_opp->dynamic = false;
610         new_opp->available = true;
611
612         ret = _of_opp_alloc_required_opps(opp_table, new_opp);
613         if (ret)
614                 goto free_opp;
615
616         if (!of_property_read_u32(np, "clock-latency-ns", &val))
617                 new_opp->clock_latency_ns = val;
618
619         ret = opp_parse_supplies(new_opp, dev, opp_table);
620         if (ret)
621                 goto free_required_opps;
622
623         if (opp_table->is_genpd)
624                 new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
625
626         ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
627         if (ret) {
628                 /* Don't return error for duplicate OPPs */
629                 if (ret == -EBUSY)
630                         ret = 0;
631                 goto free_required_opps;
632         }
633
634         /* OPP to select on device suspend */
635         if (of_property_read_bool(np, "opp-suspend")) {
636                 if (opp_table->suspend_opp) {
637                         dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
638                                  __func__, opp_table->suspend_opp->rate,
639                                  new_opp->rate);
640                 } else {
641                         new_opp->suspend = true;
642                         opp_table->suspend_opp = new_opp;
643                 }
644         }
645
646         if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
647                 opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
648
649         pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
650                  __func__, new_opp->turbo, new_opp->rate,
651                  new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
652                  new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);
653
654         /*
655          * Notify the changes in the availability of the operable
656          * frequency/voltage list.
657          */
658         blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
659         return new_opp;
660
661 free_required_opps:
662         _of_opp_free_required_opps(opp_table, new_opp);
663 free_opp:
664         _opp_free(new_opp);
665
666         return ERR_PTR(ret);
667 }
668
669 /* Initializes OPP tables based on new bindings */
670 static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
671 {
672         struct device_node *np;
673         int ret, count = 0, pstate_count = 0;
674         struct dev_pm_opp *opp;
675
676         /* OPP table is already initialized for the device */
677         if (opp_table->parsed_static_opps) {
678                 kref_get(&opp_table->list_kref);
679                 return 0;
680         }
681
682         kref_init(&opp_table->list_kref);
683
684         /* We have opp-table node now, iterate over it and add OPPs */
685         for_each_available_child_of_node(opp_table->np, np) {
686                 opp = _opp_add_static_v2(opp_table, dev, np);
687                 if (IS_ERR(opp)) {
688                         ret = PTR_ERR(opp);
689                         dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
690                                 ret);
691                         of_node_put(np);
692                         goto put_list_kref;
693                 } else if (opp) {
694                         count++;
695                 }
696         }
697
698         /* There should be one or more OPPs defined */
699         if (WARN_ON(!count)) {
700                 ret = -ENOENT;
701                 goto put_list_kref;
702         }
703
704         list_for_each_entry(opp, &opp_table->opp_list, node)
705                 pstate_count += !!opp->pstate;
706
707         /* Either all or none of the nodes shall have performance state set */
708         if (pstate_count && pstate_count != count) {
709                 dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
710                         count, pstate_count);
711                 ret = -ENOENT;
712                 goto put_list_kref;
713         }
714
715         if (pstate_count)
716                 opp_table->genpd_performance_state = true;
717
718         opp_table->parsed_static_opps = true;
719
720         return 0;
721
722 put_list_kref:
723         _put_opp_list_kref(opp_table);
724
725         return ret;
726 }
727
728 /* Initializes OPP tables based on the old, deprecated v1 bindings */
729 static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
730 {
731         const struct property *prop;
732         const __be32 *val;
733         int nr, ret = 0;
734
735         prop = of_find_property(dev->of_node, "operating-points", NULL);
736         if (!prop)
737                 return -ENODEV;
738         if (!prop->value)
739                 return -ENODATA;
740
741         /*
742          * Each OPP is a set of tuples consisting of frequency and
743          * voltage like <freq-kHz vol-uV>.
744          */
745         nr = prop->length / sizeof(u32);
746         if (nr % 2) {
747                 dev_err(dev, "%s: Invalid OPP table\n", __func__);
748                 return -EINVAL;
749         }
750
751         kref_init(&opp_table->list_kref);
752
753         val = prop->value;
754         while (nr) {
755                 unsigned long freq = be32_to_cpup(val++) * 1000;
756                 unsigned long volt = be32_to_cpup(val++);
757
758                 ret = _opp_add_v1(opp_table, dev, freq, volt, false);
759                 if (ret) {
760                         dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
761                                 __func__, freq, ret);
762                         _put_opp_list_kref(opp_table);
763                         return ret;
764                 }
765                 nr -= 2;
766         }
767
768         return ret;
769 }
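
/*
 * Example (frequencies and voltages invented) of the legacy v1 property
 * parsed above, i.e. a flat list of <freq-kHz volt-uV> pairs:
 *
 *	operating-points = <
 *		998400  1075000
 *		1190400 1200000
 *	>;
 */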
770
771 /**
772  * dev_pm_opp_of_add_table() - Initialize opp table from device tree
773  * @dev:        device pointer used to lookup OPP table.
774  *
775  * Register the initial OPP table with the OPP library for given device.
776  *
777  * Return:
778  * 0            On success OR
779  *              Duplicate OPPs (both freq and volt are same) and opp->available
780  * -EEXIST      Freq are same and volt are different OR
781  *              Duplicate OPPs (both freq and volt are same) and !opp->available
782  * -ENOMEM      Memory allocation failure
783  * -ENODEV      when 'operating-points' property is not found or contains
784  *              invalid data in the device node.
785  * -ENODATA     when empty 'operating-points' property is found
786  * -EINVAL      when invalid entries are found in opp-v2 table
787  */
788 int dev_pm_opp_of_add_table(struct device *dev)
789 {
790         struct opp_table *opp_table;
791         int ret;
792
793         opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
794         if (!opp_table)
795                 return -ENOMEM;
796
797         /*
798          * OPPs have two versions of bindings now. Also try the old (v1)
799          * bindings for backward compatibility with older dtbs.
800          */
801         if (opp_table->np)
802                 ret = _of_add_opp_table_v2(dev, opp_table);
803         else
804                 ret = _of_add_opp_table_v1(dev, opp_table);
805
806         if (ret)
807                 dev_pm_opp_put_opp_table(opp_table);
808
809         return ret;
810 }
811 EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
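
/*
 * A minimal consumer sketch (error handling and driver specifics trimmed):
 * a platform driver would typically pair these calls in its probe and remove
 * paths:
 *
 *	ret = dev_pm_opp_of_add_table(&pdev->dev);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_opp_of_remove_table(&pdev->dev);
 */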
812
813 /**
814  * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
815  * @dev:        device pointer used to lookup OPP table.
816  * @index:      Index number.
817  *
818  * Register the initial OPP table with the OPP library for given device only
819  * using the "operating-points-v2" property.
820  *
821  * Return:
822  * 0            On success OR
823  *              Duplicate OPPs (both freq and volt are same) and opp->available
824  * -EEXIST      Freq are same and volt are different OR
825  *              Duplicate OPPs (both freq and volt are same) and !opp->available
826  * -ENOMEM      Memory allocation failure
827  * -ENODEV      when 'operating-points' property is not found or contains
828  *              invalid data in the device node.
829  * -ENODATA     when empty 'operating-points' property is found
830  * -EINVAL      when invalid entries are found in opp-v2 table
831  */
832 int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
833 {
834         struct opp_table *opp_table;
835         int ret, count;
836
837         if (index) {
838                 /*
839                  * If only one phandle is present, then the same OPP table
840                  * applies for all index requests.
841                  */
842                 count = of_count_phandle_with_args(dev->of_node,
843                                                    "operating-points-v2", NULL);
844                 if (count == 1)
845                         index = 0;
846         }
847
848         opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
849         if (!opp_table)
850                 return -ENOMEM;
851
852         ret = _of_add_opp_table_v2(dev, opp_table);
853         if (ret)
854                 dev_pm_opp_put_opp_table(opp_table);
855
856         return ret;
857 }
858 EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
859
860 /* CPU device specific helpers */
861
862 /**
863  * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
864  * @cpumask:    cpumask for which OPP table needs to be removed
865  *
866  * This removes the OPP tables for CPUs present in the @cpumask.
867  * This should be used only to remove static entries created from DT.
868  */
869 void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
870 {
871         _dev_pm_opp_cpumask_remove_table(cpumask, -1);
872 }
873 EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
874
875 /**
876  * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
877  * @cpumask:    cpumask for which OPP table needs to be added.
878  *
879  * This adds the OPP tables for CPUs present in the @cpumask.
880  */
881 int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
882 {
883         struct device *cpu_dev;
884         int cpu, ret;
885
886         if (WARN_ON(cpumask_empty(cpumask)))
887                 return -ENODEV;
888
889         for_each_cpu(cpu, cpumask) {
890                 cpu_dev = get_cpu_device(cpu);
891                 if (!cpu_dev) {
892                         pr_err("%s: failed to get cpu%d device\n", __func__,
893                                cpu);
894                         ret = -ENODEV;
895                         goto remove_table;
896                 }
897
898                 ret = dev_pm_opp_of_add_table(cpu_dev);
899                 if (ret) {
900                         /*
901                          * OPP may get registered dynamically, don't print error
902                          * message here.
903                          */
904                         pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
905                                  __func__, cpu, ret);
906
907                         goto remove_table;
908                 }
909         }
910
911         return 0;
912
913 remove_table:
914         /* Free all other OPPs */
915         _dev_pm_opp_cpumask_remove_table(cpumask, cpu);
916
917         return ret;
918 }
919 EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
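
/*
 * Sketch of how a cpufreq-style driver might use the cpumask helpers
 * (assuming a cpufreq policy context, error handling trimmed):
 *
 *	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
 *	...
 *	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
 */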
920
921 /*
922  * Works only for OPP v2 bindings.
923  *
924  * Returns -ENOENT if operating-points-v2 bindings aren't supported.
925  */
926 /**
927  * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
928  *                                    @cpu_dev using operating-points-v2
929  *                                    bindings.
930  *
931  * @cpu_dev:    CPU device for which we do this operation
932  * @cpumask:    cpumask to update with information of sharing CPUs
933  *
934  * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
935  *
936  * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
937  */
938 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
939                                    struct cpumask *cpumask)
940 {
941         struct device_node *np, *tmp_np, *cpu_np;
942         int cpu, ret = 0;
943
944         /* Get OPP descriptor node */
945         np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
946         if (!np) {
947                 dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
948                 return -ENOENT;
949         }
950
951         cpumask_set_cpu(cpu_dev->id, cpumask);
952
953         /* Are the OPPs shared? */
954         if (!of_property_read_bool(np, "opp-shared"))
955                 goto put_cpu_node;
956
957         for_each_possible_cpu(cpu) {
958                 if (cpu == cpu_dev->id)
959                         continue;
960
961                 cpu_np = of_cpu_device_node_get(cpu);
962                 if (!cpu_np) {
963                         dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
964                                 __func__, cpu);
965                         ret = -ENOENT;
966                         goto put_cpu_node;
967                 }
968
969                 /* Get OPP descriptor node */
970                 tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
971                 of_node_put(cpu_np);
972                 if (!tmp_np) {
973                         pr_err("%pOF: Couldn't find opp node\n", cpu_np);
974                         ret = -ENOENT;
975                         goto put_cpu_node;
976                 }
977
978                 /* CPUs are sharing opp node */
979                 if (np == tmp_np)
980                         cpumask_set_cpu(cpu, cpumask);
981
982                 of_node_put(tmp_np);
983         }
984
985 put_cpu_node:
986         of_node_put(np);
987         return ret;
988 }
989 EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
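
/*
 * For example (assuming a cpufreq-like caller), a driver can discover which
 * CPUs share the OPP table of its CPU device before adding the table for all
 * of them:
 *
 *	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
 */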
990
991 /**
992  * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
993  * @np: Node that contains the "required-opps" property.
994  * @index: Index of the phandle to parse.
995  *
996  * Returns the performance state of the OPP pointed out by the "required-opps"
997  * property at @index in @np.
998  *
999  * Return: Zero or positive performance state on success, otherwise negative
1000  * value on errors.
1001  */
1002 int of_get_required_opp_performance_state(struct device_node *np, int index)
1003 {
1004         struct dev_pm_opp *opp;
1005         struct device_node *required_np;
1006         struct opp_table *opp_table;
1007         int pstate = -EINVAL;
1008
1009         required_np = of_parse_required_opp(np, index);
1010         if (!required_np)
1011                 return -EINVAL;
1012
1013         opp_table = _find_table_of_opp_np(required_np);
1014         if (IS_ERR(opp_table)) {
1015                 pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
1016                        __func__, np, PTR_ERR(opp_table));
1017                 goto put_required_np;
1018         }
1019
1020         opp = _find_opp_of_np(opp_table, required_np);
1021         if (opp) {
1022                 pstate = opp->pstate;
1023                 dev_pm_opp_put(opp);
1024         }
1025
1026         dev_pm_opp_put_opp_table(opp_table);
1027
1028 put_required_np:
1029         of_node_put(required_np);
1030
1031         return pstate;
1032 }
1033 EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
1034
1035 /**
1036  * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
1037  * @opp:        opp for which the DT node has to be returned
1038  *
1039  * Return: DT node corresponding to the opp on success, NULL otherwise.
1040  *
1041  * The caller needs to put the node with of_node_put() after using it.
1042  */
1043 struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
1044 {
1045         if (IS_ERR_OR_NULL(opp)) {
1046                 pr_err("%s: Invalid parameters\n", __func__);
1047                 return NULL;
1048         }
1049
1050         return of_node_get(opp->np);
1051 }
1052 EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
1053
1054 /*
1055  * Callback function provided to the Energy Model framework upon registration.
1056  * This computes the power estimate for CPU @cpu at @kHz if it is the frequency
1057  * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
1058  * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
1059  * frequency and @mW to the associated power. The power is estimated as
1060  * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
1061  * the voltage and frequency of the OPP.
1062  *
1063  * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
1064  * calculation failed because of missing parameters, 0 otherwise.
1065  */
1066 static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
1067                                          int cpu)
1068 {
1069         struct device *cpu_dev;
1070         struct dev_pm_opp *opp;
1071         struct device_node *np;
1072         unsigned long mV, Hz;
1073         u32 cap;
1074         u64 tmp;
1075         int ret;
1076
1077         cpu_dev = get_cpu_device(cpu);
1078         if (!cpu_dev)
1079                 return -ENODEV;
1080
1081         np = of_node_get(cpu_dev->of_node);
1082         if (!np)
1083                 return -EINVAL;
1084
1085         ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
1086         of_node_put(np);
1087         if (ret)
1088                 return -EINVAL;
1089
1090         Hz = *kHz * 1000;
1091         opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
1092         if (IS_ERR(opp))
1093                 return -EINVAL;
1094
1095         mV = dev_pm_opp_get_voltage(opp) / 1000;
1096         dev_pm_opp_put(opp);
1097         if (!mV)
1098                 return -EINVAL;
1099
1100         tmp = (u64)cap * mV * mV * (Hz / 1000000);
1101         do_div(tmp, 1000000000);
1102
1103         *mW = (unsigned long)tmp;
1104         *kHz = Hz / 1000;
1105
1106         return 0;
1107 }
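
/*
 * Worked example of the computation above (numbers invented): with a
 * "dynamic-power-coefficient" of 100, an OPP at 1.5 GHz and 1100 mV gives
 * tmp = 100 * 1100 * 1100 * 1500 = 181500000000, which divided by 10^9
 * yields *mW = 181, with *kHz updated to 1500000.
 */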
1108
1109 /**
1110  * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
1111  * @cpus        : CPUs for which an Energy Model has to be registered
1112  * @cpus:       CPUs for which an Energy Model has to be registered
1113  * This checks whether the "dynamic-power-coefficient" devicetree property has
1114  * been specified, and tries to register an Energy Model with it if it has.
1115  */
1116 void dev_pm_opp_of_register_em(struct cpumask *cpus)
1117 {
1118         struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
1119         int ret, nr_opp, cpu = cpumask_first(cpus);
1120         struct device *cpu_dev;
1121         struct device_node *np;
1122         u32 cap;
1123
1124         cpu_dev = get_cpu_device(cpu);
1125         if (!cpu_dev)
1126                 return;
1127
1128         nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
1129         if (nr_opp <= 0)
1130                 return;
1131
1132         np = of_node_get(cpu_dev->of_node);
1133         if (!np)
1134                 return;
1135
1136         /*
1137          * Register an EM only if the 'dynamic-power-coefficient' property is
1138          * set in devicetree. It is assumed the voltage values are known if that
1139          * property is set since it is useless otherwise. If voltages are not
1140          * known, just let the EM registration fail with an error to alert the
1141          * user about the inconsistent configuration.
1142          */
1143         ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
1144         of_node_put(np);
1145         if (ret || !cap)
1146                 return;
1147
1148         em_register_perf_domain(cpus, nr_opp, &em_cb);
1149 }
1150 EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);