drivers/interconnect/qcom/icc-rpm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "smd-rpm.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)      (0x8 + ((n) * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK    0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT   4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK    0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT   3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)               (0x300 + (0x4000 * (n)))
#define M_BKE_EN_ADDR(n)                (M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)     (M_BKE_REG_BASE(n) + 0x40 + (0x4 * (i)))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK 0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK  0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK   0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT 0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK               0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)       (0x8 + ((n) * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK        0xc
#define NOC_QOS_PRIORITY_P0_MASK        0x3
#define NOC_QOS_PRIORITY_P1_SHIFT       0x2

#define NOC_QOS_MODEn_ADDR(n)           (0xc + ((n) * 0x1000))
#define NOC_QOS_MODEn_MASK              0x3

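/*
 * qcom_icc_set_qnoc_qos - program QNoC QoS registers for a node
 * @src: source interconnect node carrying the QoS parameters
 * @max_bw: aggregated bandwidth (not used by this bus type)
 *
 * Program the default priority (DFLT_PRIO) and urgent forwarding enable
 * (URGFWD_EN) fields of the node's per-port MCTL_LOW register through the
 * provider regmap.
 */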
static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
{
        struct icc_provider *provider = src->provider;
        struct qcom_icc_provider *qp = to_qcom_provider(provider);
        struct qcom_icc_node *qn = src->data;
        struct qcom_icc_qos *qos = &qn->qos;
        int rc;

        rc = regmap_update_bits(qp->regmap,
                        qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
                        QNOC_QOS_MCTL_DFLT_PRIO_MASK,
                        qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
        if (rc)
                return rc;

        return regmap_update_bits(qp->regmap,
                        qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
                        QNOC_QOS_MCTL_URGFWD_EN_MASK,
                        !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

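/*
 * qcom_icc_bimc_set_qos_health - program one M_BKE_HEALTH_CFG register
 * @qp: QCOM interconnect provider
 * @qos: QoS parameters of the node being configured
 * @regnum: index of the health register (0..3)
 *
 * Pack the PRIOLVL, AREQPRIO and, except for register 3 which lacks the
 * field, LIMITCMDS values and update the register for the node's QoS port.
 */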
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
                                        struct qcom_icc_qos *qos,
                                        int regnum)
{
        u32 val;
        u32 mask;

        val = qos->prio_level;
        mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

        val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
        mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

        /* LIMITCMDS is not present on M_BKE_HEALTH_3 */
        if (regnum != 3) {
                val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
                mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
        }

        return regmap_update_bits(qp->regmap,
                                  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
                                  mask, val);
}

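/*
 * qcom_icc_set_bimc_qos - apply BIMC QoS configuration for a node
 * @src: source interconnect node
 * @max_bw: aggregated bandwidth (not used by this bus type)
 *
 * For any mode other than bypass, program all four health registers and
 * enable the BKE; in bypass mode just disable the BKE.
 */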
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
        struct qcom_icc_provider *qp;
        struct qcom_icc_node *qn;
        struct icc_provider *provider;
        u32 mode = NOC_QOS_MODE_BYPASS;
        u32 val = 0;
        int i, rc = 0;

        qn = src->data;
        provider = src->provider;
        qp = to_qcom_provider(provider);

        if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
                mode = qn->qos.qos_mode;

        /*
         * QoS priority: the QoS health parameters are only considered
         * when we are not in bypass mode.
         */
        if (mode != NOC_QOS_MODE_BYPASS) {
                for (i = 3; i >= 0; i--) {
                        rc = qcom_icc_bimc_set_qos_health(qp,
                                                          &qn->qos, i);
                        if (rc)
                                return rc;
                }

                /* Set BKE_EN to 1 when in fixed, regulator or limiter mode */
                val = 1;
        }

        return regmap_update_bits(qp->regmap,
                                  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
                                  M_BKE_EN_EN_BMASK, val);
}

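/*
 * qcom_icc_noc_set_qos_priority - program the NoC QoS priority register
 * @qp: QCOM interconnect provider
 * @qos: QoS parameters of the node being configured
 *
 * Write the P1 field (areq_prio) first and the P0 field (prio_level) last,
 * as the two fields must be updated one at a time.
 */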
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
                                         struct qcom_icc_qos *qos)
{
        u32 val;
        int rc;

        /* Must be updated one at a time, P1 first, P0 last */
        val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
        rc = regmap_update_bits(qp->regmap,
                                qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
                                NOC_QOS_PRIORITY_P1_MASK, val);
        if (rc)
                return rc;

        return regmap_update_bits(qp->regmap,
                                  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
                                  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

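/*
 * qcom_icc_set_noc_qos - apply NoC QoS configuration for a node
 * @src: source interconnect node
 * @max_bw: aggregated bandwidth (not used by this bus type)
 *
 * Nodes without a QoS port are skipped since their vote is aggregated on
 * the parent. In fixed mode the priority register is programmed before the
 * mode register is written; in bypass mode only the mode is set.
 */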
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
        struct qcom_icc_provider *qp;
        struct qcom_icc_node *qn;
        struct icc_provider *provider;
        u32 mode = NOC_QOS_MODE_BYPASS;
        int rc = 0;

        qn = src->data;
        provider = src->provider;
        qp = to_qcom_provider(provider);

        if (qn->qos.qos_port < 0) {
                dev_dbg(src->provider->dev,
                        "NoC QoS: Skipping %s: vote aggregated on parent.\n",
                        qn->name);
                return 0;
        }

        if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
                mode = qn->qos.qos_mode;

        if (mode == NOC_QOS_MODE_FIXED) {
                dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
                        qn->name);
                rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
                if (rc)
                        return rc;
        } else if (mode == NOC_QOS_MODE_BYPASS) {
                dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
                        qn->name);
        }

        return regmap_update_bits(qp->regmap,
                                  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
                                  NOC_QOS_MODEn_MASK, mode);
}

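/*
 * qcom_icc_qos_set - dispatch QoS programming to the bus-type helper
 * @node: interconnect node to configure
 * @sum_bw: aggregated bandwidth, passed through to the helper
 */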
static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
{
        struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
        struct qcom_icc_node *qn = node->data;

        dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

        switch (qp->type) {
        case QCOM_ICC_BIMC:
                return qcom_icc_set_bimc_qos(node, sum_bw);
        case QCOM_ICC_QNOC:
                return qcom_icc_set_qnoc_qos(node, sum_bw);
        default:
                return qcom_icc_set_noc_qos(node, sum_bw);
        }
}

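/*
 * qcom_icc_rpm_set - vote bandwidth with the RPM over SMD
 * @mas_rpm_id: RPM id of the master endpoint, or -1 to skip the master vote
 * @slv_rpm_id: RPM id of the slave endpoint, or -1 to skip the slave vote
 * @sum_bw: aggregated bandwidth to request, in bps
 */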
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
{
        int ret = 0;

        if (mas_rpm_id != -1) {
                ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
                                            RPM_BUS_MASTER_REQ,
                                            mas_rpm_id,
                                            sum_bw);
                if (ret) {
                        pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
                               mas_rpm_id, ret);
                        return ret;
                }
        }

        if (slv_rpm_id != -1) {
                ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
                                            RPM_BUS_SLAVE_REQ,
                                            slv_rpm_id,
                                            sum_bw);
                if (ret) {
                        pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
                               slv_rpm_id, ret);
                        return ret;
                }
        }

        return ret;
}

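/*
 * qcom_icc_set - ->set() callback of the interconnect provider
 * @src: source node of the path being configured
 * @dst: destination node of the path (not used here)
 *
 * Re-aggregate the requests of all nodes on the provider, forward the result
 * to the RPM (or program the QoS registers for AP-owned nodes) and scale the
 * bus clocks to the requested bandwidth divided by the node's bus width.
 */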
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
        struct qcom_icc_provider *qp;
        struct qcom_icc_node *qn;
        struct icc_provider *provider;
        struct icc_node *n;
        u64 sum_bw;
        u64 max_peak_bw;
        u64 rate;
        u32 agg_avg = 0;
        u32 agg_peak = 0;
        int ret, i;

        qn = src->data;
        provider = src->provider;
        qp = to_qcom_provider(provider);

        list_for_each_entry(n, &provider->nodes, node_list)
                provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
                                    &agg_avg, &agg_peak);

        sum_bw = icc_units_to_bps(agg_avg);
        max_peak_bw = icc_units_to_bps(agg_peak);

        if (!qn->qos.ap_owned) {
                /* send bandwidth request message to the RPM processor */
                ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
                if (ret)
                        return ret;
        } else if (qn->qos.qos_mode != -1) {
                /* node is AP-owned: program its QoS registers directly */
                ret = qcom_icc_qos_set(src, sum_bw);
                if (ret)
                        return ret;
        }

        rate = max(sum_bw, max_peak_bw);

        do_div(rate, qn->buswidth);
        rate = min_t(u64, rate, LONG_MAX);

        for (i = 0; i < qp->num_clks; i++) {
                if (qp->bus_clk_rate[i] == rate)
                        continue;

                ret = clk_set_rate(qp->bus_clks[i].clk, rate);
                if (ret) {
                        pr_err("%s clk_set_rate error: %d\n",
                               qp->bus_clks[i].id, ret);
                        return ret;
                }
                qp->bus_clk_rate[i] = rate;
        }

        return 0;
}

static const char * const bus_clocks[] = {
        "bus", "bus_a",
};

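/**
 * qnoc_probe - common probe path for RPM-based interconnect providers
 * @pdev: platform device describing the NoC
 *
 * Defer until the RPM SMD proxy is available, set up the bus clocks, an
 * optional MMIO regmap and bus power domain, register the interconnect
 * provider with all of its nodes and links, then populate any child NoC
 * devices described in the devicetree.
 *
 * Return: 0 on success or a negative errno on failure.
 */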
int qnoc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        const struct qcom_icc_desc *desc;
        struct icc_onecell_data *data;
        struct icc_provider *provider;
        struct qcom_icc_node * const *qnodes;
        struct qcom_icc_provider *qp;
        struct icc_node *node;
        size_t num_nodes, i;
        const char * const *cds;
        int cd_num;
        int ret;

        /* wait for the RPM proxy */
        if (!qcom_icc_rpm_smd_available())
                return -EPROBE_DEFER;

        desc = of_device_get_match_data(dev);
        if (!desc)
                return -EINVAL;

        qnodes = desc->nodes;
        num_nodes = desc->num_nodes;

        if (desc->num_clocks) {
                cds = desc->clocks;
                cd_num = desc->num_clocks;
        } else {
                cds = bus_clocks;
                cd_num = ARRAY_SIZE(bus_clocks);
        }

        qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
        if (!qp)
                return -ENOMEM;

        qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
                                        GFP_KERNEL);
        if (!qp->bus_clk_rate)
                return -ENOMEM;

        data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
                            GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        for (i = 0; i < cd_num; i++)
                qp->bus_clks[i].id = cds[i];
        qp->num_clks = cd_num;

        qp->type = desc->type;
        qp->qos_offset = desc->qos_offset;

        if (desc->regmap_cfg) {
                struct resource *res;
                void __iomem *mmio;

                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                if (!res) {
                        /* Try parent's regmap */
                        qp->regmap = dev_get_regmap(dev->parent, NULL);
                        if (qp->regmap)
                                goto regmap_done;
                        return -ENODEV;
                }

                mmio = devm_ioremap_resource(dev, res);
                if (IS_ERR(mmio)) {
                        dev_err(dev, "Cannot ioremap interconnect bus resource\n");
                        return PTR_ERR(mmio);
                }

                qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
                if (IS_ERR(qp->regmap)) {
                        dev_err(dev, "Cannot regmap interconnect bus resource\n");
                        return PTR_ERR(qp->regmap);
                }
        }

regmap_done:
        ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
        if (ret)
                return ret;

        ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
        if (ret)
                return ret;

        if (desc->has_bus_pd) {
                ret = dev_pm_domain_attach(dev, true);
                if (ret) {
                        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
                        return ret;
                }
        }

        provider = &qp->provider;
        INIT_LIST_HEAD(&provider->nodes);
        provider->dev = dev;
        provider->set = qcom_icc_set;
        provider->aggregate = icc_std_aggregate;
        provider->xlate = of_icc_xlate_onecell;
        provider->data = data;

        ret = icc_provider_add(provider);
        if (ret) {
                dev_err(dev, "error adding interconnect provider: %d\n", ret);
                clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
                return ret;
        }

        for (i = 0; i < num_nodes; i++) {
                size_t j;

                node = icc_node_create(qnodes[i]->id);
                if (IS_ERR(node)) {
                        ret = PTR_ERR(node);
                        goto err;
                }

                node->name = qnodes[i]->name;
                node->data = qnodes[i];
                icc_node_add(node, provider);

                for (j = 0; j < qnodes[i]->num_links; j++)
                        icc_link_create(node, qnodes[i]->links[j]);

                data->nodes[i] = node;
        }
        data->num_nodes = num_nodes;

        platform_set_drvdata(pdev, qp);

        /* Populate child NoC devices if any */
        if (of_get_child_count(dev->of_node) > 0)
                return of_platform_populate(dev->of_node, NULL, NULL, dev);

        return 0;
err:
        icc_nodes_remove(provider);
        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
        icc_provider_del(provider);

        return ret;
}
EXPORT_SYMBOL(qnoc_probe);

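/**
 * qnoc_remove - common remove path for RPM-based interconnect providers
 * @pdev: platform device previously set up by qnoc_probe()
 *
 * Remove the provider's nodes, disable the bus clocks and unregister the
 * interconnect provider.
 *
 * Return: 0 on success or a negative errno on failure.
 */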
int qnoc_remove(struct platform_device *pdev)
{
        struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

        icc_nodes_remove(&qp->provider);
        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
        return icc_provider_del(&qp->provider);
}
EXPORT_SYMBOL(qnoc_remove);