1 // SPDX-License-Identifier: GPL-2.0+
3 * (C) Copyright 2019 Xilinx, Inc.
4 * Siva Durga Prasad Paladugu <siva.durga.paladugu@xilinx.com>
8 #include <linux/bitops.h>
9 #include <linux/bitfield.h>
11 #include <clk-uclass.h>
14 #include <asm/arch/sys_proto.h>
15 #include <zynqmp_firmware.h>
16 #include <linux/err.h>
18 #define MAX_PARENT 100
20 #define MAX_NAME_LEN 50
22 #define CLK_TYPE_SHIFT 2
24 #define PM_API_PAYLOAD_LEN 3
26 #define NA_PARENT 0xFFFFFFFF
27 #define DUMMY_PARENT 0xFFFFFFFE
29 #define CLK_TYPE_FIELD_LEN 4
30 #define CLK_TOPOLOGY_NODE_OFFSET 16
31 #define NODES_PER_RESP 3
33 #define CLK_TYPE_FIELD_MASK 0xF
34 #define CLK_FLAG_FIELD_MASK GENMASK(21, 8)
35 #define CLK_TYPE_FLAG_FIELD_MASK GENMASK(31, 24)
36 #define CLK_TYPE_FLAG2_FIELD_MASK GENMASK(7, 4)
37 #define CLK_TYPE_FLAG_BITS 8
39 #define CLK_PARENTS_ID_LEN 16
40 #define CLK_PARENTS_ID_MASK 0xFFFF
42 #define END_OF_TOPOLOGY_NODE 1
43 #define END_OF_PARENTS 1
45 #define CLK_VALID_MASK 0x1
46 #define NODE_CLASS_SHIFT 26U
47 #define NODE_SUBCLASS_SHIFT 20U
48 #define NODE_TYPE_SHIFT 14U
49 #define NODE_INDEX_SHIFT 0U
51 #define CLK_GET_NAME_RESP_LEN 16
52 #define CLK_GET_TOPOLOGY_RESP_WORDS 3
53 #define CLK_GET_PARENTS_RESP_WORDS 3
54 #define CLK_GET_ATTR_RESP_WORDS 1
56 #define NODE_SUBCLASS_CLOCK_PLL 1
57 #define NODE_SUBCLASS_CLOCK_OUT 2
58 #define NODE_SUBCLASS_CLOCK_REF 3
60 #define NODE_CLASS_CLOCK 2
61 #define NODE_CLASS_MASK 0x3F
63 #define CLOCK_NODE_TYPE_MUX 1
64 #define CLOCK_NODE_TYPE_DIV 4
65 #define CLOCK_NODE_TYPE_GATE 6
/* Query IDs understood by the PMC firmware's PM_QUERY_DATA call.
 * NOTE(review): the enum's opening line is outside this view; member order
 * must match the firmware ABI — do not reorder.
 */
69 PM_QID_CLOCK_GET_NAME,
70 PM_QID_CLOCK_GET_TOPOLOGY,
71 PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
72 PM_QID_CLOCK_GET_PARENTS,
73 PM_QID_CLOCK_GET_ATTRIBUTES,
74 PM_QID_PINCTRL_GET_NUM_PINS,
75 PM_QID_PINCTRL_GET_NUM_FUNCTIONS,
76 PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS,
77 PM_QID_PINCTRL_GET_FUNCTION_NAME,
78 PM_QID_PINCTRL_GET_FUNCTION_GROUPS,
79 PM_QID_PINCTRL_GET_PIN_GROUPS,
80 PM_QID_CLOCK_GET_NUM_CLOCKS,
81 PM_QID_CLOCK_GET_MAX_DIVISOR,
/* Parent clock name as returned by the firmware (struct body partially
 * outside this view). */
90 char name[MAX_NAME_LEN];
/* One node (mux/div/gate/...) in a clock's topology chain. */
95 struct clock_topology {
/* Per-clock record built from firmware query responses. */
101 struct versal_clock {
102 char clk_name[MAX_NAME_LEN];
105 struct clock_topology node[MAX_NODES];
107 struct clock_parent parent[MAX_PARENT];
/* Driver private data: points at the global clock table below. */
112 struct versal_clk_priv {
113 struct versal_clock *clk;
/* Reference-clock rates read from DT in probe ("alt_ref_clk" etc.). */
116 static ulong alt_ref_clk;
117 static ulong pl_alt_ref_clk;
118 static ulong ref_clk;
/* Argument bundle for a PM_QUERY_DATA request (qid + up to 3 args). */
120 struct versal_pm_query_data {
/* Global clock table, sized clock_max_idx, allocated in versal_clock_setup(). */
127 static struct versal_clock *clock;
128 static unsigned int clock_max_idx;
/* EEMI API id of the QUERY_DATA call. */
130 #define PM_QUERY_DATA 35
/*
 * versal_pm_query() - issue a PM_QUERY_DATA SiP call to the PMC firmware.
 * @qdata:       query id plus up to three 32-bit arguments, packed two per
 *               64-bit SMC register below.
 * @ret_payload: receives 5 x u32 unpacked from the returned registers.
 *
 * Returns the firmware status word, except for GET_NAME queries where the
 * whole payload is name bytes and status is forced to 0.
 * NOTE(review): the SMC invocation and the ret_payload NULL check sit on
 * lines missing from this view — confirm against the full file.
 */
132 static int versal_pm_query(struct versal_pm_query_data qdata, u32 *ret_payload)
136 regs.regs[0] = PM_SIP_SVC | PM_QUERY_DATA;
137 regs.regs[1] = ((u64)qdata.arg1 << 32) | qdata.qid;
138 regs.regs[2] = ((u64)qdata.arg3 << 32) | qdata.arg2;
/* Unpack the three 64-bit result registers into five 32-bit words. */
143 ret_payload[0] = (u32)regs.regs[0];
144 ret_payload[1] = upper_32_bits(regs.regs[0]);
145 ret_payload[2] = (u32)regs.regs[1];
146 ret_payload[3] = upper_32_bits(regs.regs[1]);
147 ret_payload[4] = (u32)regs.regs[2];
/* GET_NAME responses carry name characters in reg[0], not a status. */
150 return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : regs.regs[0];
/*
 * versal_is_valid_clock() - validity flag for a clock table index.
 * Returns the firmware-reported 'valid' attribute for @clk_id; out-of-range
 * ids take the (not visible here) early-return path — presumably a negative
 * errno, TODO confirm.
 */
153 static inline int versal_is_valid_clock(u32 clk_id)
155 if (clk_id >= clock_max_idx)
158 return clock[clk_id].valid;
/*
 * versal_get_clock_name() - copy the cached name of a valid clock.
 * @clk_name must hold at least MAX_NAME_LEN bytes; strncpy from a
 * MAX_NAME_LEN source may leave it unterminated if the cached name fills
 * the buffer — callers should be aware.
 * Returns 0 on success, -EINVAL for an invalid clock, or the negative
 * error from the validity check.
 */
161 static int versal_get_clock_name(u32 clk_id, char *clk_name)
165 ret = versal_is_valid_clock(clk_id);
167 strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
/* ret == 0 means "not valid" here, which callers see as -EINVAL. */
171 return ret == 0 ? -EINVAL : ret;
/*
 * versal_get_clock_type() - fetch the cached type (output/external) of a
 * valid clock. Mirrors versal_get_clock_name()'s return convention:
 * 0 on success, -EINVAL when the clock is not valid, negative errno
 * from the range check otherwise.
 */
174 static int versal_get_clock_type(u32 clk_id, u32 *type)
178 ret = versal_is_valid_clock(clk_id);
180 *type = clock[clk_id].type;
184 return ret == 0 ? -EINVAL : ret;
/*
 * versal_pm_clock_get_num_clocks() - ask firmware how many clocks exist.
 * On return *nclocks holds payload word 1 of the query response.
 * NOTE(review): *nclocks is read before ret is checked on the lines visible
 * here — the error-handling lines are missing from this view, confirm the
 * full file checks ret.
 */
187 static int versal_pm_clock_get_num_clocks(u32 *nclocks)
189 struct versal_pm_query_data qdata = {0};
190 u32 ret_payload[PAYLOAD_ARG_CNT];
193 qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
195 ret = versal_pm_query(qdata, ret_payload);
196 *nclocks = ret_payload[1];
/*
 * versal_pm_clock_get_name() - fetch a clock's name from firmware.
 * The name occupies the full 16-byte payload (CLK_GET_NAME_RESP_LEN),
 * starting at word 0 — unlike the other queries, which reserve word 0
 * for status. @name must hold at least CLK_GET_NAME_RESP_LEN bytes.
 */
201 static int versal_pm_clock_get_name(u32 clock_id, char *name)
203 struct versal_pm_query_data qdata = {0};
204 u32 ret_payload[PAYLOAD_ARG_CNT];
207 qdata.qid = PM_QID_CLOCK_GET_NAME;
208 qdata.arg1 = clock_id;
210 ret = versal_pm_query(qdata, ret_payload);
213 memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);
/*
 * versal_pm_clock_get_topology() - fetch one 3-word chunk of a clock's
 * topology starting at node @index. The response payload words 1..3 are
 * copied into @topology (CLK_GET_TOPOLOGY_RESP_WORDS * 4 bytes).
 * NOTE(review): the line assigning @index into qdata.arg2 is not visible
 * in this view — confirm against the full file.
 */
218 static int versal_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
220 struct versal_pm_query_data qdata = {0};
221 u32 ret_payload[PAYLOAD_ARG_CNT];
224 qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
225 qdata.arg1 = clock_id;
228 ret = versal_pm_query(qdata, ret_payload);
229 memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);
/*
 * versal_pm_clock_get_parents() - fetch one 3-word chunk of a clock's
 * parent list starting at parent @index; payload words 1..3 land in
 * @parents. Same shape as versal_pm_clock_get_topology(); the arg2/index
 * assignment line is likewise outside this view.
 */
234 static int versal_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
236 struct versal_pm_query_data qdata = {0};
237 u32 ret_payload[PAYLOAD_ARG_CNT];
240 qdata.qid = PM_QID_CLOCK_GET_PARENTS;
241 qdata.arg1 = clock_id;
244 ret = versal_pm_query(qdata, ret_payload);
245 memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);
/*
 * versal_pm_clock_get_attributes() - fetch the single attribute word
 * (valid bit, type bit, node class/subclass/type fields) for @clock_id
 * into *attr.
 */
250 static int versal_pm_clock_get_attributes(u32 clock_id, u32 *attr)
252 struct versal_pm_query_data qdata = {0};
253 u32 ret_payload[PAYLOAD_ARG_CNT];
256 qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
257 qdata.arg1 = clock_id;
259 ret = versal_pm_query(qdata, ret_payload);
260 memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);
/*
 * __versal_clock_get_topology() - decode one payload chunk (up to
 * PM_API_PAYLOAD_LEN words) of topology data into @topology[*nnodes...],
 * advancing *nnodes (the increment line is outside this view).
 *
 * Each word encodes: type in bits [3:0], flags in [21:8], type-flags in
 * [31:24] plus extra type-flag bits in [7:4] shifted up by
 * CLK_TYPE_FLAG_BITS. A zero type field terminates the list and
 * END_OF_TOPOLOGY_NODE is returned.
 */
265 static int __versal_clock_get_topology(struct clock_topology *topology,
266 u32 *data, u32 *nnodes)
270 for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
271 if (!(data[i] & CLK_TYPE_FIELD_MASK))
272 return END_OF_TOPOLOGY_NODE;
273 topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
274 topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
/* type_flag is split across two bitfields in the response word. */
276 topology[*nnodes].type_flag =
277 FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
278 topology[*nnodes].type_flag |=
279 FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, data[i]) <<
281 debug("topology type:0x%x, flag:0x%x, type_flag:0x%x\n",
282 topology[*nnodes].type, topology[*nnodes].flag,
283 topology[*nnodes].type_flag);
/*
 * versal_clock_get_topology() - fetch and decode the complete topology
 * chain of clock table entry @clk_id, 3 nodes per firmware response,
 * until END_OF_TOPOLOGY_NODE or MAX_NODES.
 * NOTE(review): the loop bound "j <= MAX_NODES" (inclusive) combined with
 * the missing bounds handling in the non-visible lines could over-run
 * @topology[MAX_NODES] — verify against the full file.
 */
290 static int versal_clock_get_topology(u32 clk_id,
291 struct clock_topology *topology,
295 u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
298 for (j = 0; j <= MAX_NODES; j += 3) {
299 ret = versal_pm_clock_get_topology(clock[clk_id].clk_id, j,
303 ret = __versal_clock_get_topology(topology, pm_resp, num_nodes);
304 if (ret == END_OF_TOPOLOGY_NODE)
/*
 * __versal_clock_get_parents() - decode one payload chunk of parent ids
 * into @parents. NA_PARENT (0xFFFFFFFF) terminates the list;
 * DUMMY_PARENT entries get the placeholder name "dummy_name"; otherwise
 * the low 16 bits are the parent clock index (resolved to a name via
 * versal_get_clock_name()) and the high 16 bits are flags.
 */
311 static int __versal_clock_get_parents(struct clock_parent *parents, u32 *data,
315 struct clock_parent *parent;
317 for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
318 if (data[i] == NA_PARENT)
319 return END_OF_PARENTS;
321 parent = &parents[i];
322 parent->id = data[i] & CLK_PARENTS_ID_MASK;
323 if (data[i] == DUMMY_PARENT) {
324 strcpy(parent->name, "dummy_name");
327 parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
/* Name lookup failure path is on lines outside this view. */
328 if (versal_get_clock_name(parent->id, parent->name))
331 debug("parent name:%s\n", parent->name);
/*
 * versal_clock_get_parents() - fetch and decode the full parent list of
 * clock table entry @clk_id, PM_API_PAYLOAD_LEN parents per firmware
 * round-trip, stopping at END_OF_PARENTS or when *num_parents exceeds
 * MAX_PARENT. *num_parents is presumably updated inside the loop on
 * lines not visible here — confirm.
 */
338 static int versal_clock_get_parents(u32 clk_id, struct clock_parent *parents,
342 u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
346 /* Get parents from firmware */
347 ret = versal_pm_clock_get_parents(clock[clk_id].clk_id, j,
352 ret = __versal_clock_get_parents(&parents[j], pm_resp,
354 if (ret == END_OF_PARENTS)
356 j += PM_API_PAYLOAD_LEN;
357 } while (*num_parents <= MAX_PARENT);
/*
 * versal_clock_get_div() - read the current divider of @clk_id from
 * firmware (payload word 1). The xilinx_pm_request() return status is
 * not checked on the visible lines.
 */
362 static u32 versal_clock_get_div(u32 clk_id)
364 u32 ret_payload[PAYLOAD_ARG_CNT];
367 xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
368 div = ret_payload[1];
/*
 * versal_clock_set_div() - program divider @div on @clk_id via firmware.
 * Return value (presumably @div, from a line outside this view) — confirm.
 */
373 static u32 versal_clock_set_div(u32 clk_id, u32 div)
375 u32 ret_payload[PAYLOAD_ARG_CNT];
377 xilinx_pm_request(PM_CLOCK_SETDIVIDER, clk_id, div, 0, 0, ret_payload);
/*
 * versal_clock_ref() - map a clock to one of the board reference rates.
 * Queries the firmware parent selector and (on branch lines outside this
 * view) returns ref_clk / alt_ref_clk / pl_alt_ref_clk accordingly; the
 * visible tail returns pl_alt_ref_clk.
 * NOTE(review): ref is read from payload word 0 here, while other helpers
 * read word 1 — verify which word carries the parent index.
 */
382 static u64 versal_clock_ref(u32 clk_id)
384 u32 ret_payload[PAYLOAD_ARG_CNT];
387 xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0, ret_payload);
388 ref = ret_payload[0];
392 return pl_alt_ref_clk;
/*
 * versal_clock_get_pll_rate() - compute a PLL's output rate.
 * Checks the PLL is enabled (GETSTATE), resolves the parent reference
 * rate, then multiplies by the feedback divider. When fractional mode is
 * enabled (PLL_GETPARAM param 2), the result is halved via the
 * ">> (1 << frac)" shift on the final line — for frac=1 that divides by
 * 4, not 2; verify this formula against the full file/TRM.
 */
396 static u64 versal_clock_get_pll_rate(u32 clk_id)
398 u32 ret_payload[PAYLOAD_ARG_CNT];
403 u32 parent_rate, parent_id;
/* Table index is the low 12 bits of the full node id. */
404 u32 id = clk_id & 0xFFF;
406 xilinx_pm_request(PM_CLOCK_GETSTATE, clk_id, 0, 0, 0, ret_payload);
407 res = ret_payload[1];
/* NOTE(review): "0%x" looks like a typo for "0x%x" in the message. */
409 printf("0%x PLL not enabled\n", clk_id);
413 parent_id = clock[clock[id].parent[0].id].clk_id;
414 parent_rate = versal_clock_ref(parent_id);
416 xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
417 fbdiv = ret_payload[1];
418 xilinx_pm_request(PM_CLOCK_PLL_GETPARAM, clk_id, 2, 0, 0, ret_payload);
419 frac = ret_payload[1];
421 freq = (fbdiv * parent_rate) >> (1 << frac);
/*
 * versal_clock_mux() - whether the clock's topology contains a MUX node.
 * Scans the cached node list for CLOCK_NODE_TYPE_MUX; return values are
 * on lines outside this view (presumably boolean 1/0).
 */
426 static u32 versal_clock_mux(u32 clk_id)
429 u32 id = clk_id & 0xFFF;
431 for (i = 0; i < clock[id].num_nodes; i++)
432 if (clock[id].node[i].type == CLOCK_NODE_TYPE_MUX)
/*
 * versal_clock_get_parentid() - resolve the full node id of a clock's
 * currently-selected parent. For muxed clocks the selector index comes
 * from firmware (GETPARENT, payload word 1); the non-mux default for
 * parent_id is set on a line outside this view (presumably 0).
 */
438 static u32 versal_clock_get_parentid(u32 clk_id)
441 u32 ret_payload[PAYLOAD_ARG_CNT];
442 u32 id = clk_id & 0xFFF;
444 if (versal_clock_mux(clk_id)) {
445 xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0,
447 parent_id = ret_payload[1];
/* Double lookup: selector -> parent table entry -> that clock's node id. */
450 debug("parent_id:0x%x\n", clock[clock[id].parent[parent_id].id].clk_id);
451 return clock[clock[id].parent[parent_id].id].clk_id;
/*
 * versal_clock_gate() - whether the clock's topology contains a GATE
 * node. Same scan shape as versal_clock_mux(); returns are on lines
 * outside this view.
 */
454 static u32 versal_clock_gate(u32 clk_id)
456 u32 id = clk_id & 0xFFF;
459 for (i = 0; i < clock[id].num_nodes; i++)
460 if (clock[id].node[i].type == CLOCK_NODE_TYPE_GATE)
/*
 * versal_clock_div() - whether the clock's topology contains a DIV node.
 * Same scan shape as versal_clock_mux()/versal_clock_gate().
 */
466 static u32 versal_clock_div(u32 clk_id)
469 u32 id = clk_id & 0xFFF;
471 for (i = 0; i < clock[id].num_nodes; i++)
472 if (clock[id].node[i].type == CLOCK_NODE_TYPE_DIV)
/*
 * versal_clock_pll() - if @clk_id is a PLL node (class CLOCK, subclass
 * PLL per the encoded id bits), store its rate in *clk_rate and signal
 * the caller (non-zero return, from a line outside this view).
 */
478 static u32 versal_clock_pll(u32 clk_id, u64 *clk_rate)
480 if (((clk_id >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK) ==
481 NODE_SUBCLASS_CLOCK_PLL &&
482 ((clk_id >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK) ==
484 *clk_rate = versal_clock_get_pll_rate(clk_id);
/*
 * versal_clock_calc() - recursively compute a clock's rate: stop at a
 * PLL or a reference-class parent, otherwise recurse into the selected
 * parent and apply this clock's divider if its topology has one.
 * Recursion depth is bounded by the parent chain length in practice.
 */
491 static u64 versal_clock_calc(u32 clk_id)
497 if (versal_clock_pll(clk_id, &clk_rate))
500 parent_id = versal_clock_get_parentid(clk_id);
501 if (((parent_id >> NODE_SUBCLASS_SHIFT) &
502 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
503 return versal_clock_ref(clk_id);
505 clk_rate = versal_clock_calc(parent_id);
507 if (versal_clock_div(clk_id)) {
508 div = versal_clock_get_div(clk_id);
509 clk_rate = DIV_ROUND_CLOSEST(clk_rate, div);
/*
 * versal_clock_get_rate() - top-level rate query used by soc_clk_dump()
 * and the clk ops. Handles three cases in order: reference clocks,
 * PLLs, and gated output clocks (computed via versal_clock_calc()).
 * Return statements for each branch are on lines outside this view.
 */
515 static int versal_clock_get_rate(u32 clk_id, u64 *clk_rate)
517 if (((clk_id >> NODE_SUBCLASS_SHIFT) &
518 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
519 *clk_rate = versal_clock_ref(clk_id);
521 if (versal_clock_pll(clk_id, clk_rate))
524 if (((clk_id >> NODE_SUBCLASS_SHIFT) &
525 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_OUT &&
526 ((clk_id >> NODE_CLASS_SHIFT) &
527 NODE_CLASS_MASK) == NODE_CLASS_CLOCK) {
/* Only clocks with a gate node report a rate here. */
528 if (!versal_clock_gate(clk_id))
530 *clk_rate = versal_clock_calc(clk_id);
/*
 * soc_clk_dump() - implementation of the "clk dump" shell command:
 * prints the three DT reference rates, then the name and computed
 * frequency of every valid OUTPUT-type clock in the table.
 */
537 int soc_clk_dump(void)
540 u32 type, ret, i = 0;
542 printf("\n ****** VERSAL CLOCKS *****\n");
544 printf("alt_ref_clk:%ld pl_alt_ref_clk:%ld ref_clk:%ld\n",
545 alt_ref_clk, pl_alt_ref_clk, ref_clk);
546 for (i = 0; i < clock_max_idx; i++) {
547 debug("%s\n", clock[i].clk_name);
/* Skip external/invalid clocks — only outputs have a computable rate. */
548 ret = versal_get_clock_type(i, &type);
549 if (ret || type != CLK_TYPE_OUTPUT)
552 ret = versal_clock_get_rate(clock[i].clk_id, &clk_rate);
555 printf("clk: %s freq:%lld\n",
556 clock[i].clk_name, clk_rate);
/*
 * versal_get_clock_info() - populate the global clock table.
 * Pass 1: for each index, read the attribute word, derive valid/type,
 * rebuild the full node id from class/subclass/nodetype/index fields,
 * and fetch the name. Pass 2: for every valid OUTPUT clock, fetch its
 * topology and parent lists. Firmware-error handling sits on lines
 * outside this view.
 */
562 static void versal_get_clock_info(void)
565 u32 attr, type = 0, nodetype, subclass, class;
567 for (i = 0; i < clock_max_idx; i++) {
568 ret = versal_pm_clock_get_attributes(i, &attr);
572 clock[i].valid = attr & CLK_VALID_MASK;
/* Bit CLK_TYPE_SHIFT distinguishes external inputs from outputs. */
573 clock[i].type = ((attr >> CLK_TYPE_SHIFT) & 0x1) ?
574 CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
575 nodetype = (attr >> NODE_TYPE_SHIFT) & NODE_CLASS_MASK;
576 subclass = (attr >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK;
577 class = (attr >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK;
/* Re-encode the canonical firmware node id for this table slot. */
579 clock[i].clk_id = (class << NODE_CLASS_SHIFT) |
580 (subclass << NODE_SUBCLASS_SHIFT) |
581 (nodetype << NODE_TYPE_SHIFT) |
582 (i << NODE_INDEX_SHIFT);
584 ret = versal_pm_clock_get_name(clock[i].clk_id,
588 debug("clk name:%s, Valid:%d, type:%d, clk_id:0x%x\n",
589 clock[i].clk_name, clock[i].valid,
590 clock[i].type, clock[i].clk_id);
593 /* Get topology of all clock */
594 for (i = 0; i < clock_max_idx; i++) {
595 ret = versal_get_clock_type(i, &type);
596 if (ret || type != CLK_TYPE_OUTPUT)
598 debug("clk name:%s\n", clock[i].clk_name);
599 ret = versal_clock_get_topology(i, clock[i].node,
600 &clock[i].num_nodes);
604 ret = versal_clock_get_parents(i, clock[i].parent,
605 &clock[i].num_parents);
/*
 * versal_clock_setup() - one-time initialization: ask firmware for the
 * clock count, allocate the global table, and fill it via
 * versal_get_clock_info(). The calloc-failure check is on lines outside
 * this view — confirm it returns -ENOMEM.
 */
611 int versal_clock_setup(void)
615 ret = versal_pm_clock_get_num_clocks(&clock_max_idx);
619 debug("%s, clock_max_idx:0x%x\n", __func__, clock_max_idx);
620 clock = calloc(clock_max_idx, sizeof(*clock));
624 versal_get_clock_info();
/*
 * versal_clock_get_freq_by_name() - look up DT clock @name on @dev and
 * store its rate in *freq. clk_get_rate() errors are detected via
 * IS_ERR_VALUE on the ulong result; error returns sit on lines outside
 * this view.
 */
629 static int versal_clock_get_freq_by_name(char *name, struct udevice *dev,
635 ret = clk_get_by_name(dev, name, &clk);
637 dev_err(dev, "failed to get %s\n", name);
641 *freq = clk_get_rate(&clk);
642 if (IS_ERR_VALUE(*freq)) {
643 dev_err(dev, "failed to get rate %s\n", name);
/*
 * versal_clk_probe() - driver probe: cache the three DT reference-clock
 * rates into the file-scope statics, then build the clock table.
 * NOTE(review): priv->clk is presumably pointed at the global table on a
 * line outside this view — confirm.
 */
650 static int versal_clk_probe(struct udevice *dev)
653 struct versal_clk_priv *priv = dev_get_priv(dev);
655 debug("%s\n", __func__);
657 ret = versal_clock_get_freq_by_name("alt_ref_clk", dev, &alt_ref_clk);
661 ret = versal_clock_get_freq_by_name("pl_alt_ref_clk",
662 dev, &pl_alt_ref_clk);
666 ret = versal_clock_get_freq_by_name("ref_clk", dev, &ref_clk);
670 versal_clock_setup();
/*
 * versal_clk_get_rate() - clk_ops.get_rate: map the uclass clk id to the
 * firmware node id via the table and delegate to
 * versal_clock_get_rate(). The 'id = clk->id' assignment and the final
 * return are on lines outside this view.
 */
677 static ulong versal_clk_get_rate(struct clk *clk)
679 struct versal_clk_priv *priv = dev_get_priv(clk->dev);
684 debug("%s\n", __func__);
686 clk_id = priv->clk[id].clk_id;
688 versal_clock_get_rate(clk_id, &clk_rate);
/*
 * versal_clk_set_rate() - clk_ops.set_rate: walk up the parent chain
 * from the requested clock until a node with a divider is found, program
 * the divider closest to @rate, and return the achieved rate. Gives up
 * (visible printf) when the walk reaches a reference clock with no
 * divider found.
 */
693 static ulong versal_clk_set_rate(struct clk *clk, ulong rate)
695 struct versal_clk_priv *priv = dev_get_priv(clk->dev);
702 debug("%s\n", __func__);
704 clk_id = priv->clk[id].clk_id;
/* Current rate is needed to compute the divisor for the target rate. */
706 ret = versal_clock_get_rate(clk_id, &clk_rate);
708 printf("Clock is not a Gate:0x%x\n", clk_id);
713 if (versal_clock_div(clk_id)) {
714 div = versal_clock_get_div(clk_id);
/* clk_rate here is presumably the undivided parent rate (clk_rate * div
 * on a line outside this view) — confirm before relying on the math. */
716 div = DIV_ROUND_CLOSEST(clk_rate, rate);
717 versal_clock_set_div(clk_id, div);
718 debug("%s, div:%d, newrate:%lld\n", __func__,
719 div, DIV_ROUND_CLOSEST(clk_rate, div));
720 return DIV_ROUND_CLOSEST(clk_rate, div);
/* No divider at this node: climb to the selected parent and retry. */
722 clk_id = versal_clock_get_parentid(clk_id);
723 } while (((clk_id >> NODE_SUBCLASS_SHIFT) &
724 NODE_CLASS_MASK) != NODE_SUBCLASS_CLOCK_REF);
/* NOTE(review): grammar — "didn't has" should read "doesn't have". */
726 printf("Clock didn't has Divisors:0x%x\n", priv->clk[id].clk_id);
/* Clock uclass operations: only get/set rate are implemented. */
731 static struct clk_ops versal_clk_ops = {
732 .set_rate = versal_clk_set_rate,
733 .get_rate = versal_clk_get_rate,
/* DT match table (sentinel entry is on a line outside this view). */
736 static const struct udevice_id versal_clk_ids[] = {
737 { .compatible = "xlnx,versal-clk" },
741 U_BOOT_DRIVER(versal_clk) = {
742 .name = "versal-clk",
744 .of_match = versal_clk_ids,
745 .probe = versal_clk_probe,
746 .ops = &versal_clk_ops,
747 .priv_auto_alloc_size = sizeof(struct versal_clk_priv),