1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
7 * Each of the CPU clusters (Power and Perf) on msm8996 are
8 * clocked via 2 PLLs, a primary and alternate. There are also
9 * 2 Mux'es, a primary and secondary all connected together
14 * +------------------>0 |
19 * | +-------+ | +-------+
22 * +---------------+ | +----------->1 | CPU clk
23 * |Primary PLL +----+ PLL_EARLY | | +------>
24 * | +------+-----------+ +------>2 PMUX |
25 * +---------------+ | | | |
26 * | +------+ | +-->3 |
27 * +--^+ ACD +-----+ | +-------+
28 * +---------------+ +------+ |
30 * | +---------------------------+
31 * +---------------+ PLL_EARLY
33 * The primary PLL is what drives the CPU clk, except for times
34 * when we are reprogramming the PLL itself (for rate changes) when
35 * we temporarily switch to an alternate PLL.
37 * The primary PLL operates on a single VCO range, between 600MHz
38 * and 3GHz. However the CPUs do support OPPs with frequencies
39 * between 300MHz and 600MHz. In order to support running the CPUs
40 * at those frequencies we end up having to lock the PLL at twice
41 * the rate and drive the CPU clk via the PLL/2 output and SMUX.
43 * So for frequencies above 600MHz we follow the following path
44 * Primary PLL --> PLL_EARLY --> PMUX(1) --> CPU clk
45 * and for frequencies between 300MHz and 600MHz we follow
46 * Primary PLL --> PLL/2 --> SMUX(1) --> PMUX(0) --> CPU clk
48 * ACD stands for Adaptive Clock Distribution and is used to
49 * detect voltage droops.
52 #include <linux/bitfield.h>
53 #include <linux/clk.h>
54 #include <linux/clk-provider.h>
56 #include <linux/module.h>
57 #include <linux/platform_device.h>
58 #include <linux/regmap.h>
59 #include <soc/qcom/kryo-l2-accessors.h>
61 #include "clk-alpha-pll.h"
62 #include "clk-regmap.h"
63 #include "clk-regmap-mux.h"
/* Rates below this are driven from the PLL/2 output via SMUX (see header). */
73 #define DIV_2_THRESHOLD 600000000
/* Base offsets of the Power and Perf cluster register blocks. */
74 #define PWRCL_REG_OFFSET 0x0
75 #define PERFCL_REG_OFFSET 0x80000
/* Offsets, relative to a cluster block, of the mux, alt PLL and SSSCTL regs. */
76 #define MUX_OFFSET 0x40
77 #define ALT_PLL_OFFSET 0x100
78 #define SSSCTL_OFFSET 0x160
/*
 * Register-offset map for the primary CPU PLLs, consumed by the shared
 * alpha-PLL code through struct clk_alpha_pll::regs.
 */
82 static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
83 [PLL_OFF_L_VAL] = 0x04,
84 [PLL_OFF_ALPHA_VAL] = 0x08,
85 [PLL_OFF_USER_CTL] = 0x10,
86 [PLL_OFF_CONFIG_CTL] = 0x18,
87 [PLL_OFF_CONFIG_CTL_U] = 0x1c,
88 [PLL_OFF_TEST_CTL] = 0x20,
89 [PLL_OFF_TEST_CTL_U] = 0x24,
90 [PLL_OFF_STATUS] = 0x28,
/*
 * Register-offset map for the alternate PLLs; unlike the primary map it
 * has ALPHA_VAL_U/USER_CTL_U but no CONFIG_CTL_U.
 */
93 static const u8 alt_pll_regs[PLL_OFF_MAX_REGS] = {
94 [PLL_OFF_L_VAL] = 0x04,
95 [PLL_OFF_ALPHA_VAL] = 0x08,
96 [PLL_OFF_ALPHA_VAL_U] = 0x0c,
97 [PLL_OFF_USER_CTL] = 0x10,
98 [PLL_OFF_USER_CTL_U] = 0x14,
99 [PLL_OFF_CONFIG_CTL] = 0x18,
100 [PLL_OFF_TEST_CTL] = 0x20,
101 [PLL_OFF_TEST_CTL_U] = 0x24,
102 [PLL_OFF_STATUS] = 0x28,
/*
 * One-time configuration applied to both primary (HF) PLLs at probe via
 * clk_alpha_pll_configure().  post_div_val = 0x1 selects the /2 post
 * divider output used for the sub-600MHz path; both the main and early
 * outputs are enabled.
 */
107 static const struct alpha_pll_config hfpll_config = {
109 .config_ctl_val = 0x200d4aa8,
110 .config_ctl_hi_val = 0x006,
111 .pre_div_mask = BIT(12),
112 .post_div_mask = 0x3 << 8,
113 .post_div_val = 0x1 << 8,
114 .main_output_mask = BIT(0),
115 .early_output_mask = BIT(3),
/*
 * Common parent of all four PLLs; initializer not visible in this chunk —
 * presumably the board XO reference. TODO(review): confirm.
 */
118 static const struct clk_parent_data pll_parent[] = {
/* Primary (Huayra) PLL for the Power cluster. */
122 static struct clk_alpha_pll pwrcl_pll = {
123 .offset = PWRCL_REG_OFFSET,
124 .regs = prim_pll_regs,
125 .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
126 .clkr.hw.init = &(struct clk_init_data){
128 .parent_data = pll_parent,
129 .num_parents = ARRAY_SIZE(pll_parent),
130 .ops = &clk_alpha_pll_huayra_ops,
/* Primary (Huayra) PLL for the Perf cluster; identical except for offset. */
134 static struct clk_alpha_pll perfcl_pll = {
135 .offset = PERFCL_REG_OFFSET,
136 .regs = prim_pll_regs,
137 .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
138 .clkr.hw.init = &(struct clk_init_data){
139 .name = "perfcl_pll",
140 .parent_data = pll_parent,
141 .num_parents = ARRAY_SIZE(pll_parent),
142 .ops = &clk_alpha_pll_huayra_ops,
/*
 * Fixed-factor views of the primary PLLs.  The *_postdiv clocks model the
 * PLL/2 output feeding SMUX; the *_acd clocks model the path through the
 * ACD block into PMUX input 3.  The mult/div fields are not visible in
 * this chunk — presumably 1/2 for postdiv and 1/1 for ACD; TODO confirm.
 * All propagate rate requests to the PLL (CLK_SET_RATE_PARENT).
 */
146 static struct clk_fixed_factor pwrcl_pll_postdiv = {
149 .hw.init = &(struct clk_init_data){
150 .name = "pwrcl_pll_postdiv",
151 .parent_data = &(const struct clk_parent_data){
152 .hw = &pwrcl_pll.clkr.hw
155 .ops = &clk_fixed_factor_ops,
156 .flags = CLK_SET_RATE_PARENT,
160 static struct clk_fixed_factor perfcl_pll_postdiv = {
163 .hw.init = &(struct clk_init_data){
164 .name = "perfcl_pll_postdiv",
165 .parent_data = &(const struct clk_parent_data){
166 .hw = &perfcl_pll.clkr.hw
169 .ops = &clk_fixed_factor_ops,
170 .flags = CLK_SET_RATE_PARENT,
174 static struct clk_fixed_factor perfcl_pll_acd = {
177 .hw.init = &(struct clk_init_data){
178 .name = "perfcl_pll_acd",
179 .parent_data = &(const struct clk_parent_data){
180 .hw = &perfcl_pll.clkr.hw
183 .ops = &clk_fixed_factor_ops,
184 .flags = CLK_SET_RATE_PARENT,
188 static struct clk_fixed_factor pwrcl_pll_acd = {
191 .hw.init = &(struct clk_init_data){
192 .name = "pwrcl_pll_acd",
193 .parent_data = &(const struct clk_parent_data){
194 .hw = &pwrcl_pll.clkr.hw
197 .ops = &clk_fixed_factor_ops,
198 .flags = CLK_SET_RATE_PARENT,
/* VCO operating bands of the alternate PLLs: {mode value, min Hz, max Hz}. */
202 static const struct pll_vco alt_pll_vco_modes[] = {
203 VCO(3, 250000000, 500000000),
204 VCO(2, 500000000, 750000000),
205 VCO(1, 750000000, 1000000000),
206 VCO(0, 1000000000, 2150400000),
/*
 * One-time configuration for both alternate PLLs (applied at probe).
 * Selects VCO mode 3, the /2 post divider, and enables both outputs.
 */
209 static const struct alpha_pll_config altpll_config = {
211 .vco_val = 0x3 << 20,
212 .vco_mask = 0x3 << 20,
213 .config_ctl_val = 0x4001051b,
214 .post_div_mask = 0x3 << 8,
215 .post_div_val = 0x1 << 8,
216 .main_output_mask = BIT(0),
217 .early_output_mask = BIT(3),
/*
 * Alternate PLL for the Power cluster.  The CPU clk is parked here
 * (PMUX ALT_INDEX) while the primary PLL is being reprogrammed.
 */
220 static struct clk_alpha_pll pwrcl_alt_pll = {
221 .offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET,
222 .regs = alt_pll_regs,
223 .vco_table = alt_pll_vco_modes,
224 .num_vco = ARRAY_SIZE(alt_pll_vco_modes),
225 .flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
226 .clkr.hw.init = &(struct clk_init_data) {
227 .name = "pwrcl_alt_pll",
228 .parent_data = pll_parent,
229 .num_parents = ARRAY_SIZE(pll_parent),
230 .ops = &clk_alpha_pll_hwfsm_ops,
/* Alternate PLL for the Perf cluster; identical except for offset. */
234 static struct clk_alpha_pll perfcl_alt_pll = {
235 .offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET,
236 .regs = alt_pll_regs,
237 .vco_table = alt_pll_vco_modes,
238 .num_vco = ARRAY_SIZE(alt_pll_vco_modes),
239 .flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
240 .clkr.hw.init = &(struct clk_init_data) {
241 .name = "perfcl_alt_pll",
242 .parent_data = pll_parent,
243 .num_parents = ARRAY_SIZE(pll_parent),
244 .ops = &clk_alpha_pll_hwfsm_ops,
/*
 * struct clk_cpu_8996_pmux - per-cluster primary mux (PMUX) clock
 * @nb:   rate-change notifier used to park the CPU on the alt PLL
 * @clkr: regmap clock; the mux register offset lives alongside (field
 *        elided in this chunk)
 */
248 struct clk_cpu_8996_pmux {
250 struct notifier_block nb;
251 struct clk_regmap clkr;
/* Forward declaration: the notifier is referenced by the static pmux data. */
254 static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
/* Recover the pmux from its embedded notifier_block. */
257 #define to_clk_cpu_8996_pmux_nb(_nb) \
258 container_of(_nb, struct clk_cpu_8996_pmux, nb)
/* Recover the pmux from its clk_hw (hw -> clk_regmap -> pmux). */
260 static inline struct clk_cpu_8996_pmux *to_clk_cpu_8996_pmux_hw(struct clk_hw *hw)
262 return container_of(to_clk_regmap(hw), struct clk_cpu_8996_pmux, clkr);
/*
 * Read the currently selected PMUX input (0..3) from the cluster's mux
 * register.  The read's error code is ignored; val would be used
 * uninitialized on regmap failure — acceptable for MMIO regmaps, which
 * do not fail.
 */
265 static u8 clk_cpu_8996_pmux_get_parent(struct clk_hw *hw)
267 struct clk_regmap *clkr = to_clk_regmap(hw);
268 struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
271 regmap_read(clkr->regmap, cpuclk->reg, &val);
273 return FIELD_GET(PMUX_MASK, val);
/*
 * Select PMUX input @index by read-modify-writing only the PMUX_MASK
 * bits of the cluster's mux register.  Returns the regmap status.
 */
276 static int clk_cpu_8996_pmux_set_parent(struct clk_hw *hw, u8 index)
278 struct clk_regmap *clkr = to_clk_regmap(hw);
279 struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
282 val = FIELD_PREP(PMUX_MASK, index);
284 return regmap_update_bits(clkr->regmap, cpuclk->reg, PMUX_MASK, val);
/*
 * Pick the PMUX parent for a requested rate:
 *   - below DIV_2_THRESHOLD/2: handled by the (elided) first branch —
 *     presumably rejected, since PLL/2 cannot go that low; TODO confirm
 *   - below DIV_2_THRESHOLD: use the SMUX path (PLL/2 output)
 *   - otherwise: use the ACD path driven by the full-rate PLL
 * The chosen parent is asked to round the rate and recorded in @req.
 */
287 static int clk_cpu_8996_pmux_determine_rate(struct clk_hw *hw,
288 struct clk_rate_request *req)
290 struct clk_hw *parent;
292 if (req->rate < (DIV_2_THRESHOLD / 2))
295 if (req->rate < DIV_2_THRESHOLD)
296 parent = clk_hw_get_parent_by_index(hw, SMUX_INDEX);
298 parent = clk_hw_get_parent_by_index(hw, ACD_INDEX);
302 req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
303 req->best_parent_hw = parent;
/* PMUX clk_ops: mux selection plus rate-driven parent choice above. */
308 static const struct clk_ops clk_cpu_8996_pmux_ops = {
309 .set_parent = clk_cpu_8996_pmux_set_parent,
310 .get_parent = clk_cpu_8996_pmux_get_parent,
311 .determine_rate = clk_cpu_8996_pmux_determine_rate,
/* SMUX parents; index of the visible entry is the PLL/2 (postdiv) output. */
314 static const struct clk_parent_data pwrcl_smux_parents[] = {
316 { .hw = &pwrcl_pll_postdiv.hw },
319 static const struct clk_parent_data perfcl_smux_parents[] = {
321 { .hw = &perfcl_pll_postdiv.hw },
/*
 * Secondary muxes (SMUX), one per cluster, sharing the mux register with
 * the PMUX (shift/width fields elided in this chunk).  CLK_SET_RATE_PARENT
 * lets a sub-600MHz CPU rate request reach the PLL through the /2 postdiv.
 */
324 static struct clk_regmap_mux pwrcl_smux = {
325 .reg = PWRCL_REG_OFFSET + MUX_OFFSET,
328 .clkr.hw.init = &(struct clk_init_data) {
329 .name = "pwrcl_smux",
330 .parent_data = pwrcl_smux_parents,
331 .num_parents = ARRAY_SIZE(pwrcl_smux_parents),
332 .ops = &clk_regmap_mux_closest_ops,
333 .flags = CLK_SET_RATE_PARENT,
337 static struct clk_regmap_mux perfcl_smux = {
338 .reg = PERFCL_REG_OFFSET + MUX_OFFSET,
341 .clkr.hw.init = &(struct clk_init_data) {
342 .name = "perfcl_smux",
343 .parent_data = perfcl_smux_parents,
344 .num_parents = ARRAY_SIZE(perfcl_smux_parents),
345 .ops = &clk_regmap_mux_closest_ops,
346 .flags = CLK_SET_RATE_PARENT,
/*
 * PMUX parent tables, indexed by the hardware mux inputs shown in the
 * header diagram: SMUX (PLL/2 path), PLL (direct), ACD, and the alt PLL.
 */
350 static const struct clk_hw *pwrcl_pmux_parents[] = {
351 [SMUX_INDEX] = &pwrcl_smux.clkr.hw,
352 [PLL_INDEX] = &pwrcl_pll.clkr.hw,
353 [ACD_INDEX] = &pwrcl_pll_acd.hw,
354 [ALT_INDEX] = &pwrcl_alt_pll.clkr.hw,
357 static const struct clk_hw *perfcl_pmux_parents[] = {
358 [SMUX_INDEX] = &perfcl_smux.clkr.hw,
359 [PLL_INDEX] = &perfcl_pll.clkr.hw,
360 [ACD_INDEX] = &perfcl_pll_acd.hw,
361 [ALT_INDEX] = &perfcl_alt_pll.clkr.hw,
/*
 * Primary muxes (PMUX) — these are the CPU clocks exported to consumers.
 * Each registers cpu_clk_notifier_cb so rate changes are bracketed by a
 * switch to the alternate PLL.
 */
364 static struct clk_cpu_8996_pmux pwrcl_pmux = {
365 .reg = PWRCL_REG_OFFSET + MUX_OFFSET,
366 .nb.notifier_call = cpu_clk_notifier_cb,
367 .clkr.hw.init = &(struct clk_init_data) {
368 .name = "pwrcl_pmux",
369 .parent_hws = pwrcl_pmux_parents,
370 .num_parents = ARRAY_SIZE(pwrcl_pmux_parents),
371 .ops = &clk_cpu_8996_pmux_ops,
372 /* CPU clock is critical and should never be gated */
373 .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
377 static struct clk_cpu_8996_pmux perfcl_pmux = {
378 .reg = PERFCL_REG_OFFSET + MUX_OFFSET,
379 .nb.notifier_call = cpu_clk_notifier_cb,
380 .clkr.hw.init = &(struct clk_init_data) {
381 .name = "perfcl_pmux",
382 .parent_hws = perfcl_pmux_parents,
383 .num_parents = ARRAY_SIZE(perfcl_pmux_parents),
384 .ops = &clk_cpu_8996_pmux_ops,
385 /* CPU clock is critical and should never be gated */
386 .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
/*
 * MMIO regmap covering both cluster blocks; max_register spans past the
 * Perf cluster's SSSCTL register (0x80000 + 0x160 and beyond).
 */
390 static const struct regmap_config cpu_msm8996_regmap_config = {
394 .max_register = 0x80210,
396 .val_format_endian = REGMAP_ENDIAN_LITTLE,
/* Plain clk_hw clocks (fixed factors) registered before the regmap clocks. */
399 static struct clk_hw *cpu_msm8996_hw_clks[] = {
400 &pwrcl_pll_postdiv.hw,
401 &perfcl_pll_postdiv.hw,
/* Regmap-backed clocks (PLLs, muxes); most entries elided in this chunk. */
406 static struct clk_regmap *cpu_msm8996_clks[] = {
410 &perfcl_alt_pll.clkr,
/*
 * Register every clock with the framework, apply the one-time PLL
 * configurations, keep the alternate PLLs running so the CPU always has a
 * safe parent to park on, and hook the rate-change notifiers on the two
 * PMUX (CPU) clocks.  Error-return lines between the loops are elided in
 * this chunk.  NOTE(review): the clk_prepare_enable() and
 * devm_clk_notifier_register() return values appear unchecked — confirm
 * against the full file.
 */
417 static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
418 struct regmap *regmap)
422 for (i = 0; i < ARRAY_SIZE(cpu_msm8996_hw_clks); i++) {
423 ret = devm_clk_hw_register(dev, cpu_msm8996_hw_clks[i]);
428 for (i = 0; i < ARRAY_SIZE(cpu_msm8996_clks); i++) {
429 ret = devm_clk_register_regmap(dev, cpu_msm8996_clks[i]);
434 clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
435 clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
436 clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
437 clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
439 /* Enable alt PLLs */
440 clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk);
441 clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk);
443 devm_clk_notifier_register(dev, pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
444 devm_clk_notifier_register(dev, perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);
/* Mask for the MPIDR affinity bits used to identify the current CPU. */
449 #define CPU_AFINITY_MASK 0xFFF
/* Affinity patterns matching CPUs of the Power and Perf clusters. */
450 #define PWRCL_CPU_REG_MASK 0x3
451 #define PERFCL_CPU_REG_MASK 0x103
/* L2 indirect-access register addresses for the ACD block. */
453 #define L2ACDCR_REG 0x580ULL
454 #define L2ACDTD_REG 0x581ULL
455 #define L2ACDDVMRC_REG 0x584ULL
456 #define L2ACDSSCR_REG 0x589ULL
/* Serializes ACD programming across CPUs. */
458 static DEFINE_SPINLOCK(qcom_clk_acd_lock);
/* MMIO base, saved at probe for use from the clk notifier. */
459 static void __iomem *base;
/*
 * Program the Adaptive Clock Distribution hardware for the calling CPU's
 * cluster: common ACD tuning via the L2 indirect registers, then the
 * cluster-specific ACDCR value and SSSCTL enable, selected by matching
 * the CPU's MPIDR affinity against the cluster masks.  Runs with the ACD
 * spinlock held and interrupts off since the L2 indirect accessors and
 * MPIDR read are per-CPU state.
 */
461 static void qcom_cpu_clk_msm8996_acd_init(void __iomem *base)
466 spin_lock_irqsave(&qcom_clk_acd_lock, flags);
468 hwid = read_cpuid_mpidr() & CPU_AFINITY_MASK;
470 kryo_l2_set_indirect_reg(L2ACDTD_REG, 0x00006a11);
471 kryo_l2_set_indirect_reg(L2ACDDVMRC_REG, 0x000e0f0f);
472 kryo_l2_set_indirect_reg(L2ACDSSCR_REG, 0x00000601);
474 if (PWRCL_CPU_REG_MASK == (hwid | PWRCL_CPU_REG_MASK)) {
475 writel(0xf, base + PWRCL_REG_OFFSET + SSSCTL_OFFSET);
476 kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
479 if (PERFCL_CPU_REG_MASK == (hwid | PERFCL_CPU_REG_MASK)) {
480 kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
481 writel(0xf, base + PERFCL_REG_OFFSET + SSSCTL_OFFSET);
484 spin_unlock_irqrestore(&qcom_clk_acd_lock, flags);
/*
 * Rate-change notifier for the PMUX (CPU) clocks.
 *
 * PRE_RATE_CHANGE: park the CPU on the alternate PLL and re-initialize
 * ACD so the cluster keeps a stable clock while the primary PLL is
 * reprogrammed.  POST_RATE_CHANGE: switch back — to SMUX for rates below
 * DIV_2_THRESHOLD, otherwise to the (elided) other index, presumably
 * ACD_INDEX.  Returns the last set_parent status via
 * notifier_from_errno().
 */
487 static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
490 struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_nb(nb);
491 struct clk_notifier_data *cnd = data;
495 case PRE_RATE_CHANGE:
496 ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
497 qcom_cpu_clk_msm8996_acd_init(base);
499 case POST_RATE_CHANGE:
500 if (cnd->new_rate < DIV_2_THRESHOLD)
501 ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
504 ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
512 return notifier_from_errno(ret);
/*
 * Probe: map the APCC register block, build an MMIO regmap over it,
 * register all clocks, run the initial ACD setup, and expose the two
 * PMUX clocks (cells 0 = pwrcl, 1 = perfcl) through a onecell provider.
 * All resources are devm-managed; intermediate error checks are elided
 * in this chunk.
 */
515 static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
517 struct regmap *regmap;
518 struct clk_hw_onecell_data *data;
519 struct device *dev = &pdev->dev;
522 data = devm_kzalloc(dev, struct_size(data, hws, 2), GFP_KERNEL);
526 base = devm_platform_ioremap_resource(pdev, 0);
528 return PTR_ERR(base);
530 regmap = devm_regmap_init_mmio(dev, base, &cpu_msm8996_regmap_config);
532 return PTR_ERR(regmap);
534 ret = qcom_cpu_clk_msm8996_register_clks(dev, regmap);
538 qcom_cpu_clk_msm8996_acd_init(base);
540 data->hws[0] = &pwrcl_pmux.clkr.hw;
541 data->hws[1] = &perfcl_pmux.clkr.hw;
544 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
/* Device-tree match table: binds to the MSM8996 APCS clock controller. */
547 static const struct of_device_id qcom_cpu_clk_msm8996_match_table[] = {
548 { .compatible = "qcom,msm8996-apcc" },
551 MODULE_DEVICE_TABLE(of, qcom_cpu_clk_msm8996_match_table);
553 static struct platform_driver qcom_cpu_clk_msm8996_driver = {
554 .probe = qcom_cpu_clk_msm8996_driver_probe,
556 .name = "qcom-msm8996-apcc",
557 .of_match_table = qcom_cpu_clk_msm8996_match_table,
562 MODULE_DESCRIPTION("QCOM MSM8996 CPU Clock Driver");
563 MODULE_LICENSE("GPL v2");