clk: qcom: clk-rcg2: Add support for duty-cycle for RCG
author     Taniya Das <tdas@codeaurora.org>
           Sun, 25 Apr 2021 07:08:22 +0000 (12:38 +0530)
committer  Stephen Boyd <sboyd@kernel.org>
           Wed, 26 May 2021 03:36:21 +0000 (20:36 -0700)
The root clock generators with an MND divider have the capability to
change the duty-cycle by updating the 'D' value; the resulting output
duty-cycle is D/N. Add clock ops that check the boundary conditions and
allow the consumer to set the desired duty-cycle.

Signed-off-by: Taniya Das <tdas@codeaurora.org>
Link: https://lore.kernel.org/r/1619334502-9880-2-git-send-email-tdas@codeaurora.org
[sboyd@kernel.org: Remove _val everywhere]
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
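
For context, these ops are reached through the generic clk duty-cycle API.
Below is a minimal consumer-side sketch, assuming a hypothetical driver and
a clock named "core" (neither is part of this patch):

#include <linux/clk.h>
#include <linux/device.h>

static int example_set_quarter_duty(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "core");	/* "core" is an assumed clock name */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* num = 1, den = 4 -> request a 25 % duty-cycle */
	ret = clk_set_duty_cycle(clk, 1, 4);
	if (ret)
		return ret;

	/* Read it back as a percentage (scale = 100) */
	dev_info(dev, "duty-cycle: %d%%\n", clk_get_scaled_duty_cycle(clk, 100));

	return 0;
}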
drivers/clk/qcom/clk-rcg2.c

index 05ff3b0..e1b1b42 100644
@@ -357,6 +357,83 @@ static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
        return __clk_rcg2_set_rate(hw, rate, FLOOR);
 }
 
+static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+{
+       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+       u32 notn_m, n, m, d, not2d, mask;
+
+       if (!rcg->mnd_width) {
+               /* 50 % duty-cycle for Non-MND RCGs */
+               duty->num = 1;
+               duty->den = 2;
+               return 0;
+       }
+
+       regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
+       regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
+       regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
+
+       if (!not2d && !m && !notn_m) {
+               /* 50 % duty-cycle always */
+               duty->num = 1;
+               duty->den = 2;
+               return 0;
+       }
+
+       mask = BIT(rcg->mnd_width) - 1;
+
+       d = ~(not2d) & mask;
+       d = DIV_ROUND_CLOSEST(d, 2);
+
+       n = (~(notn_m) + m) & mask;
+
+       duty->num = d;
+       duty->den = n;
+
+       return 0;
+}
+
+static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+{
+       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+       u32 notn_m, n, m, d, not2d, mask, duty_per;
+       int ret;
+
+       /* Duty-cycle cannot be modified for non-MND RCGs */
+       if (!rcg->mnd_width)
+               return -EINVAL;
+
+       mask = BIT(rcg->mnd_width) - 1;
+
+       regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
+       regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
+
+       n = (~(notn_m) + m) & mask;
+
+       duty_per = (duty->num * 100) / duty->den;
+
+       /* Calculate 2d value */
+       d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);
+
+       /* Check bit widths of 2d. If D is too big reduce duty cycle. */
+       if (d > mask)
+               d = mask;
+
+       if ((d / 2) > (n - m))
+               d = (n - m) * 2;
+       else if ((d / 2) < (m / 2))
+               d = m;
+
+       not2d = ~d & mask;
+
+       ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
+                                not2d);
+       if (ret)
+               return ret;
+
+       return update_config(rcg);
+}
+
 const struct clk_ops clk_rcg2_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
@@ -365,6 +442,8 @@ const struct clk_ops clk_rcg2_ops = {
        .determine_rate = clk_rcg2_determine_rate,
        .set_rate = clk_rcg2_set_rate,
        .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+       .get_duty_cycle = clk_rcg2_get_duty_cycle,
+       .set_duty_cycle = clk_rcg2_set_duty_cycle,
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_ops);
 
@@ -376,6 +455,8 @@ const struct clk_ops clk_rcg2_floor_ops = {
        .determine_rate = clk_rcg2_determine_floor_rate,
        .set_rate = clk_rcg2_set_floor_rate,
        .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+       .get_duty_cycle = clk_rcg2_get_duty_cycle,
+       .set_duty_cycle = clk_rcg2_set_duty_cycle,
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
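
As a worked example of the D programming above, here is a standalone
user-space sketch that mirrors the arithmetic in clk_rcg2_set_duty_cycle()
and the read-back in clk_rcg2_get_duty_cycle(); the mnd_width, M and N
values are assumptions chosen only for illustration.

/* Standalone sketch (not kernel code) mirroring the 2*D computation above.
 * Assumed example configuration: mnd_width = 8, M = 1, N = 4, 25 % requested.
 * Build with: cc -o duty-sketch duty-sketch.c
 */
#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	uint32_t mnd_width = 8, m = 1, n = 4;	/* assumed MND configuration */
	uint32_t num = 1, den = 4;		/* requested duty-cycle: 25 % */
	uint32_t mask = (1U << mnd_width) - 1;
	uint32_t duty_per = (num * 100) / den;

	/* 2*D scaled against N, as clk_rcg2_set_duty_cycle() does */
	uint32_t d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/* Clamp to the register width and the valid M/N window */
	if (d > mask)
		d = mask;
	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	/* The D register holds the inverted value, as in the patch */
	printf("2*D = %u, NOT(2*D) written to the D register = 0x%x\n",
	       d, ~d & mask);
	printf("read back: duty = %u/%u\n", d / 2, n);
	return 0;
}

With the assumed values this prints NOT(2*D) = 0xfd and a read-back
duty-cycle of 1/4, matching the 25 % request.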