cpufreq: scmi: Use .register_em() to register with energy model
author    Viresh Kumar <viresh.kumar@linaro.org>
          Tue, 10 Aug 2021 06:54:36 +0000 (12:24 +0530)
committer Viresh Kumar <viresh.kumar@linaro.org>
          Mon, 30 Aug 2021 05:13:00 +0000 (10:43 +0530)
Set the newly added .register_em() callback to register with the Energy
Model (EM) after the cpufreq policy is properly initialized.

Acked-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
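
For context, the callback is expected to be invoked by the cpufreq core once
the policy has been fully set up. A minimal sketch of that core-side dispatch
is shown below; it is not part of this patch and only illustrates the assumed
calling convention:

	/* In the cpufreq core, after the policy has been initialized: */
	if (cpufreq_driver->register_em)
		cpufreq_driver->register_em(policy);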
drivers/cpufreq/scmi-cpufreq.c

index 75f818d04b481a1d0f250457f0557de464cf908a..1e0cd4d165f043cf76228fde27f10a9c3a5f413b 100644
@@ -22,7 +22,9 @@
 
 struct scmi_data {
        int domain_id;
+       int nr_opp;
        struct device *cpu_dev;
+       cpumask_var_t opp_shared_cpus;
 };
 
 static struct scmi_protocol_handle *ph;
@@ -123,9 +125,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
        struct device *cpu_dev;
        struct scmi_data *priv;
        struct cpufreq_frequency_table *freq_table;
-       struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
-       cpumask_var_t opp_shared_cpus;
-       bool power_scale_mw;
 
        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
@@ -133,9 +132,15 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
-       if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
                return -ENOMEM;
 
+       if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
+               ret = -ENOMEM;
+               goto out_free_priv;
+       }
+
        /* Obtain CPUs that share SCMI performance controls */
        ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
@@ -148,14 +153,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
         * The OPP 'sharing cpus' info may come from DT through an empty opp
         * table and opp-shared.
         */
-       ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
-       if (ret || !cpumask_weight(opp_shared_cpus)) {
+       ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
+       if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
                /*
                 * Either opp-table is not set or no opp-shared was found.
                 * Use the CPU mask from SCMI to designate CPUs sharing an OPP
                 * table.
                 */
-               cpumask_copy(opp_shared_cpus, policy->cpus);
+               cpumask_copy(priv->opp_shared_cpus, policy->cpus);
        }
 
         /*
@@ -180,7 +185,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
                        goto out_free_opp;
                }
 
-               ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
+               ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
                if (ret) {
                        dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                __func__, ret);
@@ -188,21 +193,13 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
                        goto out_free_opp;
                }
 
-               power_scale_mw = perf_ops->power_scale_mw_get(ph);
-               em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
-                                           opp_shared_cpus, power_scale_mw);
-       }
-
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               ret = -ENOMEM;
-               goto out_free_opp;
+               priv->nr_opp = nr_opp;
        }
 
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
-               goto out_free_priv;
+               goto out_free_opp;
        }
 
        priv->cpu_dev = cpu_dev;
@@ -223,17 +220,16 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
        policy->fast_switch_possible =
                perf_ops->fast_switch_possible(ph, cpu_dev);
 
-       free_cpumask_var(opp_shared_cpus);
        return 0;
 
-out_free_priv:
-       kfree(priv);
-
 out_free_opp:
        dev_pm_opp_remove_all_dynamic(cpu_dev);
 
 out_free_cpumask:
-       free_cpumask_var(opp_shared_cpus);
+       free_cpumask_var(priv->opp_shared_cpus);
+
+out_free_priv:
+       kfree(priv);
 
        return ret;
 }
@@ -244,11 +240,33 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
 
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+       free_cpumask_var(priv->opp_shared_cpus);
        kfree(priv);
 
        return 0;
 }
 
+static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+       struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+       bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
+       struct scmi_data *priv = policy->driver_data;
+
+       /*
+        * This callback will be called for each policy, but we don't need to
+        * register with EM every time. Despite not being part of the same
+        * policy, some CPUs may still share their perf-domains, and a CPU from
+        * another policy may already have registered with EM on behalf of CPUs
+        * of this policy.
+        */
+       if (!priv->nr_opp)
+               return;
+
+       em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
+                                   &em_cb, priv->opp_shared_cpus,
+                                   power_scale_mw);
+}
+
 static struct cpufreq_driver scmi_cpufreq_driver = {
        .name   = "scmi",
        .flags  = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -261,6 +279,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
        .get    = scmi_cpufreq_get_rate,
        .init   = scmi_cpufreq_init,
        .exit   = scmi_cpufreq_exit,
+       .register_em    = scmi_cpufreq_register_em,
 };
 
 static int scmi_cpufreq_probe(struct scmi_device *sdev)
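
For quick reference, the EM registration done by the new callback boils down
to the call below, restated from the diff above with its arguments annotated
(all names come from the patch itself):

	/*
	 * get_cpu_device(policy->cpu) - device of the policy's first CPU
	 * priv->nr_opp                - OPP count saved in scmi_cpufreq_init()
	 * &em_cb                      - wraps scmi_get_cpu_power via EM_DATA_CB()
	 * priv->opp_shared_cpus       - CPUs sharing the OPP/performance domain
	 * power_scale_mw              - true if firmware reports power in mW
	 */
	em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
				    &em_cb, priv->opp_shared_cpus,
				    power_scale_mw);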