struct scmi_data {
int domain_id;
+ int nr_opp;
struct device *cpu_dev;
+ cpumask_var_t opp_shared_cpus;
};
static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;

static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret, nr_opp;
	struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
- struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
- cpumask_var_t opp_shared_cpus;
- bool power_scale_mw;
	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}
- if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		goto out_free_cpumask;
	}

	/*
	 * The OPP 'sharing cpus' info may come from DT through an empty opp
	 * table and opp-shared.
	 */
- ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
- if (ret || !cpumask_weight(opp_shared_cpus)) {
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
+ if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
/*
* Either opp-table is not set or no opp-shared was found.
* Use the CPU mask from SCMI to designate CPUs sharing an OPP
* table.
*/
- cpumask_copy(opp_shared_cpus, policy->cpus);
+ cpumask_copy(priv->opp_shared_cpus, policy->cpus);
}
	/*
	 * An OPP table may already have been created and populated by a CPU
	 * of the same perf domain that was initialized earlier; only ask the
	 * SCMI firmware for OPPs when none are present yet.
	 */
	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
	if (nr_opp <= 0) {
		ret = perf_ops->device_opps_add(ph, cpu_dev);
		if (ret) {
			dev_warn(cpu_dev, "failed to add opps to the device\n");
			goto out_free_cpumask;
		}

		nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
		if (nr_opp <= 0) {
			dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
				__func__, nr_opp);

			ret = -ENODEV;
			goto out_free_opp;
		}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
goto out_free_opp;
}
- power_scale_mw = perf_ops->power_scale_mw_get(ph);
- em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
- opp_shared_cpus, power_scale_mw);
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto out_free_opp;
+ priv->nr_opp = nr_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ goto out_free_opp;
}
	priv->cpu_dev = cpu_dev;

	policy->driver_data = priv;
	policy->freq_table = freq_table;
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, cpu_dev);
- free_cpumask_var(opp_shared_cpus);
return 0;
-out_free_priv:
- kfree(priv);
-
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
out_free_cpumask:
- free_cpumask_var(opp_shared_cpus);
+ free_cpumask_var(priv->opp_shared_cpus);
+
+out_free_priv:
+ kfree(priv);
return ret;
}
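
/*
 * [Illustrative aside, not part of this patch: the "empty opp table and
 *  opp-shared" case mentioned in scmi_cpufreq_init() corresponds to a DT
 *  fragment along these lines, where the table lists no OPP entries
 *  (firmware supplies them via SCMI) and only conveys the sharing
 *  information that dev_pm_opp_of_get_sharing_cpus() reads:
 *
 *	cpu_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *		opp-shared;
 *	};
 *
 *	cpu0: cpu@0 {
 *		...
 *		operating-points-v2 = <&cpu_opp_table>;
 *	};
 * ]
 */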
static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
return 0;
}
+static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+ bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
+ struct scmi_data *priv = policy->driver_data;
+
+ /*
+ * This callback will be called for each policy, but we don't need to
+ * register with EM every time. Despite not being part of the same
+ * policy, some CPUs may still share their perf-domains, and a CPU from
+ * another policy may already have registered with EM on behalf of CPUs
+ * of this policy.
+ */
+ if (!priv->nr_opp)
+ return;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
+ &em_cb, priv->opp_shared_cpus,
+ power_scale_mw);
+}
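+
+/*
+ * [Illustrative aside, not part of this patch: suppose CPU0 and CPU1 sit
+ *  in separate cpufreq policies but share one SCMI perf domain. Whichever
+ *  CPU is initialized first finds no OPPs, creates the table and records
+ *  priv->nr_opp; init for the other finds the shared table already
+ *  populated and leaves its priv->nr_opp at 0. Only the first policy
+ *  therefore reaches em_dev_register_perf_domain(), with opp_shared_cpus
+ *  covering both CPUs.]
+ */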
+
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
	.flags	= CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		  CPUFREQ_IS_COOLING_DEV,
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
+ .register_em = scmi_cpufreq_register_em,
};
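
/*
 * [Context, paraphrased rather than quoted from the companion cpufreq
 *  core change: once a new policy is fully initialized, cpufreq_online()
 *  invokes the driver hook roughly as
 *
 *	if (cpufreq_driver->register_em)
 *		cpufreq_driver->register_em(policy);
 *
 *  so drivers no longer need to call em_dev_register_perf_domain() from
 *  their .init() path.]
 */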
static int scmi_cpufreq_probe(struct scmi_device *sdev)