#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
+#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
return ret;
}
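+
+/*
+ * Estimate the active power (mW) of a CPU at the requested frequency from
+ * the "dynamic-power-coefficient" DT property: P = C * V^2 * f, with C in
+ * uW/MHz/V^2, V in mV and f in MHz.  For illustration only (values assumed,
+ * not taken from any particular platform): C = 100 at 1000 mV and 1000 MHz
+ * gives 100 * 1000^2 * 1000 / 10^9 = 100 mW.
+ */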
+static int of_est_power(unsigned long *mW, unsigned long *KHz, int cpu)
+{
+ unsigned long mV, Hz, MHz;
+ struct device *cpu_dev;
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ u32 cap;
+ u64 tmp;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return -EINVAL;
+
+	if (of_property_read_u32(np, "dynamic-power-coefficient", &cap)) {
+		of_node_put(np);
+		return -EINVAL;
+	}
+
+	/* The node is only needed for the property lookup; release it now. */
+	of_node_put(np);
+
+ Hz = *KHz * 1000;
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+ if (IS_ERR(opp))
+ return -EINVAL;
+
+ mV = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
+
+ MHz = Hz / 1000000;
+ tmp = (u64)cap * mV * mV * MHz;
+ do_div(tmp, 1000000000);
+
+ *mW = (unsigned long)tmp;
+ *KHz = Hz / 1000;
+
+ return 0;
+}
+
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
+ struct em_data_callback em_cb = EM_DATA_CB(of_est_power);
u32 cur_cluster = cpu_to_cluster(policy->cpu);
struct device *cpu_dev;
int ret;
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
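+	/* Build one EM state per OPP; ret still holds the OPP count here. */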
+ em_register_freq_domain(policy->cpus, ret, &em_cb);
+
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);