struct freq_qos_request throttle_freq_req;
};
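+/* Driver-wide state; per-domain qcom_cpufreq_data is allocated once at probe time */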
+static struct {
+ struct qcom_cpufreq_data *data;
+} qcom_cpufreq;
+
static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;
struct of_phandle_args args;
struct device_node *cpu_np;
struct device *cpu_dev;
- struct resource *res;
- void __iomem *base;
struct qcom_cpufreq_data *data;
int ret, index;
return ret;
index = args.args[0];
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, index);
- if (!res) {
- dev_err(dev, "failed to get mem resource %d\n", index);
- return -ENODEV;
- }
-
- if (!request_mem_region(res->start, resource_size(res), res->name)) {
- dev_err(dev, "failed to request resource %pR\n", res);
- return -EBUSY;
- }
-
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- dev_err(dev, "failed to map resource %pR\n", res);
- ret = -ENOMEM;
- goto release_region;
- }
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto unmap_base;
- }
-
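+ /* Per-domain data was mapped and allocated at probe time; just look it up by the DT domain index */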
+ data = &qcom_cpufreq.data[index];
+
data->soc_data = of_device_get_match_data(&pdev->dev);
- data->base = base;
- data->res = res;
/* HW should be in enabled state to proceed */
- if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
+ if (!(readl_relaxed(data->base + data->soc_data->reg_enable) & 0x1)) {
dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
- if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
+ if (readl_relaxed(data->base + data->soc_data->reg_dcvs_ctrl) & 0x1)
data->per_core_dcvs = true;
qcom_get_related_cpus(index, policy->cpus);
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
dev_err(dev, "Domain-%d failed to read LUT\n", index);
- goto error;
+ return ret;
}
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
dev_err(cpu_dev, "Failed to add OPPs\n");
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
if (policy_has_boost_freq(policy)) {
ret = cpufreq_enable_boost_support(policy);
if (ret)
dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
}
- ret = qcom_cpufreq_hw_lmh_init(policy, index);
- if (ret)
- goto error;
-
- return 0;
-error:
- kfree(data);
-unmap_base:
- iounmap(base);
-release_region:
- release_mem_region(res->start, resource_size(res));
- return ret;
+ return qcom_cpufreq_hw_lmh_init(policy, index);
}
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{

static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
struct device *cpu_dev;
struct clk *clk;
- int ret;
+ int ret, i, num_domains;
clk = clk_get(&pdev->dev, "xo");
if (IS_ERR(clk))
return PTR_ERR(clk);
if (ret)
return ret;
+ /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */
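+ /* Assumes each "reg" entry spans 4 u32 cells (#address-cells = <2>, #size-cells = <2>) */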
+ num_domains = of_property_count_elems_of_size(pdev->dev.of_node, "reg", sizeof(u32) * 4);
+ if (num_domains <= 0)
+ return num_domains;
+
+ qcom_cpufreq.data = devm_kcalloc(&pdev->dev, num_domains, sizeof(struct qcom_cpufreq_data),
+ GFP_KERNEL);
+ if (!qcom_cpufreq.data)
+ return -ENOMEM;
+
+ for (i = 0; i < num_domains; i++) {
+ struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
+ struct resource *res;
+ void __iomem *base;
+
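+ /* devm-managed mapping: no explicit iounmap()/release_mem_region() needed on error or exit paths */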
+ base = devm_platform_get_and_ioremap_resource(pdev, i, &res);
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev, "Failed to map resource %pR\n", res);
+ return PTR_ERR(base);
+ }
+
+ data->base = base;
+ data->res = res;
+ }
+
ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
if (ret)
dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");