/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpufreq.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
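/*
 * Per-CPU relative compute capacity, normalized so that the most capable
 * CPU in the system reads SCHED_CAPACITY_SCALE (1024). The scheduler
 * queries it through arch_scale_cpu_capacity() below.
 */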
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}
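/*
 * Expose cpu_capacity through sysfs: reading returns the current scale,
 * writing applies the new value to every CPU in the same core_sibling
 * mask, under cpu_scale_mutex.
 */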
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n",
			arch_scale_cpu_capacity(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int this_cpu = cpu->dev.id, i;
	unsigned long new_capacity;
	ssize_t ret;

	if (count) {
		ret = kstrtoul(buf, 0, &new_capacity);
		if (ret)
			return ret;
		if (new_capacity > SCHED_CAPACITY_SCALE)
			return -EINVAL;

		mutex_lock(&cpu_scale_mutex);
		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
			set_capacity_scale(i, new_capacity);
		mutex_unlock(&cpu_scale_mutex);
	}

	return count;
}

static DEVICE_ATTR_RW(cpu_capacity);
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
static u32 capacity_scale;
static u32 *raw_capacity;
static bool cap_parsing_failed;
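/*
 * Read the optional "capacity-dmips-mhz" DT property for one CPU node
 * and stash the raw value, tracking the maximum seen so the values can
 * be normalized later. A CPU missing the property invalidates the lot.
 */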
static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return;

	ret = of_property_read_u32(cpu_node,
				   "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
			 cpu_node->full_name, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %s raw capacity\n",
			       cpu_node->full_name);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		kfree(raw_capacity);
	}
}
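/*
 * Scale every raw capacity so that the largest becomes
 * SCHED_CAPACITY_SCALE (1024) and the rest are proportional to it.
 */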
static void normalize_cpu_capacity(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity || cap_parsing_failed)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		set_capacity_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, arch_scale_cpu_capacity(NULL, cpu));
	}
	mutex_unlock(&cpu_scale_mutex);
}
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
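/*
 * cpufreq policy notifier: on each CPUFREQ_NOTIFY, fold the policy's
 * maximum frequency (cpuinfo.max_freq is in kHz, so /1000 gives MHz)
 * into the raw dmips/MHz values. Once every possible CPU has been
 * visited, normalize the capacities and unregister via a workqueue.
 */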
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (cap_parsing_failed || cap_parsing_done)
		return 0;

	switch (val) {
	case CPUFREQ_NOTIFY:
		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
			 cpumask_pr_args(policy->related_cpus),
			 cpumask_pr_args(cpus_to_visit));
		cpumask_andnot(cpus_to_visit,
			       cpus_to_visit,
			       policy->related_cpus);
		for_each_cpu(cpu, policy->related_cpus) {
			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
					    policy->cpuinfo.max_freq / 1000UL;
			capacity_scale = max(raw_capacity[cpu], capacity_scale);
		}
		if (cpumask_empty(cpus_to_visit)) {
			normalize_cpu_capacity();
			kfree(raw_capacity);
			pr_debug("cpu_capacity: parsing done\n");
			cap_parsing_done = true;
			schedule_work(&parsing_done_work);
		}
	}
	return 0;
}
static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	/*
	 * On ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering the cpufreq notifier.
	 */
	if (!acpi_disabled || cap_parsing_failed)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}
	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);
static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);	/* don't leak the temporary mask */
}

#else
static int __init free_raw_capacity(void)
{
	kfree(raw_capacity);

	return 0;
}
core_initcall(free_raw_capacity);
#endif
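/*
 * Resolve the "cpu" phandle of a cpu-map node to a logical CPU number,
 * parsing that CPU's capacity along the way; returns -1 if the node
 * cannot be matched to a possible CPU.
 */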
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			parse_cpu_capacity(cpu_node, cpu);
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -1;
}
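/*
 * Parse a cpu-map "core" node: it either contains threadN children
 * (SMT) or is itself a leaf CPU; mixing the two is an error.
 */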
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}
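/*
 * Parse a cpu-map "cluster" node recursively. At depth 0 only cluster
 * children are legal; each leaf cluster takes the next sequential
 * cluster_id.
 */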
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int cluster_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map) {
		cap_parsing_failed = true;
		goto out;
	}

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	normalize_cpu_capacity();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].cluster_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}
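/*
 * Record @cpuid in the core_sibling mask of every CPU sharing its
 * cluster, and in the thread_sibling mask of every CPU sharing its
 * core, keeping the masks symmetric.
 */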
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}
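/*
 * Called on each CPU as it comes up: derive its place in the topology
 * from the MPIDR_EL1 affinity fields unless DT already populated it.
 */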
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}
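/*
 * Reset every CPU to the "unknown" topology: no cluster, core 0, no
 * thread, and sibling masks containing only the CPU itself.
 */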
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->cluster_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}