arm64: topology: move store_cpu_topology() to shared code
/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
        int is_threaded = acpi_pptt_cpu_is_thread(cpu);

        /*
         * if the PPTT doesn't have thread information, assume a homogeneous
         * machine and return the current CPU's thread state.
         */
        if (is_threaded < 0)
                is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

        return !!is_threaded;
}

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
        int cpu, topology_id;

        if (acpi_disabled)
                return 0;

        for_each_possible_cpu(cpu) {
                int i, cache_id;

                topology_id = find_acpi_cpu_topology(cpu, 0);
                if (topology_id < 0)
                        return topology_id;

                if (acpi_cpu_is_threaded(cpu)) {
                        cpu_topology[cpu].thread_id = topology_id;
                        topology_id = find_acpi_cpu_topology(cpu, 1);
                        cpu_topology[cpu].core_id   = topology_id;
                } else {
                        cpu_topology[cpu].thread_id  = -1;
                        cpu_topology[cpu].core_id    = topology_id;
                }
                topology_id = find_acpi_cpu_topology_package(cpu);
                cpu_topology[cpu].package_id = topology_id;

                i = acpi_find_last_cache_level(cpu);

                if (i > 0) {
                        /*
                         * this is the only part of cpu_topology that has
                         * a direct relationship with the cache topology
                         */
                        cache_id = find_acpi_cpu_cache_topology(cpu, i);
                        if (cache_id > 0)
                                cpu_topology[cpu].llc_id = cache_id;
                }
        }

        return 0;
}
#endif
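
/*
 * Illustrative sketch, not part of the original file: a hypothetical,
 * compiled-out helper like the one below could be used to dump what the
 * PPTT walk in parse_acpi_topology() stored in cpu_topology[].
 */
#if 0	/* example only, never compiled */
static void __init debug_dump_acpi_topology(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                pr_debug("CPU%d: thread %d core %d package %d llc %d\n",
                         cpu, cpu_topology[cpu].thread_id,
                         cpu_topology[cpu].core_id,
                         cpu_topology[cpu].package_id,
                         cpu_topology[cpu].llc_id);
}
#endif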

#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt()  read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
#else
#define read_corecnt()  (0UL)
#define read_constcnt() (0UL)
#endif

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;

void update_freq_counters_refs(void)
{
        this_cpu_write(arch_core_cycles_prev, read_corecnt());
        this_cpu_write(arch_const_cycles_prev, read_constcnt());
}

static inline bool freq_counters_valid(int cpu)
{
        if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
                return false;

        if (!cpu_has_amu_feat(cpu)) {
                pr_debug("CPU%d: counters are not supported.\n", cpu);
                return false;
        }

        if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
                     !per_cpu(arch_core_cycles_prev, cpu))) {
                pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
                return false;
        }

        return true;
}

static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
{
        u64 ratio;

        if (unlikely(!max_rate || !ref_rate)) {
                pr_debug("CPU%d: invalid maximum or reference frequency.\n",
                         cpu);
                return -EINVAL;
        }

        /*
         * Pre-compute the fixed ratio between the frequency of the constant
         * reference counter and the maximum frequency of the CPU.
         *
         *                          ref_rate
         * arch_max_freq_scale =   ---------- * SCHED_CAPACITY_SCALE²
         *                          max_rate
         *
         * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
         * in order to ensure a good resolution for arch_max_freq_scale for
         * very low reference frequencies (down to the KHz range which should
         * be unlikely).
         */
        ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
        ratio = div64_u64(ratio, max_rate);
        if (!ratio) {
                WARN_ONCE(1, "Reference frequency too low.\n");
                return -EINVAL;
        }

        per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

        return 0;
}
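
/*
 * Worked example, not part of the original file: assuming a 25 MHz constant
 * reference counter (a common arch timer rate) and a 2 GHz maximum CPU
 * frequency, the ratio computed above is
 *
 *   arch_max_freq_scale = (25000000 << 20) / 2000000000 = 13107
 *
 * i.e. roughly SCHED_CAPACITY_SCALE^2 * 25 MHz / 2 GHz, which still leaves
 * plenty of resolution even though ref_rate is only ~1% of max_rate.
 */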

static void amu_scale_freq_tick(void)
{
        u64 prev_core_cnt, prev_const_cnt;
        u64 core_cnt, const_cnt, scale;

        prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
        prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

        update_freq_counters_refs();

        const_cnt = this_cpu_read(arch_const_cycles_prev);
        core_cnt = this_cpu_read(arch_core_cycles_prev);

        if (unlikely(core_cnt <= prev_core_cnt ||
                     const_cnt <= prev_const_cnt))
                return;

        /*
         *          /\core    arch_max_freq_scale
         * scale =  ------- * --------------------
         *          /\const   SCHED_CAPACITY_SCALE
         *
         * See freq_inv_set_max_ratio() for details on arch_max_freq_scale
         * and the use of SCHED_CAPACITY_SHIFT.
         */
        scale = core_cnt - prev_core_cnt;
        scale *= this_cpu_read(arch_max_freq_scale);
        scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
                          const_cnt - prev_const_cnt);

        scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
        this_cpu_write(arch_freq_scale, (unsigned long)scale);
}
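
/*
 * Worked example, not part of the original file: continuing the numbers above
 * (arch_max_freq_scale = 13107 for a 25 MHz reference and a 2 GHz maximum), a
 * CPU that ran at 1 GHz for a 10 ms tick advances the counters by
 * delta_core = 10000000 and delta_const = 250000, so
 *
 *   scale = ((10000000 * 13107) >> 10) / 250000 = 511
 *
 * i.e. about SCHED_CAPACITY_SCALE / 2, matching a CPU that ran at half its
 * maximum frequency during that tick.
 */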

static struct scale_freq_data amu_sfd = {
        .source = SCALE_FREQ_SOURCE_ARCH,
        .set_freq_scale = amu_scale_freq_tick,
};

static void amu_fie_setup(const struct cpumask *cpus)
{
        int cpu;

        /* We are already set since the last insmod of cpufreq driver */
        if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
                return;

        for_each_cpu(cpu, cpus) {
                if (!freq_counters_valid(cpu) ||
                    freq_inv_set_max_ratio(cpu,
                                           cpufreq_get_hw_max_freq(cpu) * 1000ULL,
                                           arch_timer_get_rate()))
                        return;
        }

        cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);

        topology_set_scale_freq_source(&amu_sfd, amu_fie_cpus);

        pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
                 cpumask_pr_args(cpus));
}
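
/*
 * Note added for clarity, not part of the original file: cpufreq reports
 * frequencies in kHz, so cpufreq_get_hw_max_freq() is scaled by 1000 above to
 * get Hz, matching the unit of arch_timer_get_rate(), which is what
 * freq_inv_set_max_ratio() expects for max_rate and ref_rate respectively.
 */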

static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
                                 void *data)
{
        struct cpufreq_policy *policy = data;

        if (val == CPUFREQ_CREATE_POLICY)
                amu_fie_setup(policy->related_cpus);

        /*
         * We don't need to handle CPUFREQ_REMOVE_POLICY event as the AMU
         * counters don't have any dependency on cpufreq driver once we have
         * initialized AMU support and enabled invariance. The AMU counters will
         * keep on working just fine in the absence of the cpufreq driver, and
         * for the CPUs for which there are no counters available, the last set
         * value of arch_freq_scale will remain valid as that is the frequency
         * those CPUs are running at.
         */

        return 0;
}

static struct notifier_block init_amu_fie_notifier = {
        .notifier_call = init_amu_fie_callback,
};

static int __init init_amu_fie(void)
{
        int ret;

        if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
                return -ENOMEM;

        ret = cpufreq_register_notifier(&init_amu_fie_notifier,
                                        CPUFREQ_POLICY_NOTIFIER);
        if (ret)
                free_cpumask_var(amu_fie_cpus);

        return ret;
}
core_initcall(init_amu_fie);
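
/*
 * Note added for clarity, not part of the original file: the initcall above
 * only registers the policy notifier; the per-CPU setup in amu_fie_setup()
 * runs later, whenever a cpufreq driver creates a policy and the notifier
 * fires with CPUFREQ_CREATE_POLICY.
 */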

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

static void cpu_read_corecnt(void *val)
{
        /*
         * A value of 0 can be returned if the current CPU does not support AMUs
         * or if the counter is disabled for this CPU. A return value of 0 at
         * counter read is properly handled as an error case by the users of the
         * counter.
         */
        *(u64 *)val = read_corecnt();
}

static void cpu_read_constcnt(void *val)
{
        /*
         * Return 0 if the current CPU is affected by erratum 2457168. A value
         * of 0 is also returned if the current CPU does not support AMUs or if
         * the counter is disabled. A return value of 0 at counter read is
         * properly handled as an error case by the users of the counter.
         */
        *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
                      0UL : read_constcnt();
}

static inline
int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
{
        /*
         * Abort call on counterless CPU or when interrupts are
         * disabled - can lead to deadlock in smp sync call.
         */
        if (!cpu_has_amu_feat(cpu))
                return -EOPNOTSUPP;

        if (WARN_ON_ONCE(irqs_disabled()))
                return -EPERM;

        smp_call_function_single(cpu, func, val, 1);

        return 0;
}

/*
 * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
 * below.
 */
bool cpc_ffh_supported(void)
{
        int cpu = get_cpu_with_amu_feat();

        /*
         * FFH is considered supported if there is at least one present CPU that
         * supports AMUs. Using FFH to read core and reference counters for CPUs
         * that do not support AMUs, have counters disabled or that are affected
         * by errata, will result in a return value of 0.
         *
         * This is done to allow any enabled and valid counters to be read
         * through FFH, knowing that potentially returning 0 as counter value is
         * properly handled by the users of these counters.
         */
        if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
                return false;

        return true;
}

int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
{
        int ret = -EOPNOTSUPP;

        switch ((u64)reg->address) {
        case 0x0:
                ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
                break;
        case 0x1:
                ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
                break;
        }

        if (!ret) {
                *val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
                                    reg->bit_offset);
                *val >>= reg->bit_offset;
        }

        return ret;
}
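
/*
 * Worked example, not part of the original file: for a hypothetical CPC
 * register description with bit_offset = 0 and bit_width = 64, the masking
 * above becomes GENMASK_ULL(63, 0) followed by a shift of 0, i.e. the raw
 * counter value is returned unchanged; narrower or offset register fields
 * get extracted accordingly.
 */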

int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */