/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB         4
#define NUM_COUNTERS_L2         4
#define MAX_COUNTERS            NUM_COUNTERS_NB

#define RDPMC_BASE_NB           6
#define RDPMC_BASE_L2           10

#define COUNTER_SHIFT           16

struct amd_uncore {
        int id;
        int refcnt;
        int cpu;
        int num_counters;
        int rdpmc_base;
        u32 msr_base;
        cpumask_t *active_mask;
        struct pmu *pmu;
        struct perf_event *events[MAX_COUNTERS];
        struct amd_uncore *free_when_cpu_online;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_l2;

static struct pmu amd_nb_pmu;
static struct pmu amd_l2_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_l2_active_mask;

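/*
 * amd_nb_active_mask / amd_l2_active_mask track, for each NB / L2 domain, the
 * single CPU that is designated to host uncore events. They back the
 * "cpumask" sysfs attribute defined below, which tools such as perf read so
 * that each uncore event is opened on exactly one CPU per domain.
 */
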
static bool is_nb_event(struct perf_event *event)
{
        return event->pmu->type == amd_nb_pmu.type;
}

static bool is_l2_event(struct perf_event *event)
{
        return event->pmu->type == amd_l2_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
        if (is_nb_event(event) && amd_uncore_nb)
                return *per_cpu_ptr(amd_uncore_nb, event->cpu);
        else if (is_l2_event(event) && amd_uncore_l2)
                return *per_cpu_ptr(amd_uncore_l2, event->cpu);

        return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, new;
        s64 delta;

        /*
         * since we do not enable counter overflow interrupts,
         * we do not have to worry about prev_count changing on us
         */
        prev = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new);
        local64_set(&hwc->prev_count, new);
        delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
        delta >>= COUNTER_SHIFT;
        local64_add(delta, &event->count);
}

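/*
 * Note on the COUNTER_SHIFT arithmetic in amd_uncore_read(): shifting both
 * raw reads left by COUNTER_SHIFT and the difference back right again
 * sign-extends the delta to the hardware counter width (presumably 48 bits,
 * hence 64 - 48 = 16), so a counter wrap still produces the right increment.
 * Worked example with 48-bit counters: prev = 0xffffffffffff and new = 0x5
 * give (new << 16) - (prev << 16) = 0x60000 (mod 2^64), and 0x60000 >> 16 = 6,
 * i.e. six events occurred across the wrap.
 */
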
static void amd_uncore_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (flags & PERF_EF_RELOAD)
                wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

        hwc->state = 0;
        wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
        perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
        hwc->state |= PERF_HES_STOPPED;

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                amd_uncore_read(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        /* are we already assigned? */
        if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
                goto out;

        for (i = 0; i < uncore->num_counters; i++) {
                if (uncore->events[i] == event) {
                        hwc->idx = i;
                        goto out;
                }
        }

        /* if not, take the first available counter */
        hwc->idx = -1;
        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
                        hwc->idx = i;
                        break;
                }
        }

out:
        if (hwc->idx == -1)
                return -EBUSY;

        hwc->config_base = uncore->msr_base + (2 * hwc->idx);
        hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
        hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                amd_uncore_start(event, PERF_EF_RELOAD);

        return 0;
}

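/*
 * MSR layout assumed by amd_uncore_add(): control and counter registers are
 * interleaved, so counter idx uses PERF_CTL at msr_base + 2 * idx and
 * PERF_CTR at msr_base + 2 * idx + 1, while the RDPMC indices of the uncore
 * counters are contiguous starting at RDPMC_BASE_NB / RDPMC_BASE_L2
 * (event_base_rdpmc = rdpmc_base + idx).
 */
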
static void amd_uncore_del(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        amd_uncore_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], event, NULL) == event)
                        break;
        }

        hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
        struct amd_uncore *uncore;
        struct hw_perf_event *hwc = &event->hw;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * NB and L2 counters (MSRs) are shared across all cores that share the
         * same NB / L2 cache. Interrupts can be directed to a single target
         * core, however, event counts generated by processes running on other
         * cores cannot be masked out. So we do not support sampling and
         * per-thread events.
         */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        /* NB and L2 counters do not have usr/os/guest/host bits */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
            event->attr.exclude_host || event->attr.exclude_guest)
                return -EINVAL;

        /* and we do not enable counter overflow interrupts */
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;

        if (event->cpu < 0)
                return -EINVAL;

        uncore = event_to_amd_uncore(event);
        if (!uncore)
                return -ENODEV;

        /*
         * since request can come in to any of the shared cores, we will remap
         * to a single common cpu.
         */
        event->cpu = uncore->cpu;

        return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        cpumask_t *active_mask;
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu->type == amd_nb_pmu.type)
                active_mask = &amd_nb_active_mask;
        else if (pmu->type == amd_l2_pmu.type)
                active_mask = &amd_l2_active_mask;
        else
                return 0;

        return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

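/*
 * The "cpumask" attribute is exported in the PMU's sysfs directory (e.g.
 * /sys/bus/event_source/devices/amd_nb/cpumask). Userspace is expected to
 * read it and open uncore events only on the listed CPUs, matching the
 * event->cpu remapping done in amd_uncore_event_init().
 */
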
static struct attribute *amd_uncore_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group amd_uncore_attr_group = {
        .attrs = amd_uncore_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        NULL,
};

static struct attribute_group amd_uncore_format_group = {
        .name = "format",
        .attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
        &amd_uncore_attr_group,
        &amd_uncore_format_group,
        NULL,
};

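/*
 * "event" spans config bits 0-7 and 32-35, mirroring the split event-select
 * field of the NB/L2I PERF_CTL registers (the same bits that
 * AMD64_RAW_EVENT_MASK_NB keeps in amd_uncore_event_init()); "umask" occupies
 * config bits 8-15.
 */
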
static struct pmu amd_nb_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .attr_groups    = amd_uncore_attr_groups,
        .name           = "amd_nb",
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
};

static struct pmu amd_l2_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .attr_groups    = amd_uncore_attr_groups,
        .name           = "amd_l2",
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
        return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
                        cpu_to_node(cpu));
}

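/*
 * CPU hotplug lifecycle of the per-cpu amd_uncore structures:
 *   prepare  - amd_uncore_cpu_up_prepare() allocates a fresh struct per PMU
 *   starting - amd_uncore_cpu_starting() computes the NB / L2 id and, if an
 *              online sibling already owns that id, points this cpu at the
 *              sibling's struct and marks the fresh one for freeing
 *   online   - amd_uncore_cpu_online() frees the duplicate and sets the
 *              active cpumask bit for the owning cpu
 *   teardown - amd_uncore_cpu_down_prepare() migrates the perf context to an
 *              online sibling, and amd_uncore_cpu_dead() drops the refcount,
 *              freeing the struct when it reaches zero
 */
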
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
        struct amd_uncore *uncore_nb = NULL, *uncore_l2;

        if (amd_uncore_nb) {
                uncore_nb = amd_uncore_alloc(cpu);
                if (!uncore_nb)
                        goto fail;
                uncore_nb->cpu = cpu;
                uncore_nb->num_counters = NUM_COUNTERS_NB;
                uncore_nb->rdpmc_base = RDPMC_BASE_NB;
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
                uncore_nb->pmu = &amd_nb_pmu;
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }

        if (amd_uncore_l2) {
                uncore_l2 = amd_uncore_alloc(cpu);
                if (!uncore_l2)
                        goto fail;
                uncore_l2->cpu = cpu;
                uncore_l2->num_counters = NUM_COUNTERS_L2;
                uncore_l2->rdpmc_base = RDPMC_BASE_L2;
                uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
                uncore_l2->active_mask = &amd_l2_active_mask;
                uncore_l2->pmu = &amd_l2_pmu;
                *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
        }

        return 0;

fail:
        if (amd_uncore_nb)
                *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
        kfree(uncore_nb);
        return -ENOMEM;
}

static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
                               struct amd_uncore * __percpu *uncores)
{
        unsigned int cpu;
        struct amd_uncore *that;

        for_each_online_cpu(cpu) {
                that = *per_cpu_ptr(uncores, cpu);

                if (!that)
                        continue;

                if (this == that)
                        continue;

                if (this->id == that->id) {
                        that->free_when_cpu_online = this;
                        this = that;
                        break;
                }
        }

        this->refcnt++;
        return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
        unsigned int eax, ebx, ecx, edx;
        struct amd_uncore *uncore;

        if (amd_uncore_nb) {
                uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
                uncore->id = ecx & 0xff;

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
        }

        if (amd_uncore_l2) {
                unsigned int apicid = cpu_data(cpu).apicid;
                unsigned int nshared;

                uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
                cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
                nshared = ((eax >> 14) & 0xfff) + 1;
                uncore->id = apicid - (apicid % nshared);

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
                *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
        }

        return 0;
}

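/*
 * Example of the id computation above: CPUID 0x8000001e ECX[7:0] is the node
 * id, so all cores behind one northbridge share an NB uncore. For L2, CPUID
 * 0x8000001d with cache index 2 (the L2) reports in EAX[25:14] the number of
 * logical CPUs sharing the cache minus one; with nshared = 2 and apicid = 5,
 * id = 5 - (5 % 2) = 4, the same value its sibling with apicid 4 computes, so
 * both CPUs end up sharing one amd_uncore.
 */
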
static void uncore_online(unsigned int cpu,
                          struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        kfree(uncore->free_when_cpu_online);
        uncore->free_when_cpu_online = NULL;

        if (cpu == uncore->cpu)
                cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_online(cpu, amd_uncore_nb);

        if (amd_uncore_l2)
                uncore_online(cpu, amd_uncore_l2);

        return 0;
}

static void uncore_down_prepare(unsigned int cpu,
                                struct amd_uncore * __percpu *uncores)
{
        unsigned int i;
        struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

        if (this->cpu != cpu)
                return;

        /* this cpu is going down, migrate to a shared sibling if possible */
        for_each_online_cpu(i) {
                struct amd_uncore *that = *per_cpu_ptr(uncores, i);

                if (cpu == i)
                        continue;

                if (this == that) {
                        perf_pmu_migrate_context(this->pmu, cpu, i);
                        cpumask_clear_cpu(cpu, that->active_mask);
                        cpumask_set_cpu(i, that->active_mask);
                        that->cpu = i;
                        break;
                }
        }
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_down_prepare(cpu, amd_uncore_nb);

        if (amd_uncore_l2)
                uncore_down_prepare(cpu, amd_uncore_l2);

        return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        if (cpu == uncore->cpu)
                cpumask_clear_cpu(cpu, uncore->active_mask);

        if (!--uncore->refcnt)
                kfree(uncore);
        *per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_dead(cpu, amd_uncore_nb);

        if (amd_uncore_l2)
                uncore_dead(cpu, amd_uncore_l2);

        return 0;
}

static int __init amd_uncore_init(void)
{
        int ret = -ENODEV;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                goto fail_nodev;

        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                goto fail_nodev;

        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_nb) {
                        ret = -ENOMEM;
                        goto fail_nb;
                }
                ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
                if (ret)
                        goto fail_nb;

                pr_info("perf: AMD NB counters detected\n");
                ret = 0;
        }

        if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
                amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_l2) {
                        ret = -ENOMEM;
                        goto fail_l2;
                }
                ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
                if (ret)
                        goto fail_l2;

                pr_info("perf: AMD L2I counters detected\n");
                ret = 0;
        }

        /*
         * Install callbacks. Core will call them for each online cpu.
         */
        if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
                              "PERF_X86_AMD_UNCORE_PREP",
                              amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
                goto fail_l2;

        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
                              "AP_PERF_X86_AMD_UNCORE_STARTING",
                              amd_uncore_cpu_starting, NULL))
                goto fail_prep;
        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
                              "AP_PERF_X86_AMD_UNCORE_ONLINE",
                              amd_uncore_cpu_online,
                              amd_uncore_cpu_down_prepare))
                goto fail_start;
        return 0;

fail_start:
        cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
        cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_l2:
        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
        if (amd_uncore_l2)
                free_percpu(amd_uncore_l2);
fail_nb:
        if (amd_uncore_nb)
                free_percpu(amd_uncore_nb);

fail_nodev:
        return ret;
}
device_initcall(amd_uncore_init);
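
/*
 * Usage sketch (assuming the PMUs registered above show up as "amd_nb" and
 * "amd_l2"): amd_uncore_event_init() rejects sampling and per-task events, so
 * these counters are only usable in system-wide counting mode, e.g.:
 *
 *   perf stat -a -e amd_nb/event=0xe0,umask=0x0/ -- sleep 1
 *
 * The event/umask values above are placeholders; the real encodings are
 * family-specific and come from the BKDG.
 */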