1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/list.h>
3 #include <linux/zalloc.h>
4 #include <subcmd/pager.h>
14 #include "print-events.h"
 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
18 * directory contains "cpus" file. All PMUs belonging to core_pmus
19 * must have pmu->is_core=1. If there are more than one PMU in
20 * this list, perf interprets it as a heterogeneous platform.
 * (FWIW, certain ARM platforms with heterogeneous cores use a
 * homogeneous PMU, and thus they are treated as a homogeneous
 * platform by perf because core_pmus will have only one entry)
24 * other_pmus: All other PMUs which are not part of core_pmus list. It doesn't
25 * matter whether PMU is present per SMT-thread or outside of the
26 * core in the hw. For e.g., an instance of AMD ibs_fetch// and
27 * ibs_op// PMUs is present in each hw SMT thread, however they
28 * are captured under other_pmus. PMUs belonging to other_pmus
29 * must have pmu->is_core=0 but pmu->is_uncore could be 0 or 1.
/* PMUs with pmu->is_core == 1 (see the comment block above). */
static LIST_HEAD(core_pmus);
/* Every other PMU: uncore, software, per-thread non-core, etc. */
static LIST_HEAD(other_pmus);
/* True once every core PMU has been read from sysfs. */
static bool read_sysfs_core_pmus;
/* True once all PMUs (core and other) have been read from sysfs. */
static bool read_sysfs_all_pmus;
/*
 * Free every PMU on both lists and clear the sysfs-read flags so that
 * a later lookup re-reads sysfs from scratch.
 */
void perf_pmus__destroy(void)
	struct perf_pmu *pmu, *tmp;

	/* _safe variants: entries are deleted while iterating. */
	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		perf_pmu__delete(pmu);
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		perf_pmu__delete(pmu);
	read_sysfs_core_pmus = false;
	read_sysfs_all_pmus = false;
/*
 * Linear search of both lists for an already-loaded PMU whose name or
 * alias_name matches @name. Does not touch sysfs.
 */
static struct perf_pmu *pmu_find(const char *name)
	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
/*
 * Find a PMU by name, loading it from sysfs on first use.
 */
struct perf_pmu *perf_pmus__find(const char *name)
	/*
	 * Once a PMU is loaded it stays on its list, so we avoid
	 * repeatedly reading/parsing the PMU format definitions from
	 * sysfs.
	 */
	/* Everything is already cached; nothing more to read from sysfs. */
	if (read_sysfs_all_pmus)

	core_pmu = is_pmu_core(name);
	/* All core PMUs are cached, so a core name cannot appear anew. */
	if (core_pmu && read_sysfs_core_pmus)

	dirfd = perf_pmu__event_source_devices_fd();
	/* Parse the PMU from sysfs and add it to the matching list. */
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
/*
 * As perf_pmus__find() but takes an already-open sysfs event source
 * devices dirfd, avoiding a second open while scanning the directory.
 */
static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
	struct perf_pmu *pmu;

	/*
	 * Once a PMU is loaded it stays on its list, so we avoid
	 * repeatedly reading/parsing the PMU format definitions from
	 * sysfs.
	 */
	pmu = pmu_find(name);

	/* Everything is already cached; nothing more to read from sysfs. */
	if (read_sysfs_all_pmus)

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)

	/* Parse the PMU from sysfs and add it to the matching list. */
	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
/* Add all pmus in sysfs to pmu list: */
static void pmu_read_sysfs(bool core_only)
	/* Already read at the requested (or broader) scope? */
	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))

	fd = perf_pmu__event_source_devices_fd();

	while ((dent = readdir(dir))) {
		/* Skip the self/parent directory entries. */
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
		if (core_only && !is_pmu_core(dent->d_name))
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);

	/*
	 * No core PMU found in sysfs: synthesize a placeholder so perf
	 * still has a core PMU to work with.
	 */
	if (list_empty(&core_pmus)) {
		if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
			pr_err("Failure to set up any core PMUs\n");
	if (!list_empty(&core_pmus)) {
		read_sysfs_core_pmus = true;
		/* NOTE(review): presumably only set when !core_only — guard elided from this view. */
		read_sysfs_all_pmus = true;
/*
 * Search both lists for a PMU with the given kernel-assigned type
 * number; no sysfs access.
 */
static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)

	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
/*
 * Find a PMU by type number, reading all of sysfs on a cache miss and
 * then retrying the lookup once.
 */
struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	/* Hit, or nothing left to load — the first answer stands. */
	if (pmu || read_sysfs_all_pmus)

	pmu_read_sysfs(/*core_only=*/false);
	pmu = __perf_pmus__find_by_type(type);
/*
 * PMU iterator: if @pmu is NULL start from the beginning, otherwise
 * return the PMU after @pmu. Walks core PMUs first, then the others.
 * Returns NULL at the end.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
	/* Still on the core list when starting, or when @pmu is core. */
	bool use_core_pmus = !pmu || pmu->is_core;

	/* NOTE(review): presumably guarded so sysfs is read only on scan start — guard elided. */
	pmu_read_sysfs(/*core_only=*/false);
	pmu = list_prepare_entry(pmu, &core_pmus, list);

	list_for_each_entry_continue(pmu, &core_pmus, list)

	/* Core list exhausted: continue onto the other PMUs. */
	pmu = list_prepare_entry(pmu, &other_pmus, list);

	list_for_each_entry_continue(pmu, &other_pmus, list)
/* As perf_pmus__scan() but restricted to core PMUs. */
struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
	/* Only the core PMUs need to be loaded for this walk. */
	pmu_read_sysfs(/*core_only=*/true);
	pmu = list_prepare_entry(pmu, &core_pmus, list);

	list_for_each_entry_continue(pmu, &core_pmus, list)
/*
 * Return a PMU whose name matches @str, also accepting the name with
 * a leading "uncore_" or "cpu_" prefix stripped.
 */
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		/* Exact match. */
		if (!strcmp(pmu->name, str))
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
/*
 * Number of PMUs usable for memory events. __weak so architectures
 * with dedicated memory PMUs can override it.
 */
int __weak perf_pmus__num_mem_pmus(void)
	/* All core PMUs are for mem events. */
	return perf_pmus__num_core_pmus();
/** Struct for ordering events as output in perf list. */
	/** PMU for event. */
	const struct perf_pmu *pmu;
	/**
	 * Optional event for name, desc, etc. If not present then this is a
	 * selectable PMU and the event name is shown as "//".
	 */
	const struct perf_pmu_alias *event;
	/** Is the PMU for the CPU? */
/*
 * qsort comparator for struct sevent, fixing perf-list output order:
 * by presence of a description, then topic, then core (CPU) events
 * first, then PMU name, and finally event name.
 */
static int cmp_sevent(const void *a, const void *b)
	const struct sevent *as = a;
	const struct sevent *bs = b;
	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
	/* Defaults for selectable-PMU entries that have no event. */
	const char *a_name = "//", *a_desc = NULL, *a_topic = "";
	const char *b_name = "//", *b_desc = NULL, *b_topic = "";

		a_name = as->event->name;
		a_desc = as->event->desc;
		a_topic = as->event->topic ?: "";
		a_pmu_name = as->event->pmu_name;

		b_name = bs->event->name;
		b_desc = bs->event->desc;
		b_topic = bs->event->topic ?: "";
		b_pmu_name = bs->event->pmu_name;

	/* Put extra events last. */
	if (!!a_desc != !!b_desc)
		return !!a_desc - !!b_desc;

	/* Order by topics. */
	ret = strcmp(a_topic, b_topic);

	/* Order CPU core events to be first */
	if (as->is_cpu != bs->is_cpu)
		return as->is_cpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		/* Fall back to the PMU's own name when the event lacks one. */
		a_pmu_name = a_pmu_name ?: (as->pmu->name ?: "");
		b_pmu_name = b_pmu_name ?: (bs->pmu->name ?: "");
		ret = strcmp(a_pmu_name, b_pmu_name);

	/* Order by event name. */
	return strcmp(a_name, b_name);
/*
 * Two (sorted-adjacent) sevents are duplicates when both the event
 * name and the effective PMU name compare equal.
 */
static bool pmu_alias_is_duplicate(struct sevent *alias_a,
				   struct sevent *alias_b)
	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
	/* "//" is the display name for selectable-PMU entries. */
	const char *a_name = "//", *b_name = "//";

	if (alias_a->event) {
		a_name = alias_a->event->name;
		a_pmu_name = alias_a->event->pmu_name;
	if (alias_b->event) {
		b_name = alias_b->event->name;
		b_pmu_name = alias_b->event->pmu_name;

	/* Different names -> never duplicates */
	if (strcmp(a_name, b_name))

	/* Don't remove duplicates for different PMUs */
	a_pmu_name = a_pmu_name ?: (alias_a->pmu->name ?: "");
	b_pmu_name = b_pmu_name ?: (alias_b->pmu->name ?: "");
	return strcmp(a_pmu_name, b_pmu_name) == 0;
/* NOTE(review): appears to return a - b clamped at zero (remaining buffer space for snprintf) — body not visible here, confirm. */
static int sub_non_neg(int a, int b)
/*
 * Render "pmu/alias,term=val,.../" into @buf, bounding each write
 * with sub_non_neg() so @buf is never overrun.
 */
static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
			  const struct perf_pmu_alias *alias)
	struct parse_events_term *term;
	int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);

	/* Append each string-valued term as ",config=value". */
	list_for_each_entry(term, &alias->terms, list) {
		if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
			used += snprintf(buf + used, sub_non_neg(len, used),
					 ",%s=%s", term->config,

	/* Close the term list only if space remains. */
	if (sub_non_neg(len, used) > 0) {

	if (sub_non_neg(len, used) > 0) {
/*
 * Backend of "perf list" for PMU events: gather every PMU alias (plus
 * a "pmu//" entry for each selectable PMU), sort for display, skip
 * duplicates and hand each entry to the print callback.
 */
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
	struct perf_pmu *pmu;
	struct perf_pmu_alias *event;
	struct sevent *aliases;

	/* First pass: count entries to size the array. */
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		list_for_each_entry(event, &pmu->aliases, list)

	aliases = zalloc(sizeof(struct sevent) * len);
		pr_err("FATAL: not enough memory to print PMU events\n");

	/* Second pass: fill the array with every alias of every PMU. */
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool is_cpu = pmu->is_core;

		list_for_each_entry(event, &pmu->aliases, list) {
			aliases[j].event = event;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;

		/* Selectable PMUs get a pseudo entry shown as "pmu//". */
		if (pmu->selectable) {
			aliases[j].event = NULL;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;

	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (j = 0; j < len; j++) {
		const char *name, *alias = NULL, *scale_unit = NULL,
			*desc = NULL, *long_desc = NULL,
			*encoding_desc = NULL, *topic = NULL,
		bool deprecated = false;

		/* Skip duplicates */
		if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))

		if (!aliases[j].event) {
			/* A selectable event. */
			pmu_name = aliases[j].pmu->name;
			/* buf holds NUL-separated strings; buf_used tracks the next slot. */
			buf_used = snprintf(buf, sizeof(buf), "%s//", pmu_name) + 1;

			if (aliases[j].event->desc) {
				name = aliases[j].event->name;

				name = format_alias(buf, sizeof(buf), aliases[j].pmu,
				if (aliases[j].is_cpu) {
					name = aliases[j].event->name;

				buf_used = strlen(buf) + 1;
			pmu_name = aliases[j].event->pmu_name ?: (aliases[j].pmu->name ?: "");
			/* Only report scale/unit when they carry information. */
			if (strlen(aliases[j].event->unit) || aliases[j].event->scale != 1.0) {
				scale_unit = buf + buf_used;
				buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
						     "%G%s", aliases[j].event->scale,
						     aliases[j].event->unit) + 1;
			desc = aliases[j].event->desc;
			long_desc = aliases[j].event->long_desc;
			topic = aliases[j].event->topic;
			/* "pmu/encoding-string/" appended after the other strings in buf. */
			encoding_desc = buf + buf_used;
			buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
					     "%s/%s/", pmu_name, aliases[j].event->str) + 1;
			deprecated = aliases[j].event->deprecated;

		print_cb->print_event(print_state,

	if (printed && pager_in_use())
481 bool perf_pmus__have_event(const char *pname, const char *name)
483 struct perf_pmu *pmu = perf_pmus__find(pname);
485 return pmu && perf_pmu__have_event(pmu, name);
/* Number of core PMUs, counted by walking the core scan iterator. */
int perf_pmus__num_core_pmus(void)
	struct perf_pmu *pmu = NULL;

	/* NOTE(review): counter variable/return elided from this view. */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
/*
 * Does the kernel accept the extended event type, i.e. the PMU type
 * number encoded in the high bits (PERF_PMU_TYPE_SHIFT) of
 * attr.config for PERF_TYPE_HARDWARE events on every core PMU?
 */
static bool __perf_pmus__supports_extended_type(void)
	struct perf_pmu *pmu = NULL;

	/* Extended types only matter on heterogeneous (>1 core PMU) systems. */
	if (perf_pmus__num_core_pmus() <= 1)

	/* Probe cycles on each core PMU with its type in the high config bits. */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
/* Cached answer, computed once via pthread_once() below. */
static bool perf_pmus__do_support_extended_type;

/* pthread_once() callback filling the cached answer. */
static void perf_pmus__init_supports_extended_type(void)
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
523 bool perf_pmus__supports_extended_type(void)
525 static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;
527 pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);
529 return perf_pmus__do_support_extended_type;
/*
 * Name of the default PMU: the first loaded core PMU if any,
 * otherwise the first core PMU named in sysfs, falling back to "cpu".
 * Returns a strdup()ed string the caller must free (NULL only on
 * allocation failure).
 */
char *perf_pmus__default_pmu_name(void)
	/* Prefer the in-memory list when PMUs were already loaded. */
	if (!list_empty(&core_pmus))
		return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);

	fd = perf_pmu__event_source_devices_fd();
		/* sysfs unavailable: fall back to the traditional name. */
		return strdup("cpu");

		return strdup("cpu");

	/* Scan sysfs for the first core PMU directory. */
	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
		if (is_pmu_core(dent->d_name)) {
			result = strdup(dent->d_name);

	return result ?: strdup("cpu");
565 struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
567 struct perf_pmu *pmu = evsel->pmu;
570 pmu = perf_pmus__find_by_type(evsel->core.attr.type);
571 ((struct evsel *)evsel)->pmu = pmu;