perf parse-events: Sort and group parsed events
[platform/kernel/linux-starfive.git] / tools / perf / arch / x86 / util / evlist.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <stdio.h>
3 #include "util/pmu.h"
4 #include "util/evlist.h"
5 #include "util/parse-events.h"
6 #include "util/event.h"
7 #include "util/pmu-hybrid.h"
8 #include "topdown.h"
9
10 static int ___evlist__add_default_attrs(struct evlist *evlist,
11                                         struct perf_event_attr *attrs,
12                                         size_t nr_attrs)
13 {
14         struct perf_cpu_map *cpus;
15         struct evsel *evsel, *n;
16         struct perf_pmu *pmu;
17         LIST_HEAD(head);
18         size_t i = 0;
19
20         for (i = 0; i < nr_attrs; i++)
21                 event_attr_init(attrs + i);
22
23         if (!perf_pmu__has_hybrid())
24                 return evlist__add_attrs(evlist, attrs, nr_attrs);
25
26         for (i = 0; i < nr_attrs; i++) {
27                 if (attrs[i].type == PERF_TYPE_SOFTWARE) {
28                         evsel = evsel__new(attrs + i);
29                         if (evsel == NULL)
30                                 goto out_delete_partial_list;
31                         list_add_tail(&evsel->core.node, &head);
32                         continue;
33                 }
34
35                 perf_pmu__for_each_hybrid_pmu(pmu) {
36                         evsel = evsel__new(attrs + i);
37                         if (evsel == NULL)
38                                 goto out_delete_partial_list;
39                         evsel->core.attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
40                         cpus = perf_cpu_map__get(pmu->cpus);
41                         evsel->core.cpus = cpus;
42                         evsel->core.own_cpus = perf_cpu_map__get(cpus);
43                         evsel->pmu_name = strdup(pmu->name);
44                         list_add_tail(&evsel->core.node, &head);
45                 }
46         }
47
48         evlist__splice_list_tail(evlist, &head);
49
50         return 0;
51
52 out_delete_partial_list:
53         __evlist__for_each_entry_safe(&head, n, evsel)
54                 evsel__delete(evsel);
55         return -1;
56 }
57
/*
 * Arch hook for adding the default event attrs: delegate to the hybrid-aware
 * helper, treating an empty attr array as a successful no-op.
 */
int arch_evlist__add_default_attrs(struct evlist *evlist,
				   struct perf_event_attr *attrs,
				   size_t nr_attrs)
{
	return nr_attrs ? ___evlist__add_default_attrs(evlist, attrs, nr_attrs) : 0;
}
67
68 int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
69 {
70         if (topdown_sys_has_perf_metrics() &&
71             (!lhs->pmu_name || !strncmp(lhs->pmu_name, "cpu", 3))) {
72                 /* Ensure the topdown slots comes first. */
73                 if (strcasestr(lhs->name, "slots"))
74                         return -1;
75                 if (strcasestr(rhs->name, "slots"))
76                         return 1;
77                 /* Followed by topdown events. */
78                 if (strcasestr(lhs->name, "topdown") && !strcasestr(rhs->name, "topdown"))
79                         return -1;
80                 if (!strcasestr(lhs->name, "topdown") && strcasestr(rhs->name, "topdown"))
81                         return 1;
82         }
83
84         /* Default ordering by insertion index. */
85         return lhs->core.idx - rhs->core.idx;
86 }