perf evsel: Remove use_uncore_alias
[platform/kernel/linux-starfive.git] / tools/perf/util/parse-events.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/hw_breakpoint.h>
3 #include <linux/err.h>
4 #include <linux/list_sort.h>
5 #include <linux/zalloc.h>
6 #include <dirent.h>
7 #include <errno.h>
8 #include <sys/ioctl.h>
9 #include <sys/param.h>
10 #include "term.h"
11 #include "evlist.h"
12 #include "evsel.h"
13 #include <subcmd/parse-options.h>
14 #include "parse-events.h"
15 #include "string2.h"
16 #include "strlist.h"
17 #include "bpf-loader.h"
18 #include "debug.h"
19 #include <api/fs/tracing_path.h>
20 #include <perf/cpumap.h>
21 #include "parse-events-bison.h"
22 #include "parse-events-flex.h"
23 #include "pmu.h"
24 #include "asm/bug.h"
25 #include "util/parse-branch-options.h"
26 #include "util/evsel_config.h"
27 #include "util/event.h"
28 #include "perf.h"
29 #include "util/parse-events-hybrid.h"
30 #include "util/pmu-hybrid.h"
31 #include "tracepoint.h"
32 #include "thread_map.h"
33
34 #define MAX_NAME_LEN 100
35
36 struct perf_pmu_event_symbol {
37         char    *symbol;
38         enum perf_pmu_event_symbol_type type;
39 };
40
41 #ifdef PARSER_DEBUG
42 extern int parse_events_debug;
43 #endif
44 int parse_events_parse(void *parse_state, void *scanner);
45 static int get_config_terms(struct list_head *head_config,
46                             struct list_head *head_terms __maybe_unused);
47 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
48                                          const char *str, char *pmu_name,
49                                          struct list_head *list);
50
51 static struct perf_pmu_event_symbol *perf_pmu_events_list;
52 /*
53  * The variable indicates the number of supported pmu event symbols.
54  * 0 means not initialized and ready to init
55  * -1 means failed to init, don't try anymore
56  * >0 is the number of supported pmu event symbols
57  */
58 static int perf_pmu_events_list_num;
59
60 struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
61         [PERF_COUNT_HW_CPU_CYCLES] = {
62                 .symbol = "cpu-cycles",
63                 .alias  = "cycles",
64         },
65         [PERF_COUNT_HW_INSTRUCTIONS] = {
66                 .symbol = "instructions",
67                 .alias  = "",
68         },
69         [PERF_COUNT_HW_CACHE_REFERENCES] = {
70                 .symbol = "cache-references",
71                 .alias  = "",
72         },
73         [PERF_COUNT_HW_CACHE_MISSES] = {
74                 .symbol = "cache-misses",
75                 .alias  = "",
76         },
77         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
78                 .symbol = "branch-instructions",
79                 .alias  = "branches",
80         },
81         [PERF_COUNT_HW_BRANCH_MISSES] = {
82                 .symbol = "branch-misses",
83                 .alias  = "",
84         },
85         [PERF_COUNT_HW_BUS_CYCLES] = {
86                 .symbol = "bus-cycles",
87                 .alias  = "",
88         },
89         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
90                 .symbol = "stalled-cycles-frontend",
91                 .alias  = "idle-cycles-frontend",
92         },
93         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
94                 .symbol = "stalled-cycles-backend",
95                 .alias  = "idle-cycles-backend",
96         },
97         [PERF_COUNT_HW_REF_CPU_CYCLES] = {
98                 .symbol = "ref-cycles",
99                 .alias  = "",
100         },
101 };
102
103 struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
104         [PERF_COUNT_SW_CPU_CLOCK] = {
105                 .symbol = "cpu-clock",
106                 .alias  = "",
107         },
108         [PERF_COUNT_SW_TASK_CLOCK] = {
109                 .symbol = "task-clock",
110                 .alias  = "",
111         },
112         [PERF_COUNT_SW_PAGE_FAULTS] = {
113                 .symbol = "page-faults",
114                 .alias  = "faults",
115         },
116         [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
117                 .symbol = "context-switches",
118                 .alias  = "cs",
119         },
120         [PERF_COUNT_SW_CPU_MIGRATIONS] = {
121                 .symbol = "cpu-migrations",
122                 .alias  = "migrations",
123         },
124         [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
125                 .symbol = "minor-faults",
126                 .alias  = "",
127         },
128         [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
129                 .symbol = "major-faults",
130                 .alias  = "",
131         },
132         [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
133                 .symbol = "alignment-faults",
134                 .alias  = "",
135         },
136         [PERF_COUNT_SW_EMULATION_FAULTS] = {
137                 .symbol = "emulation-faults",
138                 .alias  = "",
139         },
140         [PERF_COUNT_SW_DUMMY] = {
141                 .symbol = "dummy",
142                 .alias  = "",
143         },
144         [PERF_COUNT_SW_BPF_OUTPUT] = {
145                 .symbol = "bpf-output",
146                 .alias  = "",
147         },
148         [PERF_COUNT_SW_CGROUP_SWITCHES] = {
149                 .symbol = "cgroup-switches",
150                 .alias  = "",
151         },
152 };
153
154 bool is_event_supported(u8 type, u64 config)
155 {
156         bool ret = true;
157         int open_return;
158         struct evsel *evsel;
159         struct perf_event_attr attr = {
160                 .type = type,
161                 .config = config,
162                 .disabled = 1,
163         };
164         struct perf_thread_map *tmap = thread_map__new_by_tid(0);
165
166         if (tmap == NULL)
167                 return false;
168
169         evsel = evsel__new(&attr);
170         if (evsel) {
171                 open_return = evsel__open(evsel, NULL, tmap);
172                 ret = open_return >= 0;
173
174                 if (open_return == -EACCES) {
175                         /*
176                          * This happens if the paranoid value
177                          * /proc/sys/kernel/perf_event_paranoid is set to 2.
178                          * Re-run with exclude_kernel set; we don't do that
179                          * by default as some ARM machines do not support it.
180                          *
181                          */
182                         evsel->core.attr.exclude_kernel = 1;
183                         ret = evsel__open(evsel, NULL, tmap) >= 0;
184                 }
185                 evsel__delete(evsel);
186         }
187
188         perf_thread_map__put(tmap);
189         return ret;
190 }
191
192 const char *event_type(int type)
193 {
194         switch (type) {
195         case PERF_TYPE_HARDWARE:
196                 return "hardware";
197
198         case PERF_TYPE_SOFTWARE:
199                 return "software";
200
201         case PERF_TYPE_TRACEPOINT:
202                 return "tracepoint";
203
204         case PERF_TYPE_HW_CACHE:
205                 return "hardware-cache";
206
207         default:
208                 break;
209         }
210
211         return "unknown";
212 }
213
214 static char *get_config_str(struct list_head *head_terms, int type_term)
215 {
216         struct parse_events_term *term;
217
218         if (!head_terms)
219                 return NULL;
220
221         list_for_each_entry(term, head_terms, list)
222                 if (term->type_term == type_term)
223                         return term->val.str;
224
225         return NULL;
226 }
227
228 static char *get_config_metric_id(struct list_head *head_terms)
229 {
230         return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
231 }
232
233 static char *get_config_name(struct list_head *head_terms)
234 {
235         return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
236 }
237
238 static struct evsel *
239 __add_event(struct list_head *list, int *idx,
240             struct perf_event_attr *attr,
241             bool init_attr,
242             const char *name, const char *metric_id, struct perf_pmu *pmu,
243             struct list_head *config_terms, bool auto_merge_stats,
244             const char *cpu_list)
245 {
246         struct evsel *evsel;
247         struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
248                                cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
249
250         if (pmu)
251                 perf_pmu__warn_invalid_formats(pmu);
252
253         if (pmu && attr->type == PERF_TYPE_RAW)
254                 perf_pmu__warn_invalid_config(pmu, attr->config, name);
255
256         if (init_attr)
257                 event_attr_init(attr);
258
259         evsel = evsel__new_idx(attr, *idx);
260         if (!evsel) {
261                 perf_cpu_map__put(cpus);
262                 return NULL;
263         }
264
265         (*idx)++;
266         evsel->core.cpus = cpus;
267         evsel->core.own_cpus = perf_cpu_map__get(cpus);
268         evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
269         evsel->auto_merge_stats = auto_merge_stats;
270         evsel->pmu = pmu;
271
272         if (name)
273                 evsel->name = strdup(name);
274
275         if (metric_id)
276                 evsel->metric_id = strdup(metric_id);
277
278         if (config_terms)
279                 list_splice_init(config_terms, &evsel->config_terms);
280
281         if (list)
282                 list_add_tail(&evsel->core.node, list);
283
284         return evsel;
285 }
286
287 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
288                                       const char *name, const char *metric_id,
289                                       struct perf_pmu *pmu)
290 {
291         return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
292                            metric_id, pmu, /*config_terms=*/NULL,
293                            /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
294 }
295
296 static int add_event(struct list_head *list, int *idx,
297                      struct perf_event_attr *attr, const char *name,
298                      const char *metric_id, struct list_head *config_terms)
299 {
300         return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
301                            /*pmu=*/NULL, config_terms,
302                            /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
303 }
304
305 static int add_event_tool(struct list_head *list, int *idx,
306                           enum perf_tool_event tool_event)
307 {
308         struct evsel *evsel;
309         struct perf_event_attr attr = {
310                 .type = PERF_TYPE_SOFTWARE,
311                 .config = PERF_COUNT_SW_DUMMY,
312         };
313
314         evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
315                             /*metric_id=*/NULL, /*pmu=*/NULL,
316                             /*config_terms=*/NULL, /*auto_merge_stats=*/false,
317                             /*cpu_list=*/"0");
318         if (!evsel)
319                 return -ENOMEM;
320         evsel->tool_event = tool_event;
321         if (tool_event == PERF_TOOL_DURATION_TIME
322             || tool_event == PERF_TOOL_USER_TIME
323             || tool_event == PERF_TOOL_SYSTEM_TIME) {
324                 free((char *)evsel->unit);
325                 evsel->unit = strdup("ns");
326         }
327         return 0;
328 }
329
330 static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size)
331 {
332         int i, j;
333         int n, longest = -1;
334
335         for (i = 0; i < size; i++) {
336                 for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
337                         n = strlen(names[i][j]);
338                         if (n > longest && !strncasecmp(str, names[i][j], n))
339                                 longest = n;
340                 }
341                 if (longest > 0)
342                         return i;
343         }
344
345         return -1;
346 }
347
348 typedef int config_term_func_t(struct perf_event_attr *attr,
349                                struct parse_events_term *term,
350                                struct parse_events_error *err);
351 static int config_term_common(struct perf_event_attr *attr,
352                               struct parse_events_term *term,
353                               struct parse_events_error *err);
354 static int config_attr(struct perf_event_attr *attr,
355                        struct list_head *head,
356                        struct parse_events_error *err,
357                        config_term_func_t config_term);
358
359 int parse_events_add_cache(struct list_head *list, int *idx,
360                            char *type, char *op_result1, char *op_result2,
361                            struct parse_events_error *err,
362                            struct list_head *head_config,
363                            struct parse_events_state *parse_state)
364 {
365         struct perf_event_attr attr;
366         LIST_HEAD(config_terms);
367         char name[MAX_NAME_LEN];
368         const char *config_name, *metric_id;
369         int cache_type = -1, cache_op = -1, cache_result = -1;
370         char *op_result[2] = { op_result1, op_result2 };
371         int i, n, ret;
372         bool hybrid;
373
374         /*
375          * No fallback - if we cannot get a clear cache type
376          * then bail out:
377          */
378         cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX);
379         if (cache_type == -1)
380                 return -EINVAL;
381
382         config_name = get_config_name(head_config);
383         n = snprintf(name, MAX_NAME_LEN, "%s", type);
384
385         for (i = 0; (i < 2) && (op_result[i]); i++) {
386                 char *str = op_result[i];
387
388                 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
389
390                 if (cache_op == -1) {
391                         cache_op = parse_aliases(str, evsel__hw_cache_op,
392                                                  PERF_COUNT_HW_CACHE_OP_MAX);
393                         if (cache_op >= 0) {
394                                 if (!evsel__is_cache_op_valid(cache_type, cache_op))
395                                         return -EINVAL;
396                                 continue;
397                         }
398                 }
399
400                 if (cache_result == -1) {
401                         cache_result = parse_aliases(str, evsel__hw_cache_result,
402                                                      PERF_COUNT_HW_CACHE_RESULT_MAX);
403                         if (cache_result >= 0)
404                                 continue;
405                 }
406         }
407
408         /*
409          * Fall back to reads:
410          */
411         if (cache_op == -1)
412                 cache_op = PERF_COUNT_HW_CACHE_OP_READ;
413
414         /*
415          * Fall back to accesses:
416          */
417         if (cache_result == -1)
418                 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
419
420         memset(&attr, 0, sizeof(attr));
421         attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
422         attr.type = PERF_TYPE_HW_CACHE;
423
424         if (head_config) {
425                 if (config_attr(&attr, head_config, err,
426                                 config_term_common))
427                         return -EINVAL;
428
429                 if (get_config_terms(head_config, &config_terms))
430                         return -ENOMEM;
431         }
432
433         metric_id = get_config_metric_id(head_config);
434         ret = parse_events__add_cache_hybrid(list, idx, &attr,
435                                              config_name ? : name,
436                                              metric_id,
437                                              &config_terms,
438                                              &hybrid, parse_state);
439         if (hybrid)
440                 goto out_free_terms;
441
442         ret = add_event(list, idx, &attr, config_name ? : name, metric_id,
443                         &config_terms);
444 out_free_terms:
445         free_config_terms(&config_terms);
446         return ret;
447 }
448
449 #ifdef HAVE_LIBTRACEEVENT
450 static void tracepoint_error(struct parse_events_error *e, int err,
451                              const char *sys, const char *name)
452 {
453         const char *str;
454         char help[BUFSIZ];
455
456         if (!e)
457                 return;
458
459         /*
460          * We get the error directly from the syscall errno (> 0),
461          * or from the encoded pointer's error (< 0).
462          */
463         err = abs(err);
464
465         switch (err) {
466         case EACCES:
467                 str = "can't access trace events";
468                 break;
469         case ENOENT:
470                 str = "unknown tracepoint";
471                 break;
472         default:
473                 str = "failed to add tracepoint";
474                 break;
475         }
476
477         tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
478         parse_events_error__handle(e, 0, strdup(str), strdup(help));
479 }
480
481 static int add_tracepoint(struct list_head *list, int *idx,
482                           const char *sys_name, const char *evt_name,
483                           struct parse_events_error *err,
484                           struct list_head *head_config)
485 {
486         struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
487
488         if (IS_ERR(evsel)) {
489                 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
490                 return PTR_ERR(evsel);
491         }
492
493         if (head_config) {
494                 LIST_HEAD(config_terms);
495
496                 if (get_config_terms(head_config, &config_terms))
497                         return -ENOMEM;
498                 list_splice(&config_terms, &evsel->config_terms);
499         }
500
501         list_add_tail(&evsel->core.node, list);
502         return 0;
503 }
504
505 static int add_tracepoint_multi_event(struct list_head *list, int *idx,
506                                       const char *sys_name, const char *evt_name,
507                                       struct parse_events_error *err,
508                                       struct list_head *head_config)
509 {
510         char *evt_path;
511         struct dirent *evt_ent;
512         DIR *evt_dir;
513         int ret = 0, found = 0;
514
515         evt_path = get_events_file(sys_name);
516         if (!evt_path) {
517                 tracepoint_error(err, errno, sys_name, evt_name);
518                 return -1;
519         }
520         evt_dir = opendir(evt_path);
521         if (!evt_dir) {
522                 put_events_file(evt_path);
523                 tracepoint_error(err, errno, sys_name, evt_name);
524                 return -1;
525         }
526
527         while (!ret && (evt_ent = readdir(evt_dir))) {
528                 if (!strcmp(evt_ent->d_name, ".")
529                     || !strcmp(evt_ent->d_name, "..")
530                     || !strcmp(evt_ent->d_name, "enable")
531                     || !strcmp(evt_ent->d_name, "filter"))
532                         continue;
533
534                 if (!strglobmatch(evt_ent->d_name, evt_name))
535                         continue;
536
537                 found++;
538
539                 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
540                                      err, head_config);
541         }
542
543         if (!found) {
544                 tracepoint_error(err, ENOENT, sys_name, evt_name);
545                 ret = -1;
546         }
547
548         put_events_file(evt_path);
549         closedir(evt_dir);
550         return ret;
551 }
552
553 static int add_tracepoint_event(struct list_head *list, int *idx,
554                                 const char *sys_name, const char *evt_name,
555                                 struct parse_events_error *err,
556                                 struct list_head *head_config)
557 {
558         return strpbrk(evt_name, "*?") ?
559                add_tracepoint_multi_event(list, idx, sys_name, evt_name,
560                                           err, head_config) :
561                add_tracepoint(list, idx, sys_name, evt_name,
562                               err, head_config);
563 }
564
565 static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
566                                     const char *sys_name, const char *evt_name,
567                                     struct parse_events_error *err,
568                                     struct list_head *head_config)
569 {
570         struct dirent *events_ent;
571         DIR *events_dir;
572         int ret = 0;
573
574         events_dir = tracing_events__opendir();
575         if (!events_dir) {
576                 tracepoint_error(err, errno, sys_name, evt_name);
577                 return -1;
578         }
579
580         while (!ret && (events_ent = readdir(events_dir))) {
581                 if (!strcmp(events_ent->d_name, ".")
582                     || !strcmp(events_ent->d_name, "..")
583                     || !strcmp(events_ent->d_name, "enable")
584                     || !strcmp(events_ent->d_name, "header_event")
585                     || !strcmp(events_ent->d_name, "header_page"))
586                         continue;
587
588                 if (!strglobmatch(events_ent->d_name, sys_name))
589                         continue;
590
591                 ret = add_tracepoint_event(list, idx, events_ent->d_name,
592                                            evt_name, err, head_config);
593         }
594
595         closedir(events_dir);
596         return ret;
597 }
598 #endif /* HAVE_LIBTRACEEVENT */
599
600 #ifdef HAVE_LIBBPF_SUPPORT
601 struct __add_bpf_event_param {
602         struct parse_events_state *parse_state;
603         struct list_head *list;
604         struct list_head *head_config;
605 };
606
607 static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj,
608                          void *_param)
609 {
610         LIST_HEAD(new_evsels);
611         struct __add_bpf_event_param *param = _param;
612         struct parse_events_state *parse_state = param->parse_state;
613         struct list_head *list = param->list;
614         struct evsel *pos;
615         int err;
616         /*
617          * Check if we should add the event, i.e. if it is a TP whose name starts with
618          * a '!': in that case don't add the tracepoint, it will be used for something
619          * else, like adding to a BPF_MAP_TYPE_PROG_ARRAY.
620          *
621          * See tools/perf/examples/bpf/augmented_raw_syscalls.c
622          */
623         if (group[0] == '!')
624                 return 0;
625
626         pr_debug("add bpf event %s:%s and attach bpf program %d\n",
627                  group, event, fd);
628
629         err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group,
630                                           event, parse_state->error,
631                                           param->head_config);
632         if (err) {
633                 struct evsel *evsel, *tmp;
634
635                 pr_debug("Failed to add BPF event %s:%s\n",
636                          group, event);
637                 list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
638                         list_del_init(&evsel->core.node);
639                         evsel__delete(evsel);
640                 }
641                 return err;
642         }
643         pr_debug("adding %s:%s\n", group, event);
644
645         list_for_each_entry(pos, &new_evsels, core.node) {
646                 pr_debug("adding %s:%s to %p\n",
647                          group, event, pos);
648                 pos->bpf_fd = fd;
649                 pos->bpf_obj = obj;
650         }
651         list_splice(&new_evsels, list);
652         return 0;
653 }
654
655 int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
656                               struct list_head *list,
657                               struct bpf_object *obj,
658                               struct list_head *head_config)
659 {
660         int err;
661         char errbuf[BUFSIZ];
662         struct __add_bpf_event_param param = {parse_state, list, head_config};
663         static bool registered_unprobe_atexit = false;
664
665         if (IS_ERR(obj) || !obj) {
666                 snprintf(errbuf, sizeof(errbuf),
667                          "Internal error: load bpf obj with NULL");
668                 err = -EINVAL;
669                 goto errout;
670         }
671
672         /*
673          * Register atexit handler before calling bpf__probe() so
674          * bpf__probe() doesn't need to unprobe probe points it has
675          * already created on failure.
676          */
677         if (!registered_unprobe_atexit) {
678                 atexit(bpf__clear);
679                 registered_unprobe_atexit = true;
680         }
681
682         err = bpf__probe(obj);
683         if (err) {
684                 bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
685                 goto errout;
686         }
687
688         err = bpf__load(obj);
689         if (err) {
690                 bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
691                 goto errout;
692         }
693
694         err = bpf__foreach_event(obj, add_bpf_event, &param);
695         if (err) {
696                 snprintf(errbuf, sizeof(errbuf),
697                          "Attach events in BPF object failed");
698                 goto errout;
699         }
700
701         return 0;
702 errout:
703         parse_events_error__handle(parse_state->error, 0,
704                                 strdup(errbuf), strdup("(add -v to see detail)"));
705         return err;
706 }
707
708 static int
709 parse_events_config_bpf(struct parse_events_state *parse_state,
710                         struct bpf_object *obj,
711                         struct list_head *head_config)
712 {
713         struct parse_events_term *term;
714         int error_pos;
715
716         if (!head_config || list_empty(head_config))
717                 return 0;
718
719         list_for_each_entry(term, head_config, list) {
720                 int err;
721
722                 if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
723                         parse_events_error__handle(parse_state->error, term->err_term,
724                                                 strdup("Invalid config term for BPF object"),
725                                                 NULL);
726                         return -EINVAL;
727                 }
728
729                 err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
730                 if (err) {
731                         char errbuf[BUFSIZ];
732                         int idx;
733
734                         bpf__strerror_config_obj(obj, term, parse_state->evlist,
735                                                  &error_pos, err, errbuf,
736                                                  sizeof(errbuf));
737
738                         if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
739                                 idx = term->err_val;
740                         else
741                                 idx = term->err_term + error_pos;
742
743                         parse_events_error__handle(parse_state->error, idx,
744                                                 strdup(errbuf),
745                                                 strdup(
746 "Hint:\tValid config terms:\n"
747 "     \tmap:[<arraymap>].value<indices>=[value]\n"
748 "     \tmap:[<eventmap>].event<indices>=[event]\n"
749 "\n"
750 "     \twhere <indices> is something like [0,3...5] or [all]\n"
751 "     \t(add -v to see detail)"));
752                         return err;
753                 }
754         }
755         return 0;
756 }
757
758 /*
759  * Split config terms:
760  * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
761  *  'call-graph=fp' is 'evt config', which should be applied to
762  *  each event in bpf.c.
763  * 'map:array.value[0]=1' is 'obj config', which should be processed
764  * by parse_events_config_bpf().
765  *
766  * Move object config terms from the first list to obj_head_config.
767  */
768 static void
769 split_bpf_config_terms(struct list_head *evt_head_config,
770                        struct list_head *obj_head_config)
771 {
772         struct parse_events_term *term, *temp;
773
774         /*
775          * Currently, all possible user config terms
776          * belong to the bpf object. parse_events__is_hardcoded_term()
777          * happens to be a good flag.
778          *
779          * See parse_events_config_bpf() and
780          * config_term_tracepoint().
781          */
782         list_for_each_entry_safe(term, temp, evt_head_config, list)
783                 if (!parse_events__is_hardcoded_term(term))
784                         list_move_tail(&term->list, obj_head_config);
785 }
786
787 int parse_events_load_bpf(struct parse_events_state *parse_state,
788                           struct list_head *list,
789                           char *bpf_file_name,
790                           bool source,
791                           struct list_head *head_config)
792 {
793         int err;
794         struct bpf_object *obj;
795         LIST_HEAD(obj_head_config);
796
797         if (head_config)
798                 split_bpf_config_terms(head_config, &obj_head_config);
799
800         obj = bpf__prepare_load(bpf_file_name, source);
801         if (IS_ERR(obj)) {
802                 char errbuf[BUFSIZ];
803
804                 err = PTR_ERR(obj);
805
806                 if (err == -ENOTSUP)
807                         snprintf(errbuf, sizeof(errbuf),
808                                  "BPF support is not compiled");
809                 else
810                         bpf__strerror_prepare_load(bpf_file_name,
811                                                    source,
812                                                    -err, errbuf,
813                                                    sizeof(errbuf));
814
815                 parse_events_error__handle(parse_state->error, 0,
816                                         strdup(errbuf), strdup("(add -v to see detail)"));
817                 return err;
818         }
819
820         err = parse_events_load_bpf_obj(parse_state, list, obj, head_config);
821         if (err)
822                 return err;
823         err = parse_events_config_bpf(parse_state, obj, &obj_head_config);
824
825         /*
826          * Caller doesn't know anything about obj_head_config,
827          * so combine them together again before returning.
828          */
829         if (head_config)
830                 list_splice_tail(&obj_head_config, head_config);
831         return err;
832 }
833 #else // HAVE_LIBBPF_SUPPORT
834 int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
835                               struct list_head *list __maybe_unused,
836                               struct bpf_object *obj __maybe_unused,
837                               struct list_head *head_config __maybe_unused)
838 {
839         parse_events_error__handle(parse_state->error, 0,
840                                    strdup("BPF support is not compiled"),
841                                    strdup("Make sure libbpf-devel is available at build time."));
842         return -ENOTSUP;
843 }
844
845 int parse_events_load_bpf(struct parse_events_state *parse_state,
846                           struct list_head *list __maybe_unused,
847                           char *bpf_file_name __maybe_unused,
848                           bool source __maybe_unused,
849                           struct list_head *head_config __maybe_unused)
850 {
851         parse_events_error__handle(parse_state->error, 0,
852                                    strdup("BPF support is not compiled"),
853                                    strdup("Make sure libbpf-devel is available at build time."));
854         return -ENOTSUP;
855 }
856 #endif // HAVE_LIBBPF_SUPPORT
857
858 static int
859 parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
860 {
861         int i;
862
863         for (i = 0; i < 3; i++) {
864                 if (!type || !type[i])
865                         break;
866
867 #define CHECK_SET_TYPE(bit)             \
868 do {                                    \
869         if (attr->bp_type & bit)        \
870                 return -EINVAL;         \
871         else                            \
872                 attr->bp_type |= bit;   \
873 } while (0)
874
875                 switch (type[i]) {
876                 case 'r':
877                         CHECK_SET_TYPE(HW_BREAKPOINT_R);
878                         break;
879                 case 'w':
880                         CHECK_SET_TYPE(HW_BREAKPOINT_W);
881                         break;
882                 case 'x':
883                         CHECK_SET_TYPE(HW_BREAKPOINT_X);
884                         break;
885                 default:
886                         return -EINVAL;
887                 }
888         }
889
890 #undef CHECK_SET_TYPE
891
892         if (!attr->bp_type) /* Default */
893                 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
894
895         return 0;
896 }
897
898 int parse_events_add_breakpoint(struct list_head *list, int *idx,
899                                 u64 addr, char *type, u64 len)
900 {
901         struct perf_event_attr attr;
902
903         memset(&attr, 0, sizeof(attr));
904         attr.bp_addr = addr;
905
906         if (parse_breakpoint_type(type, &attr))
907                 return -EINVAL;
908
909         /* Provide some defaults if len is not specified */
910         if (!len) {
911                 if (attr.bp_type == HW_BREAKPOINT_X)
912                         len = sizeof(long);
913                 else
914                         len = HW_BREAKPOINT_LEN_4;
915         }
916
917         attr.bp_len = len;
918
919         attr.type = PERF_TYPE_BREAKPOINT;
920         attr.sample_period = 1;
921
922         return add_event(list, idx, &attr, /*name=*/NULL, /*metric_id=*/NULL,
923                          /*config_terms=*/NULL);
924 }
925
926 static int check_type_val(struct parse_events_term *term,
927                           struct parse_events_error *err,
928                           int type)
929 {
930         if (type == term->type_val)
931                 return 0;
932
933         if (err) {
934                 parse_events_error__handle(err, term->err_val,
935                                         type == PARSE_EVENTS__TERM_TYPE_NUM
936                                         ? strdup("expected numeric value")
937                                         : strdup("expected string value"),
938                                         NULL);
939         }
940         return -EINVAL;
941 }
942
943 /*
944  * Keep this table in sync with parse-events.l.
945  */
946 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
947         [PARSE_EVENTS__TERM_TYPE_USER]                  = "<sysfs term>",
948         [PARSE_EVENTS__TERM_TYPE_CONFIG]                = "config",
949         [PARSE_EVENTS__TERM_TYPE_CONFIG1]               = "config1",
950         [PARSE_EVENTS__TERM_TYPE_CONFIG2]               = "config2",
951         [PARSE_EVENTS__TERM_TYPE_NAME]                  = "name",
952         [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]         = "period",
953         [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]           = "freq",
954         [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]    = "branch_type",
955         [PARSE_EVENTS__TERM_TYPE_TIME]                  = "time",
956         [PARSE_EVENTS__TERM_TYPE_CALLGRAPH]             = "call-graph",
957         [PARSE_EVENTS__TERM_TYPE_STACKSIZE]             = "stack-size",
958         [PARSE_EVENTS__TERM_TYPE_NOINHERIT]             = "no-inherit",
959         [PARSE_EVENTS__TERM_TYPE_INHERIT]               = "inherit",
960         [PARSE_EVENTS__TERM_TYPE_MAX_STACK]             = "max-stack",
961         [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]            = "nr",
962         [PARSE_EVENTS__TERM_TYPE_OVERWRITE]             = "overwrite",
963         [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]           = "no-overwrite",
964         [PARSE_EVENTS__TERM_TYPE_DRV_CFG]               = "driver-config",
965         [PARSE_EVENTS__TERM_TYPE_PERCORE]               = "percore",
966         [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]            = "aux-output",
967         [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]       = "aux-sample-size",
968         [PARSE_EVENTS__TERM_TYPE_METRIC_ID]             = "metric-id",
969 };
970
971 static bool config_term_shrinked;
972
973 static bool
974 config_term_avail(int term_type, struct parse_events_error *err)
975 {
976         char *err_str;
977
978         if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
979                 parse_events_error__handle(err, -1,
980                                         strdup("Invalid term_type"), NULL);
981                 return false;
982         }
983         if (!config_term_shrinked)
984                 return true;
985
986         switch (term_type) {
987         case PARSE_EVENTS__TERM_TYPE_CONFIG:
988         case PARSE_EVENTS__TERM_TYPE_CONFIG1:
989         case PARSE_EVENTS__TERM_TYPE_CONFIG2:
990         case PARSE_EVENTS__TERM_TYPE_NAME:
991         case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
992         case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
993         case PARSE_EVENTS__TERM_TYPE_PERCORE:
994                 return true;
995         default:
996                 if (!err)
997                         return false;
998
999                 /* term_type is validated so indexing is safe */
1000                 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
1001                                 config_term_names[term_type]) >= 0)
1002                         parse_events_error__handle(err, -1, err_str, NULL);
1003                 return false;
1004         }
1005 }
1006
1007 void parse_events__shrink_config_terms(void)
1008 {
1009         config_term_shrinked = true;
1010 }
1011
1012 static int config_term_common(struct perf_event_attr *attr,
1013                               struct parse_events_term *term,
1014                               struct parse_events_error *err)
1015 {
1016 #define CHECK_TYPE_VAL(type)                                               \
1017 do {                                                                       \
1018         if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
1019                 return -EINVAL;                                            \
1020 } while (0)
1021
1022         switch (term->type_term) {
1023         case PARSE_EVENTS__TERM_TYPE_CONFIG:
1024                 CHECK_TYPE_VAL(NUM);
1025                 attr->config = term->val.num;
1026                 break;
1027         case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1028                 CHECK_TYPE_VAL(NUM);
1029                 attr->config1 = term->val.num;
1030                 break;
1031         case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1032                 CHECK_TYPE_VAL(NUM);
1033                 attr->config2 = term->val.num;
1034                 break;
1035         case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1036                 CHECK_TYPE_VAL(NUM);
1037                 break;
1038         case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1039                 CHECK_TYPE_VAL(NUM);
1040                 break;
1041         case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1042                 CHECK_TYPE_VAL(STR);
1043                 if (strcmp(term->val.str, "no") &&
1044                     parse_branch_str(term->val.str,
1045                                     &attr->branch_sample_type)) {
1046                         parse_events_error__handle(err, term->err_val,
1047                                         strdup("invalid branch sample type"),
1048                                         NULL);
1049                         return -EINVAL;
1050                 }
1051                 break;
1052         case PARSE_EVENTS__TERM_TYPE_TIME:
1053                 CHECK_TYPE_VAL(NUM);
1054                 if (term->val.num > 1) {
1055                         parse_events_error__handle(err, term->err_val,
1056                                                 strdup("expected 0 or 1"),
1057                                                 NULL);
1058                         return -EINVAL;
1059                 }
1060                 break;
1061         case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1062                 CHECK_TYPE_VAL(STR);
1063                 break;
1064         case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1065                 CHECK_TYPE_VAL(NUM);
1066                 break;
1067         case PARSE_EVENTS__TERM_TYPE_INHERIT:
1068                 CHECK_TYPE_VAL(NUM);
1069                 break;
1070         case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1071                 CHECK_TYPE_VAL(NUM);
1072                 break;
1073         case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1074                 CHECK_TYPE_VAL(NUM);
1075                 break;
1076         case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1077                 CHECK_TYPE_VAL(NUM);
1078                 break;
1079         case PARSE_EVENTS__TERM_TYPE_NAME:
1080                 CHECK_TYPE_VAL(STR);
1081                 break;
1082         case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1083                 CHECK_TYPE_VAL(STR);
1084                 break;
1085         case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1086                 CHECK_TYPE_VAL(NUM);
1087                 break;
1088         case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1089                 CHECK_TYPE_VAL(NUM);
1090                 break;
1091         case PARSE_EVENTS__TERM_TYPE_PERCORE:
1092                 CHECK_TYPE_VAL(NUM);
1093                 if ((unsigned int)term->val.num > 1) {
1094                         parse_events_error__handle(err, term->err_val,
1095                                                 strdup("expected 0 or 1"),
1096                                                 NULL);
1097                         return -EINVAL;
1098                 }
1099                 break;
1100         case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1101                 CHECK_TYPE_VAL(NUM);
1102                 break;
1103         case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1104                 CHECK_TYPE_VAL(NUM);
1105                 if (term->val.num > UINT_MAX) {
1106                         parse_events_error__handle(err, term->err_val,
1107                                                 strdup("too big"),
1108                                                 NULL);
1109                         return -EINVAL;
1110                 }
1111                 break;
1112         default:
1113                 parse_events_error__handle(err, term->err_term,
1114                                 strdup("unknown term"),
1115                                 parse_events_formats_error_string(NULL));
1116                 return -EINVAL;
1117         }
1118
1119         /*
1120          * Check term availability after basic checking so
1121          * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
1122          *
1123          * If we checked availability at the entry of this function,
1124          * the user would see "'<sysfs term>' is not usable in 'perf stat'"
1125          * if an invalid config term is provided for legacy events
1126          * (for example, instructions/badterm/...), which is confusing.
1127          */
1128         if (!config_term_avail(term->type_term, err))
1129                 return -EINVAL;
1130         return 0;
1131 #undef CHECK_TYPE_VAL
1132 }
1133
1134 static int config_term_pmu(struct perf_event_attr *attr,
1135                            struct parse_events_term *term,
1136                            struct parse_events_error *err)
1137 {
1138         if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
1139             term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG)
1140                 /*
1141                  * Always succeed for sysfs terms, as we don't know
1142                  * at this point what type they need to have.
1143                  */
1144                 return 0;
1145         else
1146                 return config_term_common(attr, term, err);
1147 }
1148
1149 #ifdef HAVE_LIBTRACEEVENT
1150 static int config_term_tracepoint(struct perf_event_attr *attr,
1151                                   struct parse_events_term *term,
1152                                   struct parse_events_error *err)
1153 {
1154         switch (term->type_term) {
1155         case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1156         case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1157         case PARSE_EVENTS__TERM_TYPE_INHERIT:
1158         case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1159         case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1160         case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1161         case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1162         case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1163         case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1164         case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1165                 return config_term_common(attr, term, err);
1166         default:
1167                 if (err) {
1168                         parse_events_error__handle(err, term->err_term,
1169                                 strdup("unknown term"),
1170                                 strdup("valid terms: call-graph,stack-size\n"));
1171                 }
1172                 return -EINVAL;
1173         }
1174
1175         return 0;
1176 }
1177 #endif
1178
1179 static int config_attr(struct perf_event_attr *attr,
1180                        struct list_head *head,
1181                        struct parse_events_error *err,
1182                        config_term_func_t config_term)
1183 {
1184         struct parse_events_term *term;
1185
1186         list_for_each_entry(term, head, list)
1187                 if (config_term(attr, term, err))
1188                         return -EINVAL;
1189
1190         return 0;
1191 }
1192
1193 static int get_config_terms(struct list_head *head_config,
1194                             struct list_head *head_terms __maybe_unused)
1195 {
1196 #define ADD_CONFIG_TERM(__type, __weak)                         \
1197         struct evsel_config_term *__t;                  \
1198                                                                 \
1199         __t = zalloc(sizeof(*__t));                             \
1200         if (!__t)                                               \
1201                 return -ENOMEM;                                 \
1202                                                                 \
1203         INIT_LIST_HEAD(&__t->list);                             \
1204         __t->type       = EVSEL__CONFIG_TERM_ ## __type;        \
1205         __t->weak       = __weak;                               \
1206         list_add_tail(&__t->list, head_terms)
1207
1208 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)      \
1209 do {                                                            \
1210         ADD_CONFIG_TERM(__type, __weak);                        \
1211         __t->val.__name = __val;                                \
1212 } while (0)
1213
1214 #define ADD_CONFIG_TERM_STR(__type, __val, __weak)              \
1215 do {                                                            \
1216         ADD_CONFIG_TERM(__type, __weak);                        \
1217         __t->val.str = strdup(__val);                           \
1218         if (!__t->val.str) {                                    \
1219                 zfree(&__t);                                    \
1220                 return -ENOMEM;                                 \
1221         }                                                       \
1222         __t->free_str = true;                                   \
1223 } while (0)
1224
1225         struct parse_events_term *term;
1226
1227         list_for_each_entry(term, head_config, list) {
1228                 switch (term->type_term) {
1229                 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1230                         ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
1231                         break;
1232                 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1233                         ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
1234                         break;
1235                 case PARSE_EVENTS__TERM_TYPE_TIME:
1236                         ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
1237                         break;
1238                 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1239                         ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
1240                         break;
1241                 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1242                         ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
1243                         break;
1244                 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1245                         ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
1246                                             term->val.num, term->weak);
1247                         break;
1248                 case PARSE_EVENTS__TERM_TYPE_INHERIT:
1249                         ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1250                                             term->val.num ? 1 : 0, term->weak);
1251                         break;
1252                 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1253                         ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1254                                             term->val.num ? 0 : 1, term->weak);
1255                         break;
1256                 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1257                         ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
1258                                             term->val.num, term->weak);
1259                         break;
1260                 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1261                         ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
1262                                             term->val.num, term->weak);
1263                         break;
1264                 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1265                         ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1266                                             term->val.num ? 1 : 0, term->weak);
1267                         break;
1268                 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1269                         ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1270                                             term->val.num ? 0 : 1, term->weak);
1271                         break;
1272                 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1273                         ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
1274                         break;
1275                 case PARSE_EVENTS__TERM_TYPE_PERCORE:
1276                         ADD_CONFIG_TERM_VAL(PERCORE, percore,
1277                                             term->val.num ? true : false, term->weak);
1278                         break;
1279                 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1280                         ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
1281                                             term->val.num ? 1 : 0, term->weak);
1282                         break;
1283                 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1284                         ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
1285                                             term->val.num, term->weak);
1286                         break;
1287                 default:
1288                         break;
1289                 }
1290         }
1291         return 0;
1292 }
1293
1294 /*
1295  * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
1296  * each bit of attr->config that the user has changed.
1297  */
1298 static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
1299                            struct list_head *head_terms)
1300 {
1301         struct parse_events_term *term;
1302         u64 bits = 0;
1303         int type;
1304
1305         list_for_each_entry(term, head_config, list) {
1306                 switch (term->type_term) {
1307                 case PARSE_EVENTS__TERM_TYPE_USER:
1308                         type = perf_pmu__format_type(&pmu->format, term->config);
1309                         if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
1310                                 continue;
1311                         bits |= perf_pmu__format_bits(&pmu->format, term->config);
1312                         break;
1313                 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1314                         bits = ~(u64)0;
1315                         break;
1316                 default:
1317                         break;
1318                 }
1319         }
1320
1321         if (bits)
1322                 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
1323
1324 #undef ADD_CONFIG_TERM
1325         return 0;
1326 }
1327
1328 int parse_events_add_tracepoint(struct list_head *list, int *idx,
1329                                 const char *sys, const char *event,
1330                                 struct parse_events_error *err,
1331                                 struct list_head *head_config)
1332 {
1333 #ifdef HAVE_LIBTRACEEVENT
1334         if (head_config) {
1335                 struct perf_event_attr attr;
1336
1337                 if (config_attr(&attr, head_config, err,
1338                                 config_term_tracepoint))
1339                         return -EINVAL;
1340         }
1341
1342         if (strpbrk(sys, "*?"))
1343                 return add_tracepoint_multi_sys(list, idx, sys, event,
1344                                                 err, head_config);
1345         else
1346                 return add_tracepoint_event(list, idx, sys, event,
1347                                             err, head_config);
1348 #else
1349         (void)list;
1350         (void)idx;
1351         (void)sys;
1352         (void)event;
1353         (void)head_config;
1354         parse_events_error__handle(err, 0, strdup("unsupported tracepoint"),
1355                                 strdup("libtraceevent is necessary for tracepoint support"));
1356         return -1;
1357 #endif
1358 }
1359
1360 int parse_events_add_numeric(struct parse_events_state *parse_state,
1361                              struct list_head *list,
1362                              u32 type, u64 config,
1363                              struct list_head *head_config)
1364 {
1365         struct perf_event_attr attr;
1366         LIST_HEAD(config_terms);
1367         const char *name, *metric_id;
1368         bool hybrid;
1369         int ret;
1370
1371         memset(&attr, 0, sizeof(attr));
1372         attr.type = type;
1373         attr.config = config;
1374
1375         if (head_config) {
1376                 if (config_attr(&attr, head_config, parse_state->error,
1377                                 config_term_common))
1378                         return -EINVAL;
1379
1380                 if (get_config_terms(head_config, &config_terms))
1381                         return -ENOMEM;
1382         }
1383
1384         name = get_config_name(head_config);
1385         metric_id = get_config_metric_id(head_config);
1386         ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
1387                                                name, metric_id,
1388                                                &config_terms, &hybrid);
1389         if (hybrid)
1390                 goto out_free_terms;
1391
1392         ret = add_event(list, &parse_state->idx, &attr, name, metric_id,
1393                         &config_terms);
1394 out_free_terms:
1395         free_config_terms(&config_terms);
1396         return ret;
1397 }
1398
1399 int parse_events_add_tool(struct parse_events_state *parse_state,
1400                           struct list_head *list,
1401                           int tool_event)
1402 {
1403         return add_event_tool(list, &parse_state->idx, tool_event);
1404 }
1405
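/* Return the value of the EVSEL__CONFIG_TERM_PERCORE term, if present. */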
1406 static bool config_term_percore(struct list_head *config_terms)
1407 {
1408         struct evsel_config_term *term;
1409
1410         list_for_each_entry(term, config_terms, list) {
1411                 if (term->type == EVSEL__CONFIG_TERM_PERCORE)
1412                         return term->val.percore;
1413         }
1414
1415         return false;
1416 }
1417
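/*
 * Editor's note (summary inferred from the code below): if an event for a
 * hybrid PMU has exactly one term and that term's key is not "event",
 * re-parse the term as an event string for that PMU via
 * parse_events__with_hybrid_pmu(). Returns 0 if the event was handled here,
 * -1 otherwise.
 */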
1418 static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
1419                                            struct list_head *list, char *name,
1420                                            struct list_head *head_config)
1421 {
1422         struct parse_events_term *term;
1423         int ret = -1;
1424
1425         if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
1426             !perf_pmu__is_hybrid(name)) {
1427                 return -1;
1428         }
1429
1430         /*
1431          * Bail out if there is more than one term in the list.
1432          */
1433         if (head_config->next && head_config->next->next != head_config)
1434                 return -1;
1435
1436         term = list_first_entry(head_config, struct parse_events_term, list);
1437         if (term && term->config && strcmp(term->config, "event")) {
1438                 ret = parse_events__with_hybrid_pmu(parse_state, term->config,
1439                                                     name, list);
1440         }
1441
1442         return ret;
1443 }
1444
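/*
 * Editor's note (summary inferred from the code below): add an event for the
 * PMU 'name' (e.g. "cpu/event=0x3c/" style syntax or a sysfs event alias) to
 * 'list'. Alias terms are resolved via perf_pmu__check_alias(), hardcoded
 * terms are applied to the attribute, and the remaining terms are resolved
 * against the PMU's format definitions by perf_pmu__config().
 */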
1445 int parse_events_add_pmu(struct parse_events_state *parse_state,
1446                          struct list_head *list, char *name,
1447                          struct list_head *head_config,
1448                          bool auto_merge_stats)
1449 {
1450         struct perf_event_attr attr;
1451         struct perf_pmu_info info;
1452         struct perf_pmu *pmu;
1453         struct evsel *evsel;
1454         struct parse_events_error *err = parse_state->error;
1455         LIST_HEAD(config_terms);
1456
1457         pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
1458
1459         if (verbose > 1 && !(pmu && pmu->selectable)) {
1460                 fprintf(stderr, "Attempting to add event pmu '%s' with '",
1461                         name);
1462                 if (head_config) {
1463                         struct parse_events_term *term;
1464
1465                         list_for_each_entry(term, head_config, list) {
1466                                 fprintf(stderr, "%s,", term->config);
1467                         }
1468                 }
1469                 fprintf(stderr, "' that may result in non-fatal errors\n");
1470         }
1471
1472         if (!pmu) {
1473                 char *err_str;
1474
1475                 if (asprintf(&err_str,
1476                                 "Cannot find PMU `%s'. Missing kernel support?",
1477                                 name) >= 0)
1478                         parse_events_error__handle(err, 0, err_str, NULL);
1479                 return -EINVAL;
1480         }
1481
1482         if (pmu->default_config) {
1483                 memcpy(&attr, pmu->default_config,
1484                        sizeof(struct perf_event_attr));
1485         } else {
1486                 memset(&attr, 0, sizeof(attr));
1487         }
1488
1489         if (!head_config) {
1490                 attr.type = pmu->type;
1491                 evsel = __add_event(list, &parse_state->idx, &attr,
1492                                     /*init_attr=*/true, /*name=*/NULL,
1493                                     /*metric_id=*/NULL, pmu,
1494                                     /*config_terms=*/NULL, auto_merge_stats,
1495                                     /*cpu_list=*/NULL);
1496                 if (evsel) {
1497                         evsel->pmu_name = name ? strdup(name) : NULL;
1498                         return 0;
1499                 } else {
1500                         return -ENOMEM;
1501                 }
1502         }
1503
1504         if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
1505                 return -EINVAL;
1506
1507         if (verbose > 1) {
1508                 fprintf(stderr, "After aliases, add event pmu '%s' with '",
1509                         name);
1510                 if (head_config) {
1511                         struct parse_events_term *term;
1512
1513                         list_for_each_entry(term, head_config, list) {
1514                                 fprintf(stderr, "%s,", term->config);
1515                         }
1516                 }
1517                 fprintf(stderr, "' that may result in non-fatal errors\n");
1518         }
1519
1520         /*
1521          * Configure hardcoded terms first; the remaining terms are
1522          * resolved against the PMU format by perf_pmu__config() below.
1523          */
1524         if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
1525                 return -EINVAL;
1526
1527         if (get_config_terms(head_config, &config_terms))
1528                 return -ENOMEM;
1529
1530         /*
1531          * When using default config, record which bits of attr->config were
1532          * changed by the user.
1533          */
1534         if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
1535                 return -ENOMEM;
1536
1537         if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
1538                                              head_config)) {
1539                 return 0;
1540         }
1541
1542         if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
1543                 free_config_terms(&config_terms);
1544                 return -EINVAL;
1545         }
1546
1547         evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
1548                             get_config_name(head_config),
1549                             get_config_metric_id(head_config), pmu,
1550                             &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
1551         if (!evsel)
1552                 return -ENOMEM;
1553
1554         if (evsel->name)
1555                 evsel->use_config_name = true;
1556
1557         evsel->pmu_name = name ? strdup(name) : NULL;
1558         evsel->percore = config_term_percore(&evsel->config_terms);
1559
1560         if (parse_state->fake_pmu)
1561                 return 0;
1562
1563         free((char *)evsel->unit);
1564         evsel->unit = strdup(info.unit);
1565         evsel->scale = info.scale;
1566         evsel->per_pkg = info.per_pkg;
1567         evsel->snapshot = info.snapshot;
1568         return 0;
1569 }
1570
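/*
 * Editor's note (summary inferred from the code below): add an event named
 * 'str' for every PMU that exposes it as an alias. A new list is allocated
 * and returned through '*listp' when at least one PMU accepted the event;
 * otherwise -1 is returned and '*listp' stays NULL.
 */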
1571 int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
1572                                char *str, struct list_head *head,
1573                                struct list_head **listp)
1574 {
1575         struct parse_events_term *term;
1576         struct list_head *list = NULL;
1577         struct list_head *orig_head = NULL;
1578         struct perf_pmu *pmu = NULL;
1579         int ok = 0;
1580         char *config;
1581
1582         *listp = NULL;
1583
1584         if (!head) {
1585                 head = malloc(sizeof(struct list_head));
1586                 if (!head)
1587                         goto out_err;
1588
1589                 INIT_LIST_HEAD(head);
1590         }
1591         config = strdup(str);
1592         if (!config)
1593                 goto out_err;
1594
1595         if (parse_events_term__num(&term,
1596                                    PARSE_EVENTS__TERM_TYPE_USER,
1597                                    config, 1, false, &config,
1598                                         NULL) < 0) {
1599                 free(config);
1600                 goto out_err;
1601         }
1602         list_add_tail(&term->list, head);
1603
1604         /* Add it for all PMUs that support the alias */
1605         list = malloc(sizeof(struct list_head));
1606         if (!list)
1607                 goto out_err;
1608
1609         INIT_LIST_HEAD(list);
1610
1611         while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1612                 struct perf_pmu_alias *alias;
1613
1614                 list_for_each_entry(alias, &pmu->aliases, list) {
1615                         if (!strcasecmp(alias->name, str)) {
1616                                 parse_events_copy_term_list(head, &orig_head);
1617                                 if (!parse_events_add_pmu(parse_state, list,
1618                                                           pmu->name, orig_head,
1619                                                           /*auto_merge_stats=*/true)) {
1620                                         pr_debug("%s -> %s/%s/\n", str,
1621                                                  pmu->name, alias->str);
1622                                         ok++;
1623                                 }
1624                                 parse_events_terms__delete(orig_head);
1625                         }
1626                 }
1627         }
1628
1629         if (parse_state->fake_pmu) {
1630                 if (!parse_events_add_pmu(parse_state, list, str, head,
1631                                           /*auto_merge_stats=*/true)) {
1632                         pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
1633                         ok++;
1634                 }
1635         }
1636
1637 out_err:
1638         if (ok)
1639                 *listp = list;
1640         else
1641                 free(list);
1642
1643         parse_events_terms__delete(head);
1644         return ok ? 0 : -1;
1645 }
1646
1647 int parse_events__modifier_group(struct list_head *list,
1648                                  char *event_mod)
1649 {
1650         return parse_events__modifier_event(list, event_mod, true);
1651 }
1652
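/* Make the first event on 'list' the group leader and set the group name. */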
1653 void parse_events__set_leader(char *name, struct list_head *list)
1654 {
1655         struct evsel *leader;
1656
1657         if (list_empty(list)) {
1658                 WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1659                 return;
1660         }
1661
1662         leader = list_first_entry(list, struct evsel, core.node);
1663         __perf_evlist__set_leader(list, &leader->core);
1664         leader->group_name = name;
1665 }
1666
1667 /* list_event is assumed to point to malloc'ed memory */
1668 void parse_events_update_lists(struct list_head *list_event,
1669                                struct list_head *list_all)
1670 {
1671         /*
1672          * Called for a single event definition. Update the
1673          * 'all events' list, and reinitialize the 'single event'
1674          * list for the next event definition.
1675          */
1676         list_splice_tail(list_event, list_all);
1677         free(list_event);
1678 }
1679
1680 struct event_modifier {
1681         int eu;
1682         int ek;
1683         int eh;
1684         int eH;
1685         int eG;
1686         int eI;
1687         int precise;
1688         int precise_max;
1689         int exclude_GH;
1690         int sample_read;
1691         int pinned;
1692         int weak;
1693         int exclusive;
1694         int bpf_counter;
1695 };
1696
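/*
 * Editor's note (summary inferred from the code below): parse an event
 * modifier string (e.g. the "u" in "cycles:u") into an event_modifier,
 * starting from the evsel's current attribute values when 'evsel' is
 * non-NULL. See check_modifier() for the accepted characters.
 */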
1697 static int get_event_modifier(struct event_modifier *mod, char *str,
1698                                struct evsel *evsel)
1699 {
1700         int eu = evsel ? evsel->core.attr.exclude_user : 0;
1701         int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
1702         int eh = evsel ? evsel->core.attr.exclude_hv : 0;
1703         int eH = evsel ? evsel->core.attr.exclude_host : 0;
1704         int eG = evsel ? evsel->core.attr.exclude_guest : 0;
1705         int eI = evsel ? evsel->core.attr.exclude_idle : 0;
1706         int precise = evsel ? evsel->core.attr.precise_ip : 0;
1707         int precise_max = 0;
1708         int sample_read = 0;
1709         int pinned = evsel ? evsel->core.attr.pinned : 0;
1710         int exclusive = evsel ? evsel->core.attr.exclusive : 0;
1711
1712         int exclude = eu | ek | eh;
1713         int exclude_GH = evsel ? evsel->exclude_GH : 0;
1714         int weak = 0;
1715         int bpf_counter = 0;
1716
1717         memset(mod, 0, sizeof(*mod));
1718
1719         while (*str) {
1720                 if (*str == 'u') {
1721                         if (!exclude)
1722                                 exclude = eu = ek = eh = 1;
1723                         if (!exclude_GH && !perf_guest)
1724                                 eG = 1;
1725                         eu = 0;
1726                 } else if (*str == 'k') {
1727                         if (!exclude)
1728                                 exclude = eu = ek = eh = 1;
1729                         ek = 0;
1730                 } else if (*str == 'h') {
1731                         if (!exclude)
1732                                 exclude = eu = ek = eh = 1;
1733                         eh = 0;
1734                 } else if (*str == 'G') {
1735                         if (!exclude_GH)
1736                                 exclude_GH = eG = eH = 1;
1737                         eG = 0;
1738                 } else if (*str == 'H') {
1739                         if (!exclude_GH)
1740                                 exclude_GH = eG = eH = 1;
1741                         eH = 0;
1742                 } else if (*str == 'I') {
1743                         eI = 1;
1744                 } else if (*str == 'p') {
1745                         precise++;
1746                         /* use of precise requires exclude_guest */
1747                         if (!exclude_GH)
1748                                 eG = 1;
1749                 } else if (*str == 'P') {
1750                         precise_max = 1;
1751                 } else if (*str == 'S') {
1752                         sample_read = 1;
1753                 } else if (*str == 'D') {
1754                         pinned = 1;
1755                 } else if (*str == 'e') {
1756                         exclusive = 1;
1757                 } else if (*str == 'W') {
1758                         weak = 1;
1759                 } else if (*str == 'b') {
1760                         bpf_counter = 1;
1761                 } else
1762                         break;
1763
1764                 ++str;
1765         }
1766
1767         /*
1768          * precise ip:
1769          *
1770          *  0 - SAMPLE_IP can have arbitrary skid
1771          *  1 - SAMPLE_IP must have constant skid
1772          *  2 - SAMPLE_IP requested to have 0 skid
1773          *  3 - SAMPLE_IP must have 0 skid
1774          *
1775          *  See also PERF_RECORD_MISC_EXACT_IP
1776          */
1777         if (precise > 3)
1778                 return -EINVAL;
1779
1780         mod->eu = eu;
1781         mod->ek = ek;
1782         mod->eh = eh;
1783         mod->eH = eH;
1784         mod->eG = eG;
1785         mod->eI = eI;
1786         mod->precise = precise;
1787         mod->precise_max = precise_max;
1788         mod->exclude_GH = exclude_GH;
1789         mod->sample_read = sample_read;
1790         mod->pinned = pinned;
1791         mod->weak = weak;
1792         mod->bpf_counter = bpf_counter;
1793         mod->exclusive = exclusive;
1794
1795         return 0;
1796 }
1797
1798 /*
1799  * Basic modifier sanity check: validate that the string contains at
1800  * most one instance of each modifier (apart from 'p').
1801  */
1802 static int check_modifier(char *str)
1803 {
1804         char *p = str;
1805
1806         /* The sizeof includes the terminating NUL byte as well. */
1807         if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
1808                 return -1;
1809
1810         while (*p) {
1811                 if (*p != 'p' && strchr(p + 1, *p))
1812                         return -1;
1813                 p++;
1814         }
1815
1816         return 0;
1817 }
1818
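/*
 * Apply the modifier string 'str' to every event on 'list'. Pinned and
 * exclusive are only applied to group leaders.
 */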
1819 int parse_events__modifier_event(struct list_head *list, char *str, bool add)
1820 {
1821         struct evsel *evsel;
1822         struct event_modifier mod;
1823
1824         if (str == NULL)
1825                 return 0;
1826
1827         if (check_modifier(str))
1828                 return -EINVAL;
1829
1830         if (!add && get_event_modifier(&mod, str, NULL))
1831                 return -EINVAL;
1832
1833         __evlist__for_each_entry(list, evsel) {
1834                 if (add && get_event_modifier(&mod, str, evsel))
1835                         return -EINVAL;
1836
1837                 evsel->core.attr.exclude_user   = mod.eu;
1838                 evsel->core.attr.exclude_kernel = mod.ek;
1839                 evsel->core.attr.exclude_hv     = mod.eh;
1840                 evsel->core.attr.precise_ip     = mod.precise;
1841                 evsel->core.attr.exclude_host   = mod.eH;
1842                 evsel->core.attr.exclude_guest  = mod.eG;
1843                 evsel->core.attr.exclude_idle   = mod.eI;
1844                 evsel->exclude_GH          = mod.exclude_GH;
1845                 evsel->sample_read         = mod.sample_read;
1846                 evsel->precise_max         = mod.precise_max;
1847                 evsel->weak_group          = mod.weak;
1848                 evsel->bpf_counter         = mod.bpf_counter;
1849
1850                 if (evsel__is_group_leader(evsel)) {
1851                         evsel->core.attr.pinned = mod.pinned;
1852                         evsel->core.attr.exclusive = mod.exclusive;
1853                 }
1854         }
1855
1856         return 0;
1857 }
1858
1859 int parse_events_name(struct list_head *list, const char *name)
1860 {
1861         struct evsel *evsel;
1862
1863         __evlist__for_each_entry(list, evsel) {
1864                 if (!evsel->name)
1865                         evsel->name = strdup(name);
1866         }
1867
1868         return 0;
1869 }
1870
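/* qsort()/bsearch() comparator ordering PMU event symbols case-insensitively. */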
1871 static int
1872 comp_pmu(const void *p1, const void *p2)
1873 {
1874         struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
1875         struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;
1876
1877         return strcasecmp(pmu1->symbol, pmu2->symbol);
1878 }
1879
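/* Free the cached list of sysfs PMU event symbols. */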
1880 static void perf_pmu__parse_cleanup(void)
1881 {
1882         if (perf_pmu_events_list_num > 0) {
1883                 struct perf_pmu_event_symbol *p;
1884                 int i;
1885
1886                 for (i = 0; i < perf_pmu_events_list_num; i++) {
1887                         p = perf_pmu_events_list + i;
1888                         zfree(&p->symbol);
1889                 }
1890                 zfree(&perf_pmu_events_list);
1891                 perf_pmu_events_list_num = 0;
1892         }
1893 }
1894
1895 #define SET_SYMBOL(str, stype)          \
1896 do {                                    \
1897         p->symbol = str;                \
1898         if (!p->symbol)                 \
1899                 goto err;               \
1900         p->type = stype;                \
1901 } while (0)
1902
1903 /*
1904  * Read the PMU events list from sysfs and
1905  * save it into perf_pmu_events_list.
1906  */
1907 static void perf_pmu__parse_init(void)
1908 {
1909
1910         struct perf_pmu *pmu = NULL;
1911         struct perf_pmu_alias *alias;
1912         int len = 0;
1913
1914         pmu = NULL;
1915         while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1916                 list_for_each_entry(alias, &pmu->aliases, list) {
1917                         char *tmp = strchr(alias->name, '-');
1918
1919                         if (tmp) {
1920                                 char *tmp2 = NULL;
1921
1922                                 tmp2 = strchr(tmp + 1, '-');
1923                                 len++;
1924                                 if (tmp2)
1925                                         len++;
1926                         }
1927
1928                         len++;
1929                 }
1930         }
1931
1932         if (len == 0) {
1933                 perf_pmu_events_list_num = -1;
1934                 return;
1935         }
1936         perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
1937         if (!perf_pmu_events_list)
1938                 return;
1939         perf_pmu_events_list_num = len;
1940
1941         len = 0;
1942         pmu = NULL;
1943         while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1944                 list_for_each_entry(alias, &pmu->aliases, list) {
1945                         struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
1946                         char *tmp = strchr(alias->name, '-');
1947                         char *tmp2 = NULL;
1948
1949                         if (tmp)
1950                                 tmp2 = strchr(tmp + 1, '-');
1951                         if (tmp2) {
1952                                 SET_SYMBOL(strndup(alias->name, tmp - alias->name),
1953                                                 PMU_EVENT_SYMBOL_PREFIX);
1954                                 p++;
1955                                 tmp++;
1956                                 SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX);
1957                                 p++;
1958                                 SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2);
1959                                 len += 3;
1960                         } else if (tmp) {
1961                                 SET_SYMBOL(strndup(alias->name, tmp - alias->name),
1962                                                 PMU_EVENT_SYMBOL_PREFIX);
1963                                 p++;
1964                                 SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
1965                                 len += 2;
1966                         } else {
1967                                 SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
1968                                 len++;
1969                         }
1970                 }
1971         }
1972         qsort(perf_pmu_events_list, len,
1973                 sizeof(struct perf_pmu_event_symbol), comp_pmu);
1974
1975         return;
1976 err:
1977         perf_pmu__parse_cleanup();
1978 }
1979
1980 /*
1981  * This function injects special terms into
1982  * perf_pmu_events_list so the test code
1983  * can exercise this functionality.
1984  */
1985 int perf_pmu__test_parse_init(void)
1986 {
1987         struct perf_pmu_event_symbol *list, *tmp, symbols[] = {
1988                 {(char *)"read", PMU_EVENT_SYMBOL},
1989                 {(char *)"event", PMU_EVENT_SYMBOL_PREFIX},
1990                 {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX},
1991                 {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX},
1992                 {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2},
1993         };
1994         unsigned long i, j;
1995
1996         tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols));
1997         if (!list)
1998                 return -ENOMEM;
1999
2000         for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
2001                 tmp->type = symbols[i].type;
2002                 tmp->symbol = strdup(symbols[i].symbol);
2003                 if (!tmp->symbol)
2004                         goto err_free;
2005         }
2006
2007         perf_pmu_events_list = list;
2008         perf_pmu_events_list_num = ARRAY_SIZE(symbols);
2009
2010         qsort(perf_pmu_events_list, ARRAY_SIZE(symbols),
2011               sizeof(struct perf_pmu_event_symbol), comp_pmu);
2012         return 0;
2013
2014 err_free:
2015         for (j = 0, tmp = list; j < i; j++, tmp++)
2016                 free(tmp->symbol);
2017         free(list);
2018         return -ENOMEM;
2019 }
2020
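/*
 * Editor's note (summary inferred from the code below): classify 'name'
 * against the cached sysfs PMU event symbols: a whole symbol, a prefix or
 * suffix of a hyphenated symbol, or PMU_EVENT_SYMBOL_ERR if it is not a
 * known PMU event.
 */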
2021 enum perf_pmu_event_symbol_type
2022 perf_pmu__parse_check(const char *name)
2023 {
2024         struct perf_pmu_event_symbol p, *r;
2025
2026         /* scan kernel pmu events from sysfs if needed */
2027         if (perf_pmu_events_list_num == 0)
2028                 perf_pmu__parse_init();
2029         /*
2030          * The name "cpu" could be a prefix of cpu-cycles or of cpu// events.
2031          * cpu-cycles has already been handled by the hardcoded symbols,
2032          * so this must be a cpu// event, not a kernel PMU event.
2033          */
2034         if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
2035                 return PMU_EVENT_SYMBOL_ERR;
2036
2037         p.symbol = strdup(name);
2038         r = bsearch(&p, perf_pmu_events_list,
2039                         (size_t) perf_pmu_events_list_num,
2040                         sizeof(struct perf_pmu_event_symbol), comp_pmu);
2041         zfree(&p.symbol);
2042         return r ? r->type : PMU_EVENT_SYMBOL_ERR;
2043 }
2044
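/*
 * Run the flex/bison event parser over 'str'; parse_state selects the start
 * token (events or terms) and collects the results.
 */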
2045 static int parse_events__scanner(const char *str,
2046                                  struct parse_events_state *parse_state)
2047 {
2048         YY_BUFFER_STATE buffer;
2049         void *scanner;
2050         int ret;
2051
2052         ret = parse_events_lex_init_extra(parse_state, &scanner);
2053         if (ret)
2054                 return ret;
2055
2056         buffer = parse_events__scan_string(str, scanner);
2057
2058 #ifdef PARSER_DEBUG
2059         parse_events_debug = 1;
2060         parse_events_set_debug(1, scanner);
2061 #endif
2062         ret = parse_events_parse(parse_state, scanner);
2063
2064         parse_events__flush_buffer(buffer, scanner);
2065         parse_events__delete_buffer(buffer, scanner);
2066         parse_events_lex_destroy(scanner);
2067         return ret;
2068 }
2069
2070 /*
2071  * parse event config string, return a list of event terms.
2072  */
2073 int parse_events_terms(struct list_head *terms, const char *str)
2074 {
2075         struct parse_events_state parse_state = {
2076                 .terms  = NULL,
2077                 .stoken = PE_START_TERMS,
2078         };
2079         int ret;
2080
2081         ret = parse_events__scanner(str, &parse_state);
2082         perf_pmu__parse_cleanup();
2083
2084         if (!ret) {
2085                 list_splice(parse_state.terms, terms);
2086                 zfree(&parse_state.terms);
2087                 return 0;
2088         }
2089
2090         parse_events_terms__delete(parse_state.terms);
2091         return ret;
2092 }
2093
2094 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
2095                                          const char *str, char *pmu_name,
2096                                          struct list_head *list)
2097 {
2098         struct parse_events_state ps = {
2099                 .list            = LIST_HEAD_INIT(ps.list),
2100                 .stoken          = PE_START_EVENTS,
2101                 .hybrid_pmu_name = pmu_name,
2102                 .idx             = parse_state->idx,
2103         };
2104         int ret;
2105
2106         ret = parse_events__scanner(str, &ps);
2107         perf_pmu__parse_cleanup();
2108
2109         if (!ret) {
2110                 if (!list_empty(&ps.list)) {
2111                         list_splice(&ps.list, list);
2112                         parse_state->idx = ps.idx;
2113                         return 0;
2114                 } else
2115                         return -1;
2116         }
2117
2118         return ret;
2119 }
2120
2121 __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
2122 {
2123         /* Order by insertion index. */
2124         return lhs->core.idx - rhs->core.idx;
2125 }
2126
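/*
 * list_sort() comparator: order events by group leader, then by PMU name
 * (groups cannot span PMUs), then by the architecture-specific rule in
 * arch_evlist__cmp().
 */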
2127 static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
2128 {
2129         const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
2130         const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
2131         const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
2132         const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
2133         int *leader_idx = state;
2134         int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret;
2135         const char *lhs_pmu_name, *rhs_pmu_name;
2136
2137         /*
2138          * First sort by grouping/leader. Read the leader idx only if the evsel
2139          * is part of a group, as -1 indicates no group.
2140          */
2141         if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1)
2142                 lhs_leader_idx = lhs_core->leader->idx;
2143         if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1)
2144                 rhs_leader_idx = rhs_core->leader->idx;
2145
2146         if (lhs_leader_idx != rhs_leader_idx)
2147                 return lhs_leader_idx - rhs_leader_idx;
2148
2149         /* Group by PMU. Groups can't span PMUs. */
2150         lhs_pmu_name = evsel__group_pmu_name(lhs);
2151         rhs_pmu_name = evsel__group_pmu_name(rhs);
2152         ret = strcmp(lhs_pmu_name, rhs_pmu_name);
2153         if (ret)
2154                 return ret;
2155
2156         /* Architecture specific sorting. */
2157         return arch_evlist__cmp(lhs, rhs);
2158 }
2159
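/*
 * Sort the parsed events with evlist__cmp() and then recompute group leaders
 * and nr_members so that no group spans more than one PMU.
 */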
2160 static void parse_events__sort_events_and_fix_groups(struct list_head *list)
2161 {
2162         int idx = -1;
2163         struct evsel *pos, *cur_leader = NULL;
2164         struct perf_evsel *cur_leaders_grp = NULL;
2165
2166         /*
2167          * Compute index to insert ungrouped events at. Place them where the
2168          * first ungrouped event appears.
2169          */
2170         list_for_each_entry(pos, list, core.node) {
2171                 const struct evsel *pos_leader = evsel__leader(pos);
2172
2173                 if (pos != pos_leader || pos->core.nr_members > 1)
2174                         continue;
2175
2176                 idx = pos->core.idx;
2177                 break;
2178         }
2179
2180         /* Sort events. */
2181         list_sort(&idx, list, evlist__cmp);
2182
2183         /*
2184          * Recompute groups, splitting for PMUs and adding groups for events
2185          * that require them.
2186          */
2187         idx = 0;
2188         list_for_each_entry(pos, list, core.node) {
2189                 const struct evsel *pos_leader = evsel__leader(pos);
2190                 const char *pos_pmu_name = evsel__group_pmu_name(pos);
2191                 const char *cur_leader_pmu_name, *pos_leader_pmu_name;
2192                 bool force_grouped = arch_evsel__must_be_in_group(pos);
2193
2194                 /* Reset index and nr_members. */
2195                 pos->core.idx = idx++;
2196                 pos->core.nr_members = 0;
2197
2198                 /*
2199                  * Set the group leader respecting the given groupings and that
2200                  * groups can't span PMUs.
2201                  */
2202                 if (!cur_leader)
2203                         cur_leader = pos;
2204
2205                 cur_leader_pmu_name = evsel__group_pmu_name(cur_leader);
2206                 if ((cur_leaders_grp != pos->core.leader && !force_grouped) ||
2207                     strcmp(cur_leader_pmu_name, pos_pmu_name)) {
2208                         /* Event is for a different group/PMU than last. */
2209                         cur_leader = pos;
2210                         /*
2211                          * Remember the leader's group before it is overwritten,
2212                          * so that later events match as being in the same
2213                          * group.
2214                          */
2215                         cur_leaders_grp = pos->core.leader;
2216                 }
2217                 pos_leader_pmu_name = evsel__group_pmu_name(pos_leader);
2218                 if (strcmp(pos_leader_pmu_name, pos_pmu_name) || force_grouped) {
2219                         /*
2220                          * Event's PMU differs from its leader's. Groups can't
2221                          * span PMUs, so update leader from the group/PMU
2222                          * tracker.
2223                          */
2224                         evsel__set_leader(pos, cur_leader);
2225                 }
2226         }
2227         list_for_each_entry(pos, list, core.node) {
2228                 pos->core.leader->nr_members++;
2229         }
2230 }
2231
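/*
 * Editor's note (summary inferred from the code below): parse an event
 * description string such as "cycles,instructions" or "cpu/event=0x3c/u" and
 * append the resulting evsels to 'evlist'. A non-NULL 'fake_pmu' lets the
 * parse succeed without the named PMU being present, which is useful when
 * only validating syntax.
 */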
2232 int __parse_events(struct evlist *evlist, const char *str,
2233                    struct parse_events_error *err, struct perf_pmu *fake_pmu)
2234 {
2235         struct parse_events_state parse_state = {
2236                 .list     = LIST_HEAD_INIT(parse_state.list),
2237                 .idx      = evlist->core.nr_entries,
2238                 .error    = err,
2239                 .evlist   = evlist,
2240                 .stoken   = PE_START_EVENTS,
2241                 .fake_pmu = fake_pmu,
2242         };
2243         int ret;
2244
2245         ret = parse_events__scanner(str, &parse_state);
2246         perf_pmu__parse_cleanup();
2247
2248         if (!ret && list_empty(&parse_state.list)) {
2249                 WARN_ONCE(true, "WARNING: event parser found nothing\n");
2250                 return -1;
2251         }
2252
2253         parse_events__sort_events_and_fix_groups(&parse_state.list);
2254
2255         /*
2256          * Add list to the evlist even with errors to allow callers to clean up.
2257          */
2258         evlist__splice_list_tail(evlist, &parse_state.list);
2259
2260         if (!ret) {
2261                 struct evsel *last;
2262
2263                 evlist->core.nr_groups += parse_state.nr_groups;
2264                 last = evlist__last(evlist);
2265                 last->cmdline_group_boundary = true;
2266
2267                 return 0;
2268         }
2269
2270         /*
2271          * There are two users - the builtin-record and builtin-test objects.
2272          * Both call evlist__delete() in case of error, so we don't
2273          * need to free the list here.
2274          */
2275         return ret;
2276 }
2277
2278 int parse_event(struct evlist *evlist, const char *str)
2279 {
2280         struct parse_events_error err;
2281         int ret;
2282
2283         parse_events_error__init(&err);
2284         ret = parse_events(evlist, str, &err);
2285         parse_events_error__exit(&err);
2286         return ret;
2287 }
2288
2289 void parse_events_error__init(struct parse_events_error *err)
2290 {
2291         bzero(err, sizeof(*err));
2292 }
2293
2294 void parse_events_error__exit(struct parse_events_error *err)
2295 {
2296         zfree(&err->str);
2297         zfree(&err->help);
2298         zfree(&err->first_str);
2299         zfree(&err->first_help);
2300 }
2301
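/*
 * Record a parsing error. The first reported error is preserved in the
 * first_* fields once a second one arrives; any further errors replace the
 * current one and the previous message is dropped. Takes ownership of 'str'
 * and 'help'.
 */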
2302 void parse_events_error__handle(struct parse_events_error *err, int idx,
2303                                 char *str, char *help)
2304 {
2305         if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
2306                 goto out_free;
2307         switch (err->num_errors) {
2308         case 0:
2309                 err->idx = idx;
2310                 err->str = str;
2311                 err->help = help;
2312                 break;
2313         case 1:
2314                 err->first_idx = err->idx;
2315                 err->idx = idx;
2316                 err->first_str = err->str;
2317                 err->str = str;
2318                 err->first_help = err->help;
2319                 err->help = help;
2320                 break;
2321         default:
2322                 pr_debug("Multiple errors dropping message: %s (%s)\n",
2323                         err->str, err->help);
2324                 free(err->str);
2325                 err->str = str;
2326                 free(err->help);
2327                 err->help = help;
2328                 break;
2329         }
2330         err->num_errors++;
2331         return;
2332
2333 out_free:
2334         free(str);
2335         free(help);
2336 }
2337
2338 #define MAX_WIDTH 1000
2339 static int get_term_width(void)
2340 {
2341         struct winsize ws;
2342
2343         get_term_dimensions(&ws);
2344         return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
2345 }
2346
2347 static void __parse_events_error__print(int err_idx, const char *err_str,
2348                                         const char *err_help, const char *event)
2349 {
2350         const char *str = "invalid or unsupported event: ";
2351         char _buf[MAX_WIDTH];
2352         char *buf = (char *) event;
2353         int idx = 0;
2354         if (err_str) {
2355                 /* -2 for extra '' in the final fprintf */
2356                 int width       = get_term_width() - 2;
2357                 int len_event   = strlen(event);
2358                 int len_str, max_len, cut = 0;
2359
2360                 /*
2361                  * Maximum error index indent; we will cut
2362                  * the event string if the index is bigger.
2363                  */
2364                 int max_err_idx = 13;
2365
2366                 /*
2367                  * Let's be specific with the message when
2368                  * we have the precise error.
2369                  */
2370                 str     = "event syntax error: ";
2371                 len_str = strlen(str);
2372                 max_len = width - len_str;
2373
2374                 buf = _buf;
2375
2376                 /* We're cutting from the beginning. */
2377                 if (err_idx > max_err_idx)
2378                         cut = err_idx - max_err_idx;
2379
2380                 strncpy(buf, event + cut, max_len);
2381
2382                 /* Mark cut parts with '..' on both sides. */
2383                 if (cut)
2384                         buf[0] = buf[1] = '.';
2385
2386                 if ((len_event - cut) > max_len) {
2387                         buf[max_len - 1] = buf[max_len - 2] = '.';
2388                         buf[max_len] = 0;
2389                 }
2390
2391                 idx = len_str + err_idx - cut;
2392         }
2393
2394         fprintf(stderr, "%s'%s'\n", str, buf);
2395         if (idx) {
2396                 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2397                 if (err_help)
2398                         fprintf(stderr, "\n%s\n", err_help);
2399         }
2400 }
2401
2402 void parse_events_error__print(struct parse_events_error *err,
2403                                const char *event)
2404 {
2405         if (!err->num_errors)
2406                 return;
2407
2408         __parse_events_error__print(err->idx, err->str, err->help, event);
2409
2410         if (err->num_errors > 1) {
2411                 fputs("\nInitial error:\n", stderr);
2412                 __parse_events_error__print(err->first_idx, err->first_str,
2413                                         err->first_help, event);
2414         }
2415 }
2416
2417 #undef MAX_WIDTH
2418
2419 int parse_events_option(const struct option *opt, const char *str,
2420                         int unset __maybe_unused)
2421 {
2422         struct evlist *evlist = *(struct evlist **)opt->value;
2423         struct parse_events_error err;
2424         int ret;
2425
2426         parse_events_error__init(&err);
2427         ret = parse_events(evlist, str, &err);
2428
2429         if (ret) {
2430                 parse_events_error__print(&err, str);
2431                 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2432         }
2433         parse_events_error__exit(&err);
2434
2435         return ret;
2436 }
2437
2438 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2439 {
2440         struct evlist **evlistp = opt->value;
2441         int ret;
2442
2443         if (*evlistp == NULL) {
2444                 *evlistp = evlist__new();
2445
2446                 if (*evlistp == NULL) {
2447                         fprintf(stderr, "Not enough memory to create evlist\n");
2448                         return -1;
2449                 }
2450         }
2451
2452         ret = parse_events_option(opt, str, unset);
2453         if (ret) {
2454                 evlist__delete(*evlistp);
2455                 *evlistp = NULL;
2456         }
2457
2458         return ret;
2459 }
2460
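/*
 * Apply 'func' to each evsel added by the most recently parsed event string,
 * walking backwards from the last evsel until the previous
 * cmdline_group_boundary (or the head of the list) is reached.
 */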
2461 static int
2462 foreach_evsel_in_last_glob(struct evlist *evlist,
2463                            int (*func)(struct evsel *evsel,
2464                                        const void *arg),
2465                            const void *arg)
2466 {
2467         struct evsel *last = NULL;
2468         int err;
2469
2470         /*
2471          * Don't return when the list is empty; give func a chance to report
2472          * an error when it finds last == NULL.
2473          *
2474          * So there is no need to WARN here; let *func do it.
2475          */
2476         if (evlist->core.nr_entries > 0)
2477                 last = evlist__last(evlist);
2478
2479         do {
2480                 err = (*func)(last, arg);
2481                 if (err)
2482                         return -1;
2483                 if (!last)
2484                         return 0;
2485
2486                 if (last->core.node.prev == &evlist->core.entries)
2487                         return 0;
2488                 last = list_entry(last->core.node.prev, struct evsel, core.node);
2489         } while (!last->cmdline_group_boundary);
2490
2491         return 0;
2492 }
2493
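/*
 * Attach a filter string to an evsel: as a tracepoint filter for tracepoint
 * events, or as an address filter for PMUs that advertise nr_addr_filters in
 * sysfs.
 */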
2494 static int set_filter(struct evsel *evsel, const void *arg)
2495 {
2496         const char *str = arg;
2497         bool found = false;
2498         int nr_addr_filters = 0;
2499         struct perf_pmu *pmu = NULL;
2500
2501         if (evsel == NULL) {
2502                 fprintf(stderr,
2503                         "--filter option should follow a -e tracepoint or HW tracer option\n");
2504                 return -1;
2505         }
2506
2507         if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2508                 if (evsel__append_tp_filter(evsel, str) < 0) {
2509                         fprintf(stderr,
2510                                 "not enough memory to hold filter string\n");
2511                         return -1;
2512                 }
2513
2514                 return 0;
2515         }
2516
2517         while ((pmu = perf_pmu__scan(pmu)) != NULL)
2518                 if (pmu->type == evsel->core.attr.type) {
2519                         found = true;
2520                         break;
2521                 }
2522
2523         if (found)
2524                 perf_pmu__scan_file(pmu, "nr_addr_filters",
2525                                     "%d", &nr_addr_filters);
2526
2527         if (!nr_addr_filters) {
2528                 fprintf(stderr,
2529                         "This CPU does not support address filtering\n");
2530                 return -1;
2531         }
2532
2533         if (evsel__append_addr_filter(evsel, str) < 0) {
2534                 fprintf(stderr,
2535                         "not enough memory to hold filter string\n");
2536                 return -1;
2537         }
2538
2539         return 0;
2540 }
2541
2542 int parse_filter(const struct option *opt, const char *str,
2543                  int unset __maybe_unused)
2544 {
2545         struct evlist *evlist = *(struct evlist **)opt->value;
2546
2547         return foreach_evsel_in_last_glob(evlist, set_filter,
2548                                           (const void *)str);
2549 }
2550
2551 static int add_exclude_perf_filter(struct evsel *evsel,
2552                                    const void *arg __maybe_unused)
2553 {
2554         char new_filter[64];
2555
2556         if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2557                 fprintf(stderr,
2558                         "--exclude-perf option should follow a -e tracepoint option\n");
2559                 return -1;
2560         }
2561
2562         snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2563
2564         if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2565                 fprintf(stderr,
2566                         "not enough memory to hold filter string\n");
2567                 return -1;
2568         }
2569
2570         return 0;
2571 }
2572
2573 int exclude_perf(const struct option *opt,
2574                  const char *arg __maybe_unused,
2575                  int unset __maybe_unused)
2576 {
2577         struct evlist *evlist = *(struct evlist **)opt->value;
2578
2579         return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
2580                                           NULL);
2581 }
2582
2583 int parse_events__is_hardcoded_term(struct parse_events_term *term)
2584 {
2585         return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
2586 }
2587
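/*
 * Allocate a parse_events_term from the template 'temp', storing either the
 * numeric or the string value depending on type_val.
 */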
2588 static int new_term(struct parse_events_term **_term,
2589                     struct parse_events_term *temp,
2590                     char *str, u64 num)
2591 {
2592         struct parse_events_term *term;
2593
2594         term = malloc(sizeof(*term));
2595         if (!term)
2596                 return -ENOMEM;
2597
2598         *term = *temp;
2599         INIT_LIST_HEAD(&term->list);
2600         term->weak = false;
2601
2602         switch (term->type_val) {
2603         case PARSE_EVENTS__TERM_TYPE_NUM:
2604                 term->val.num = num;
2605                 break;
2606         case PARSE_EVENTS__TERM_TYPE_STR:
2607                 term->val.str = str;
2608                 break;
2609         default:
2610                 free(term);
2611                 return -EINVAL;
2612         }
2613
2614         *_term = term;
2615         return 0;
2616 }
2617
2618 int parse_events_term__num(struct parse_events_term **term,
2619                            int type_term, char *config, u64 num,
2620                            bool no_value,
2621                            void *loc_term_, void *loc_val_)
2622 {
2623         YYLTYPE *loc_term = loc_term_;
2624         YYLTYPE *loc_val = loc_val_;
2625
2626         struct parse_events_term temp = {
2627                 .type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
2628                 .type_term = type_term,
2629                 .config    = config ? : strdup(config_term_names[type_term]),
2630                 .no_value  = no_value,
2631                 .err_term  = loc_term ? loc_term->first_column : 0,
2632                 .err_val   = loc_val  ? loc_val->first_column  : 0,
2633         };
2634
2635         return new_term(term, &temp, NULL, num);
2636 }
2637
2638 int parse_events_term__str(struct parse_events_term **term,
2639                            int type_term, char *config, char *str,
2640                            void *loc_term_, void *loc_val_)
2641 {
2642         YYLTYPE *loc_term = loc_term_;
2643         YYLTYPE *loc_val = loc_val_;
2644
2645         struct parse_events_term temp = {
2646                 .type_val  = PARSE_EVENTS__TERM_TYPE_STR,
2647                 .type_term = type_term,
2648                 .config    = config,
2649                 .err_term  = loc_term ? loc_term->first_column : 0,
2650                 .err_val   = loc_val  ? loc_val->first_column  : 0,
2651         };
2652
2653         return new_term(term, &temp, str, 0);
2654 }
2655
2656 int parse_events_term__sym_hw(struct parse_events_term **term,
2657                               char *config, unsigned idx)
2658 {
2659         struct event_symbol *sym;
2660         char *str;
2661         struct parse_events_term temp = {
2662                 .type_val  = PARSE_EVENTS__TERM_TYPE_STR,
2663                 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
2664                 .config    = config,
2665         };
2666
2667         if (!temp.config) {
2668                 temp.config = strdup("event");
2669                 if (!temp.config)
2670                         return -ENOMEM;
2671         }
2672         BUG_ON(idx >= PERF_COUNT_HW_MAX);
2673         sym = &event_symbols_hw[idx];
2674
2675         str = strdup(sym->symbol);
2676         if (!str)
2677                 return -ENOMEM;
2678         return new_term(term, &temp, str, 0);
2679 }
2680
2681 int parse_events_term__clone(struct parse_events_term **new,
2682                              struct parse_events_term *term)
2683 {
2684         char *str;
2685         struct parse_events_term temp = {
2686                 .type_val  = term->type_val,
2687                 .type_term = term->type_term,
2688                 .config    = NULL,
2689                 .err_term  = term->err_term,
2690                 .err_val   = term->err_val,
2691         };
2692
2693         if (term->config) {
2694                 temp.config = strdup(term->config);
2695                 if (!temp.config)
2696                         return -ENOMEM;
2697         }
2698         if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2699                 return new_term(new, &temp, NULL, term->val.num);
2700
2701         str = strdup(term->val.str);
2702         if (!str)
2703                 return -ENOMEM;
2704         return new_term(new, &temp, str, 0);
2705 }
2706
2707 void parse_events_term__delete(struct parse_events_term *term)
2708 {
2709         if (term->array.nr_ranges)
2710                 zfree(&term->array.ranges);
2711
2712         if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
2713                 zfree(&term->val.str);
2714
2715         zfree(&term->config);
2716         free(term);
2717 }
2718
2719 int parse_events_copy_term_list(struct list_head *old,
2720                                  struct list_head **new)
2721 {
2722         struct parse_events_term *term, *n;
2723         int ret;
2724
2725         if (!old) {
2726                 *new = NULL;
2727                 return 0;
2728         }
2729
2730         *new = malloc(sizeof(struct list_head));
2731         if (!*new)
2732                 return -ENOMEM;
2733         INIT_LIST_HEAD(*new);
2734
2735         list_for_each_entry (term, old, list) {
2736                 ret = parse_events_term__clone(&n, term);
2737                 if (ret)
2738                         return ret;
2739                 list_add_tail(&n->list, *new);
2740         }
2741         return 0;
2742 }
2743
2744 void parse_events_terms__purge(struct list_head *terms)
2745 {
2746         struct parse_events_term *term, *h;
2747
2748         list_for_each_entry_safe(term, h, terms, list) {
2749                 list_del_init(&term->list);
2750                 parse_events_term__delete(term);
2751         }
2752 }
2753
2754 void parse_events_terms__delete(struct list_head *terms)
2755 {
2756         if (!terms)
2757                 return;
2758         parse_events_terms__purge(terms);
2759         free(terms);
2760 }
2761
2762 void parse_events__clear_array(struct parse_events_array *a)
2763 {
2764         zfree(&a->ranges);
2765 }
2766
2767 void parse_events_evlist_error(struct parse_events_state *parse_state,
2768                                int idx, const char *str)
2769 {
2770         if (!parse_state->error)
2771                 return;
2772
2773         parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
2774 }
2775
2776 static void config_terms_list(char *buf, size_t buf_sz)
2777 {
2778         int i;
2779         bool first = true;
2780
2781         buf[0] = '\0';
2782         for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
2783                 const char *name = config_term_names[i];
2784
2785                 if (!config_term_avail(i, NULL))
2786                         continue;
2787                 if (!name)
2788                         continue;
2789                 if (name[0] == '<')
2790                         continue;
2791
2792                 if (strlen(buf) + strlen(name) + 2 >= buf_sz)
2793                         return;
2794
2795                 if (!first)
2796                         strcat(buf, ",");
2797                 else
2798                         first = false;
2799                 strcat(buf, name);
2800         }
2801 }
2802
2803 /*
2804  * Return a string containing the valid config terms of an event.
2805  * @additional_terms: For terms such as PMU sysfs terms.
2806  */
2807 char *parse_events_formats_error_string(char *additional_terms)
2808 {
2809         char *str;
2810         /* "no-overwrite" is the longest name */
2811         char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
2812                           (sizeof("no-overwrite") - 1)];
2813
2814         config_terms_list(static_terms, sizeof(static_terms));
2815         /* valid terms */
2816         if (additional_terms) {
2817                 if (asprintf(&str, "valid terms: %s,%s",
2818                              additional_terms, static_terms) < 0)
2819                         goto fail;
2820         } else {
2821                 if (asprintf(&str, "valid terms: %s", static_terms) < 0)
2822                         goto fail;
2823         }
2824         return str;
2825
2826 fail:
2827         return NULL;
2828 }
2829
2830 struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
2831                                              struct perf_event_attr *attr,
2832                                              const char *name,
2833                                              const char *metric_id,
2834                                              struct perf_pmu *pmu,
2835                                              struct list_head *config_terms)
2836 {
2837         return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
2838                            pmu, config_terms, /*auto_merge_stats=*/false,
2839                            /*cpu_list=*/NULL);
2840 }