perf lock contention: Update default map size to 16384
[platform/kernel/linux-starfive.git] / tools / perf / util / bpf-loader.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bpf-loader.c
4  *
5  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6  * Copyright (C) 2015 Huawei Inc.
7  */
8
9 #include <linux/bpf.h>
10 #include <bpf/libbpf.h>
11 #include <bpf/bpf.h>
12 #include <linux/filter.h>
13 #include <linux/err.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/zalloc.h>
17 #include <errno.h>
18 #include <stdlib.h>
19 #include "debug.h"
20 #include "evlist.h"
21 #include "bpf-loader.h"
22 #include "bpf-prologue.h"
23 #include "probe-event.h"
24 #include "probe-finder.h" // for MAX_PROBES
25 #include "parse-events.h"
26 #include "strfilter.h"
27 #include "util.h"
28 #include "llvm-utils.h"
29 #include "c++/clang-c.h"
30 #include "util/hashmap.h"
31 #include "asm/bug.h"
32
33 #include <internal/xyarray.h>
34
35 /* temporarily disable libbpf deprecation warnings */
36 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
37
/* Route libbpf's internal log messages into perf's debug output (level 1). */
static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
			      const char *fmt, va_list args)
{
	return veprintf(1, verbose, pr_fmt(fmt), args);
}
43
/*
 * Per-program private state, stored in bpf_program_hash keyed by the
 * bpf_program pointer.
 */
struct bpf_prog_priv {
	bool is_tp;			/* attaches to a tracepoint, not a k/uprobe */
	char *sys_name;			/* tracepoint subsystem (is_tp only) */
	char *evt_name;			/* tracepoint event name (is_tp only) */
	struct perf_probe_event pev;	/* parsed probe spec (k/uprobe case) */
	bool need_prologue;		/* some tev has args: prologue required */
	struct bpf_insn *insns_buf;	/* scratch buffer for prologue generation */
	int nr_types;			/* number of distinct prologue types */
	int *type_mapping;		/* tev index -> prologue type number */
	int *prologue_fds;		/* per-tev loaded prog fds, -1 when unset */
};
55
/* Node linking one opened bpf_object into the global bpf_objects_list. */
struct bpf_perf_object {
	struct list_head list;		/* entry in bpf_objects_list */
	struct bpf_object *obj;		/* the libbpf object being tracked */
};
60
/* Output of preproc_gen_prologue(): the rewritten instruction stream. */
struct bpf_preproc_result {
	struct bpf_insn *new_insn_ptr;	/* prologue followed by original insns */
	int new_insn_cnt;		/* total instruction count */
};
65
66 static LIST_HEAD(bpf_objects_list);
67 static struct hashmap *bpf_program_hash;
68 static struct hashmap *bpf_map_hash;
69
70 static struct bpf_perf_object *
71 bpf_perf_object__next(struct bpf_perf_object *prev)
72 {
73         if (!prev) {
74                 if (list_empty(&bpf_objects_list))
75                         return NULL;
76
77                 return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
78         }
79         if (list_is_last(&prev->list, &bpf_objects_list))
80                 return NULL;
81
82         return list_next_entry(prev, list);
83 }
84
/*
 * Iterate over all registered bpf_perf_objects. The successor is fetched
 * up front into @tmp, so the current node may be removed/freed safely.
 */
#define bpf_perf_object__for_each(perf_obj, tmp)	\
	for ((perf_obj) = bpf_perf_object__next(NULL),	\
	     (tmp) = bpf_perf_object__next(perf_obj);	\
	     (perf_obj) != NULL;			\
	     (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
90
91 static bool libbpf_initialized;
92 static int libbpf_sec_handler;
93
94 static int bpf_perf_object__add(struct bpf_object *obj)
95 {
96         struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
97
98         if (perf_obj) {
99                 INIT_LIST_HEAD(&perf_obj->list);
100                 perf_obj->obj = obj;
101                 list_add_tail(&perf_obj->list, &bpf_objects_list);
102         }
103         return perf_obj ? 0 : -ENOMEM;
104 }
105
106 static void *program_priv(const struct bpf_program *prog)
107 {
108         void *priv;
109
110         if (IS_ERR_OR_NULL(bpf_program_hash))
111                 return NULL;
112         if (!hashmap__find(bpf_program_hash, prog, &priv))
113                 return NULL;
114         return priv;
115 }
116
/*
 * Placeholder instructions prepended to programs that need a prologue:
 * they zero the argument registers r2-r5 and are later overwritten by the
 * generated prologue (see bpf_object__load_prologue()).
 */
static struct bpf_insn prologue_init_insn[] = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
};
123
124 static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
125                                        struct bpf_prog_load_opts *opts __maybe_unused,
126                                        long cookie __maybe_unused)
127 {
128         size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
129         size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
130         struct bpf_prog_priv *priv = program_priv(prog);
131         const struct bpf_insn *orig_insn;
132         struct bpf_insn *insn;
133
134         if (IS_ERR_OR_NULL(priv)) {
135                 pr_debug("bpf: failed to get private field\n");
136                 return -BPF_LOADER_ERRNO__INTERNAL;
137         }
138
139         if (!priv->need_prologue)
140                 return 0;
141
142         /* prepend initialization code to program instructions */
143         orig_insn = bpf_program__insns(prog);
144         orig_insn_cnt = bpf_program__insn_cnt(prog);
145         init_size = init_size_cnt * sizeof(*insn);
146         orig_size = orig_insn_cnt * sizeof(*insn);
147
148         insn_cnt = orig_insn_cnt + init_size_cnt;
149         insn = malloc(insn_cnt * sizeof(*insn));
150         if (!insn)
151                 return -ENOMEM;
152
153         memcpy(insn, prologue_init_insn, init_size);
154         memcpy((char *) insn + init_size, orig_insn, orig_size);
155         bpf_program__set_insns(prog, insn, insn_cnt);
156         return 0;
157 }
158
/*
 * One-shot libbpf setup: route libbpf logging through perf and register a
 * kprobe section handler whose prepare_load hook reserves prologue space.
 * Idempotent via the libbpf_initialized flag.
 */
static int libbpf_init(void)
{
	LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
		.prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
	);

	if (libbpf_initialized)
		return 0;

	libbpf_set_print(libbpf_perf_print);
	libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
							  0, &handler_opts);
	if (libbpf_sec_handler < 0) {
		pr_debug("bpf: failed to register libbpf section handler: %d\n",
			 libbpf_sec_handler);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	libbpf_initialized = true;
	return 0;
}
179
180 struct bpf_object *
181 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
182 {
183         LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
184         struct bpf_object *obj;
185         int err;
186
187         err = libbpf_init();
188         if (err)
189                 return ERR_PTR(err);
190
191         obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
192         if (IS_ERR_OR_NULL(obj)) {
193                 pr_debug("bpf: failed to load buffer\n");
194                 return ERR_PTR(-EINVAL);
195         }
196
197         if (bpf_perf_object__add(obj)) {
198                 bpf_object__close(obj);
199                 return ERR_PTR(-ENOMEM);
200         }
201
202         return obj;
203 }
204
/* Unlink @perf_obj from the global list, close its bpf_object, free the node. */
static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
{
	list_del(&perf_obj->list);
	bpf_object__close(perf_obj->obj);
	free(perf_obj);
}
211
/*
 * Open a BPF object from @filename and register it in the global list.
 *
 * When @source is true the file is C source: try the builtin clang first,
 * fall back to the external LLVM pipeline, then open the compiled buffer.
 * Otherwise open the file directly as a compiled object. Returns the
 * bpf_object, or NULL/ERR_PTR on failure.
 */
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
	struct bpf_object *obj;
	int err;

	err = libbpf_init();
	if (err)
		return ERR_PTR(err);

	if (source) {
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);

		/* optionally dump the compiled object next to the source */
		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		/* open_mem copied the buffer; the compile output can go */
		free(obj_buf);
	} else {
		obj = bpf_object__open(filename);
	}

	if (IS_ERR_OR_NULL(obj)) {
		/* hand back the original NULL/ERR_PTR so callers see the cause */
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	if (bpf_perf_object__add(obj)) {
		bpf_object__close(obj);
		return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
	}

	return obj;
}
258
259 static void close_prologue_programs(struct bpf_prog_priv *priv)
260 {
261         struct perf_probe_event *pev;
262         int i, fd;
263
264         if (!priv->need_prologue)
265                 return;
266         pev = &priv->pev;
267         for (i = 0; i < pev->ntevs; i++) {
268                 fd = priv->prologue_fds[i];
269                 if (fd != -1)
270                         close(fd);
271         }
272 }
273
/*
 * Release everything owned by a program's private data — prologue fds,
 * probe events, scratch buffers, name copies — then the priv itself.
 */
static void
clear_prog_priv(const struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	close_prologue_programs(priv);
	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->prologue_fds);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}
289
/* Free every stored program priv, then the program hash map itself. */
static void bpf_program_hash_free(void)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (IS_ERR_OR_NULL(bpf_program_hash))
		return;

	hashmap__for_each_entry(bpf_program_hash, cur, bkt)
		clear_prog_priv(cur->pkey, cur->pvalue);

	hashmap__free(bpf_program_hash);
	bpf_program_hash = NULL;
}
304
305 static void bpf_map_hash_free(void);
306
/*
 * Global teardown: remove kernel probes and close every registered BPF
 * object, then drop the program and map private-data caches.
 */
void bpf__clear(void)
{
	struct bpf_perf_object *perf_obj, *tmp;

	bpf_perf_object__for_each(perf_obj, tmp) {
		bpf__unprobe(perf_obj->obj);
		bpf_perf_object__close(perf_obj);
	}

	bpf_program_hash_free();
	bpf_map_hash_free();
}
319
/* Identity hash for hashmap keys: the pointer value itself is the hash. */
static size_t ptr_hash(const long key, void *ctx __attribute__((unused)))
{
	return (size_t)key;
}
324
/* Key comparator for hashmap: raw pointer values match only when identical. */
static bool ptr_equal(long key1, long key2, void *ctx __attribute__((unused)))
{
	if (key1 == key2)
		return true;
	return false;
}
329
/*
 * Associate @priv with @prog in bpf_program_hash, creating the map on
 * first use. Any previously stored priv for @prog is released first.
 */
static int program_set_priv(struct bpf_program *prog, void *priv)
{
	void *old_priv;

	/*
	 * Should not happen, we warn about it in the
	 * caller function - config_bpf_program
	 */
	if (IS_ERR(bpf_program_hash))
		return PTR_ERR(bpf_program_hash);

	if (!bpf_program_hash) {
		bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
		if (IS_ERR(bpf_program_hash))
			return PTR_ERR(bpf_program_hash);
	}

	old_priv = program_priv(prog);
	if (old_priv) {
		/* replace in place: free the old value, overwrite the slot */
		clear_prog_priv(prog, old_priv);
		return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
	}
	return hashmap__add(bpf_program_hash, prog, priv);
}
354
355 static int
356 prog_config__exec(const char *value, struct perf_probe_event *pev)
357 {
358         pev->uprobes = true;
359         pev->target = strdup(value);
360         if (!pev->target)
361                 return -ENOMEM;
362         return 0;
363 }
364
365 static int
366 prog_config__module(const char *value, struct perf_probe_event *pev)
367 {
368         pev->uprobes = false;
369         pev->target = strdup(value);
370         if (!pev->target)
371                 return -ENOMEM;
372         return 0;
373 }
374
/*
 * Parse a yes/no style @value into *@pbool, optionally inverting the
 * result. Returns 0 on success, -EINVAL for a NULL destination, or the
 * strtobool() error for unparseable input.
 */
static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	bool parsed;
	int err;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &parsed);
	if (err)
		return err;

	/* @invert acts as XOR: it flips the parsed value when set */
	*pbool = parsed != invert;
	return 0;
}
391
/* "inlines=yes|no" config term; note the stored flag is inverted (no_inlines). */
static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}
398
/* "force=yes|no" config term: allow adding events whose name already exists. */
static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}
405
/*
 * Table of recognized "key=value" terms in a BPF program's section name;
 * usage/desc strings feed the hint output in do_prog_config().
 */
static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name>    ",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no]        ",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no]          ",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};
437
/*
 * Dispatch one key=value config term to its handler in
 * bpf_prog_config_terms; on an unknown key, log the valid options and
 * fail with -BPF_LOADER_ERRNO__PROGCONF_TERM.
 */
static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}
460
/*
 * Consume leading "key=value;" terms from @config_str, applying each via
 * do_prog_config(). Returns a pointer into @config_str just past the
 * last consumed term (the remaining "main" probe string), or an ERR_PTR
 * on allocation/config failure. Parsing is done on a mutable copy;
 * the returned pointer is computed back into the caller's string.
 */
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			/* malformed term: warn and keep scanning */
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		/* translate the offset in the copy back into config_str */
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}
501
/*
 * Split a section name into config terms and the main probe definition.
 * Sets *@is_tp when the remainder contains no '=' and looks like a
 * "sys:event" tracepoint; otherwise parses it as a perf probe command
 * into @pev. *@p_main_str points into @config_str.
 */
static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, don't need clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}
537
538 static int
539 config_bpf_program(struct bpf_program *prog)
540 {
541         struct perf_probe_event *pev = NULL;
542         struct bpf_prog_priv *priv = NULL;
543         const char *config_str, *main_str;
544         bool is_tp = false;
545         int err;
546
547         /* Initialize per-program probing setting */
548         probe_conf.no_inlines = false;
549         probe_conf.force_add = false;
550
551         priv = calloc(sizeof(*priv), 1);
552         if (!priv) {
553                 pr_debug("bpf: failed to alloc priv\n");
554                 return -ENOMEM;
555         }
556         pev = &priv->pev;
557
558         config_str = bpf_program__section_name(prog);
559         pr_debug("bpf: config program '%s'\n", config_str);
560         err = parse_prog_config(config_str, &main_str, &is_tp, pev);
561         if (err)
562                 goto errout;
563
564         if (is_tp) {
565                 char *s = strchr(main_str, ':');
566
567                 priv->is_tp = true;
568                 priv->sys_name = strndup(main_str, s - main_str);
569                 priv->evt_name = strdup(s + 1);
570                 goto set_priv;
571         }
572
573         if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
574                 pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
575                          config_str, PERF_BPF_PROBE_GROUP);
576                 err = -BPF_LOADER_ERRNO__GROUP;
577                 goto errout;
578         } else if (!pev->group)
579                 pev->group = strdup(PERF_BPF_PROBE_GROUP);
580
581         if (!pev->group) {
582                 pr_debug("bpf: strdup failed\n");
583                 err = -ENOMEM;
584                 goto errout;
585         }
586
587         if (!pev->event) {
588                 pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
589                          config_str);
590                 err = -BPF_LOADER_ERRNO__EVENTNAME;
591                 goto errout;
592         }
593         pr_debug("bpf: config '%s' is ok\n", config_str);
594
595 set_priv:
596         err = program_set_priv(prog, priv);
597         if (err) {
598                 pr_debug("Failed to set priv for program '%s'\n", config_str);
599                 goto errout;
600         }
601
602         return 0;
603
604 errout:
605         if (pev)
606                 clear_perf_probe_event(pev);
607         free(priv);
608         return err;
609 }
610
/*
 * Lazily initialize probe symbol maps and set the probe limit. The result
 * is memoized: a failed init is reported on every later call without
 * retrying init_probe_symbol_maps().
 */
static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init failed the first, bpf__prepare_probe()
	 * fails each time without calling init_probe_symbol_maps multiple
	 * times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}
631
/*
 * Build the final instruction stream for prologue type @n of @prog:
 * a generated prologue (fetching the probe arguments) followed by the
 * original instructions. @res receives priv->insns_buf and the combined
 * instruction count. Tracepoint programs never reach here.
 */
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     const struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_preproc_result *res)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR_OR_NULL(priv) || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	/* leave room for the original program after the prologue */
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__section_name(prog);
		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}
689
/*
 * qsort comparator over probe_trace_event pointers: orders by descending
 * argument count, then argument values, then their reference chains.
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it but this margin is too narrow to contain.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	/* more arguments sorts first */
	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		/* walk both reference chains in lockstep */
		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		/* shorter chain sorts after the longer one */
		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}
735
/*
 * Assign a type number to each tevs in a pev.
 * mapping is an array with same slots as tevs in that pev.
 * nr_types will be set to number of types.
 *
 * tevs with identical argument lists (per compare_tev_args) share one
 * prologue type, so each distinct prologue is generated only once.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	/* sort an array of pointers so equal tevs become neighbours */
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		/* recover the original tev index from the pointer */
		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		/* equal neighbours share a type; otherwise open a new one */
		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}
784
/*
 * Decide whether @prog needs a generated prologue (i.e. any of its trace
 * events fetches arguments) and, if so, allocate the scratch buffers and
 * compute the tev -> prologue-type mapping used at load time.
 */
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int i;

	if (IS_ERR_OR_NULL(priv)) {
		pr_debug("Internal error when hook preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		/* tracepoint programs take no fetched arguments */
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since all tevs don't have argument, we don't need generate
	 * prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	/* fds start at -1 ("not loaded"); see close_prologue_programs() */
	priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
	if (!priv->prologue_fds) {
		pr_debug("Not enough memory: alloc prologue fds failed\n");
		return -ENOMEM;
	}
	memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	return map_prologue(pev, priv->type_mapping, &priv->nr_types);
}
845
/*
 * Configure every program in @obj from its section name, install the
 * corresponding k/uprobes in the kernel, and hook prologue generation
 * for programs that need it. Tracepoint programs only get their type
 * set. Returns 0 on success or a negative error; stops at the first
 * failing program.
 */
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = program_priv(prog);
		if (IS_ERR_OR_NULL(priv)) {
			if (!priv)
				err = -BPF_LOADER_ERRNO__INTERNAL;
			else
				err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
			continue;
		}

		bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
		pev = &priv->pev;

		/* resolve the probe spec into concrete trace events */
		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		/* install the probes into the kernel */
		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, let's consider prologue, which
		 * adds program fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks pre-processor
		 * to bpf_program, let it generate prologue
		 * dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}
908
#define EVENTS_WRITE_BUFSIZE  4096
/*
 * Remove every kernel probe installed for @obj's programs. Best-effort:
 * failures are remembered in @ret but the loop keeps deleting the rest.
 * Tracepoint programs installed nothing, so they are skipped.
 */
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		int i;

		if (IS_ERR_OR_NULL(priv) || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			/* delete by "group:event" filter */
			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}
949
/*
 * For each program in @obj that needs a prologue (priv->need_prologue),
 * generate and load one BPF program per probe trace event, stashing the
 * resulting fds in priv->prologue_fds and unloading the original
 * program.  Programs without a prologue are left untouched.
 * Returns 0 on success or a negative error code.
 */
static int bpf_object__load_prologue(struct bpf_object *obj)
{
	int init_cnt = ARRAY_SIZE(prologue_init_insn);
	const struct bpf_insn *orig_insns;
	struct bpf_preproc_result res;
	struct perf_probe_event *pev;
	struct bpf_program *prog;
	int orig_insns_cnt;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		int err, i, fd;

		if (IS_ERR_OR_NULL(priv)) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (!priv->need_prologue)
			continue;

		/*
		 * For each program that needs prologue we do following:
		 *
		 * - take its current instructions and use them
		 *   to generate the new code with prologue
		 * - load new instructions with bpf_prog_load
		 *   and keep the fd in prologue_fds
		 * - new fd will be used in bpf__foreach_event
		 *   to connect this program with perf evsel
		 */
		orig_insns = bpf_program__insns(prog);
		orig_insns_cnt = bpf_program__insn_cnt(prog);

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			/*
			 * Skipping artificial prologue_init_insn instructions
			 * (init_cnt), so the prologue can be generated instead
			 * of them.
			 */
			err = preproc_gen_prologue(prog, i,
						   orig_insns + init_cnt,
						   orig_insns_cnt - init_cnt,
						   &res);
			if (err)
				return err;

			fd = bpf_prog_load(bpf_program__get_type(prog),
					   bpf_program__name(prog), "GPL",
					   res.new_insn_ptr,
					   res.new_insn_cnt, NULL);
			if (fd < 0) {
				char bf[128];

				libbpf_strerror(-errno, bf, sizeof(bf));
				pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
					 -errno, bf);
				return -errno;
			}
			priv->prologue_fds[i] = fd;
		}
		/*
		 * We no longer need the original program,
		 * we can unload it.
		 */
		bpf_program__unload(prog);
	}
	return 0;
}
1020
/*
 * Load @obj into the kernel, then build and load any prologue variants
 * its programs require.  Returns 0 or a negative error code.
 */
int bpf__load(struct bpf_object *obj)
{
	char errbuf[128];
	int err = bpf_object__load(obj);

	if (err) {
		libbpf_strerror(err, errbuf, sizeof(errbuf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, errbuf);
		return err;
	}
	return bpf_object__load_prologue(obj);
}
1034
/*
 * Invoke @func once per attachable entity in @obj: once per tracepoint
 * program (using the sys/event names cached in priv), or once per probe
 * trace event for k/uprobe programs.  The fd passed to @func is either
 * the program fd or, for prologue-generated programs, the per-event
 * prologue fd.  Iteration stops at the first callback error, which is
 * returned; otherwise returns 0.
 */
int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR_OR_NULL(priv)) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		/* Tracepoints: one callback, fd of the program itself. */
		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
			if (err) {
				pr_debug("bpf: tracepoint call back failed, stop iterate\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			/* Prologue programs were loaded once per event. */
			if (priv->need_prologue)
				fd = priv->prologue_fds[i];
			else
				fd = bpf_program__fd(prog);

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, obj, arg);
			if (err) {
				pr_debug("bpf: call back failed, stop iterate\n");
				return err;
			}
		}
	}
	return 0;
}
1086
/* What a queued map operation stores into the map. */
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,	/* store a plain integer value */
	BPF_MAP_OP_SET_EVSEL,	/* store a perf event fd taken from an evsel */
};

/* Which keys of the map an operation applies to. */
enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,	/* every index of the (array) map */
	BPF_MAP_KEY_RANGES,	/* only the user-supplied index ranges */
};

/*
 * One deferred map-configuration operation.  Ops are queued on a
 * bpf_map_priv::ops_list while parsing event terms and replayed later
 * by bpf__apply_obj_config(), once maps have real fds.
 */
struct bpf_map_op {
	struct list_head list;		/* link in bpf_map_priv::ops_list */
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		/* valid when key_type == BPF_MAP_KEY_RANGES */
		struct parse_events_array array;
	} k;
	union {
		u64 value;		/* BPF_MAP_OP_SET_VALUE */
		struct evsel *evsel;	/* BPF_MAP_OP_SET_EVSEL */
	} v;
};

/* Per-map private data: the list of pending bpf_map_op entries. */
struct bpf_map_priv {
	struct list_head ops_list;
};
1113
1114 static void
1115 bpf_map_op__delete(struct bpf_map_op *op)
1116 {
1117         if (!list_empty(&op->list))
1118                 list_del_init(&op->list);
1119         if (op->key_type == BPF_MAP_KEY_RANGES)
1120                 parse_events__clear_array(&op->k.array);
1121         free(op);
1122 }
1123
1124 static void
1125 bpf_map_priv__purge(struct bpf_map_priv *priv)
1126 {
1127         struct bpf_map_op *pos, *n;
1128
1129         list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
1130                 list_del_init(&pos->list);
1131                 bpf_map_op__delete(pos);
1132         }
1133 }
1134
1135 static void
1136 bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
1137                     void *_priv)
1138 {
1139         struct bpf_map_priv *priv = _priv;
1140
1141         bpf_map_priv__purge(priv);
1142         free(priv);
1143 }
1144
1145 static void *map_priv(const struct bpf_map *map)
1146 {
1147         void *priv;
1148
1149         if (IS_ERR_OR_NULL(bpf_map_hash))
1150                 return NULL;
1151         if (!hashmap__find(bpf_map_hash, map, &priv))
1152                 return NULL;
1153         return priv;
1154 }
1155
1156 static void bpf_map_hash_free(void)
1157 {
1158         struct hashmap_entry *cur;
1159         size_t bkt;
1160
1161         if (IS_ERR_OR_NULL(bpf_map_hash))
1162                 return;
1163
1164         hashmap__for_each_entry(bpf_map_hash, cur, bkt)
1165                 bpf_map_priv__clear(cur->pkey, cur->pvalue);
1166
1167         hashmap__free(bpf_map_hash);
1168         bpf_map_hash = NULL;
1169 }
1170
1171 static int map_set_priv(struct bpf_map *map, void *priv)
1172 {
1173         void *old_priv;
1174
1175         if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
1176                 return PTR_ERR(bpf_program_hash);
1177
1178         if (!bpf_map_hash) {
1179                 bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
1180                 if (IS_ERR(bpf_map_hash))
1181                         return PTR_ERR(bpf_map_hash);
1182         }
1183
1184         old_priv = map_priv(map);
1185         if (old_priv) {
1186                 bpf_map_priv__clear(map, old_priv);
1187                 return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
1188         }
1189         return hashmap__add(bpf_map_hash, map, priv);
1190 }
1191
1192 static int
1193 bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
1194 {
1195         op->key_type = BPF_MAP_KEY_ALL;
1196         if (!term)
1197                 return 0;
1198
1199         if (term->array.nr_ranges) {
1200                 size_t memsz = term->array.nr_ranges *
1201                                 sizeof(op->k.array.ranges[0]);
1202
1203                 op->k.array.ranges = memdup(term->array.ranges, memsz);
1204                 if (!op->k.array.ranges) {
1205                         pr_debug("Not enough memory to alloc indices for map\n");
1206                         return -ENOMEM;
1207                 }
1208                 op->key_type = BPF_MAP_KEY_RANGES;
1209                 op->k.array.nr_ranges = term->array.nr_ranges;
1210         }
1211         return 0;
1212 }
1213
1214 static struct bpf_map_op *
1215 bpf_map_op__new(struct parse_events_term *term)
1216 {
1217         struct bpf_map_op *op;
1218         int err;
1219
1220         op = zalloc(sizeof(*op));
1221         if (!op) {
1222                 pr_debug("Failed to alloc bpf_map_op\n");
1223                 return ERR_PTR(-ENOMEM);
1224         }
1225         INIT_LIST_HEAD(&op->list);
1226
1227         err = bpf_map_op_setkey(op, term);
1228         if (err) {
1229                 free(op);
1230                 return ERR_PTR(err);
1231         }
1232         return op;
1233 }
1234
1235 static struct bpf_map_op *
1236 bpf_map_op__clone(struct bpf_map_op *op)
1237 {
1238         struct bpf_map_op *newop;
1239
1240         newop = memdup(op, sizeof(*op));
1241         if (!newop) {
1242                 pr_debug("Failed to alloc bpf_map_op\n");
1243                 return NULL;
1244         }
1245
1246         INIT_LIST_HEAD(&newop->list);
1247         if (op->key_type == BPF_MAP_KEY_RANGES) {
1248                 size_t memsz = op->k.array.nr_ranges *
1249                                sizeof(op->k.array.ranges[0]);
1250
1251                 newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
1252                 if (!newop->k.array.ranges) {
1253                         pr_debug("Failed to alloc indices for map\n");
1254                         free(newop);
1255                         return NULL;
1256                 }
1257         }
1258
1259         return newop;
1260 }
1261
1262 static struct bpf_map_priv *
1263 bpf_map_priv__clone(struct bpf_map_priv *priv)
1264 {
1265         struct bpf_map_priv *newpriv;
1266         struct bpf_map_op *pos, *newop;
1267
1268         newpriv = zalloc(sizeof(*newpriv));
1269         if (!newpriv) {
1270                 pr_debug("Not enough memory to alloc map private\n");
1271                 return NULL;
1272         }
1273         INIT_LIST_HEAD(&newpriv->ops_list);
1274
1275         list_for_each_entry(pos, &priv->ops_list, list) {
1276                 newop = bpf_map_op__clone(pos);
1277                 if (!newop) {
1278                         bpf_map_priv__purge(newpriv);
1279                         return NULL;
1280                 }
1281                 list_add_tail(&newop->list, &newpriv->ops_list);
1282         }
1283
1284         return newpriv;
1285 }
1286
1287 static int
1288 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
1289 {
1290         const char *map_name = bpf_map__name(map);
1291         struct bpf_map_priv *priv = map_priv(map);
1292
1293         if (IS_ERR(priv)) {
1294                 pr_debug("Failed to get private from map %s\n", map_name);
1295                 return PTR_ERR(priv);
1296         }
1297
1298         if (!priv) {
1299                 priv = zalloc(sizeof(*priv));
1300                 if (!priv) {
1301                         pr_debug("Not enough memory to alloc map private\n");
1302                         return -ENOMEM;
1303                 }
1304                 INIT_LIST_HEAD(&priv->ops_list);
1305
1306                 if (map_set_priv(map, priv)) {
1307                         free(priv);
1308                         return -BPF_LOADER_ERRNO__INTERNAL;
1309                 }
1310         }
1311
1312         list_add_tail(&op->list, &priv->ops_list);
1313         return 0;
1314 }
1315
/*
 * Create an op from @term and queue it on @map.  On queueing failure
 * the op is destroyed.  Returns the op or an ERR_PTR.
 */
static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op = bpf_map_op__new(term);
	int err;

	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (!err)
		return op;

	bpf_map_op__delete(op);
	return ERR_PTR(err);
}
1333
1334 static int
1335 __bpf_map__config_value(struct bpf_map *map,
1336                         struct parse_events_term *term)
1337 {
1338         struct bpf_map_op *op;
1339         const char *map_name = bpf_map__name(map);
1340
1341         if (!map) {
1342                 pr_debug("Map '%s' is invalid\n", map_name);
1343                 return -BPF_LOADER_ERRNO__INTERNAL;
1344         }
1345
1346         if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
1347                 pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1348                          map_name);
1349                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1350         }
1351         if (bpf_map__key_size(map) < sizeof(unsigned int)) {
1352                 pr_debug("Map %s has incorrect key size\n", map_name);
1353                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1354         }
1355         switch (bpf_map__value_size(map)) {
1356         case 1:
1357         case 2:
1358         case 4:
1359         case 8:
1360                 break;
1361         default:
1362                 pr_debug("Map %s has incorrect value size\n", map_name);
1363                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1364         }
1365
1366         op = bpf_map__add_newop(map, term);
1367         if (IS_ERR(op))
1368                 return PTR_ERR(op);
1369         op->op_type = BPF_MAP_OP_SET_VALUE;
1370         op->v.value = term->val.num;
1371         return 0;
1372 }
1373
/*
 * "map:<name>.value=<num>" handler: validate that the term carries a
 * numeric value, then queue the value-setting op.
 */
static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist __maybe_unused)
{
	/* The parser flags a missing '=<value>' via err_val. */
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}
1391
1392 static int
1393 __bpf_map__config_event(struct bpf_map *map,
1394                         struct parse_events_term *term,
1395                         struct evlist *evlist)
1396 {
1397         struct bpf_map_op *op;
1398         const char *map_name = bpf_map__name(map);
1399         struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
1400
1401         if (!evsel) {
1402                 pr_debug("Event (for '%s') '%s' doesn't exist\n",
1403                          map_name, term->val.str);
1404                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1405         }
1406
1407         if (!map) {
1408                 pr_debug("Map '%s' is invalid\n", map_name);
1409                 return PTR_ERR(map);
1410         }
1411
1412         /*
1413          * No need to check key_size and value_size:
1414          * kernel has already checked them.
1415          */
1416         if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1417                 pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1418                          map_name);
1419                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1420         }
1421
1422         op = bpf_map__add_newop(map, term);
1423         if (IS_ERR(op))
1424                 return PTR_ERR(op);
1425         op->op_type = BPF_MAP_OP_SET_EVSEL;
1426         op->v.evsel = evsel;
1427         return 0;
1428 }
1429
/*
 * "map:<name>.event=<evsel>" handler: validate that the term carries a
 * string value, then queue the evsel-setting op.
 */
static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist)
{
	/* The parser flags a missing '=<value>' via err_val. */
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}
1447
/* Dispatch entry mapping a "map:<name>.<opt>" option to its handler. */
struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct evlist *);
};

/* Supported per-map config options. */
struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};
1458
1459 static int
1460 config_map_indices_range_check(struct parse_events_term *term,
1461                                struct bpf_map *map,
1462                                const char *map_name)
1463 {
1464         struct parse_events_array *array = &term->array;
1465         unsigned int i;
1466
1467         if (!array->nr_ranges)
1468                 return 0;
1469         if (!array->ranges) {
1470                 pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1471                          map_name, (int)array->nr_ranges);
1472                 return -BPF_LOADER_ERRNO__INTERNAL;
1473         }
1474
1475         if (!map) {
1476                 pr_debug("Map '%s' is invalid\n", map_name);
1477                 return -BPF_LOADER_ERRNO__INTERNAL;
1478         }
1479
1480         for (i = 0; i < array->nr_ranges; i++) {
1481                 unsigned int start = array->ranges[i].start;
1482                 size_t length = array->ranges[i].length;
1483                 unsigned int idx = start + length - 1;
1484
1485                 if (idx >= bpf_map__max_entries(map)) {
1486                         pr_debug("ERROR: index %d too large\n", idx);
1487                         return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1488                 }
1489         }
1490         return 0;
1491 }
1492
/*
 * Handle one "map:<mapname>.<option>" term for @obj: look up the map,
 * validate any index ranges against its size, and dispatch to the
 * matching per-option handler from bpf_obj_config__map_funcs.
 * @key_scan_pos is advanced so the caller can point at the offending
 * character on error.  Returns 0 or a negative loader error code.
 */
static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	/* Split "<mapname>.<opt>" in place; map_opt now names the option. */
	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	/* Point past the option during index checking, restore after. */
	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	/* On success, advance past the option consumed by the handler. */
	if (!err)
		*key_scan_pos += strlen(map_opt);

	free(map_name);
	return err;
}
1553
1554 int bpf__config_obj(struct bpf_object *obj,
1555                     struct parse_events_term *term,
1556                     struct evlist *evlist,
1557                     int *error_pos)
1558 {
1559         int key_scan_pos = 0;
1560         int err;
1561
1562         if (!obj || !term || !term->config)
1563                 return -EINVAL;
1564
1565         if (strstarts(term->config, "map:")) {
1566                 key_scan_pos = sizeof("map:") - 1;
1567                 err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1568                 goto out;
1569         }
1570         err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1571 out:
1572         if (error_pos)
1573                 *error_pos = key_scan_pos;
1574         return err;
1575
1576 }
1577
/*
 * Callback invoked once per selected key of a map; @pkey points to the
 * key (an unsigned int index for array maps), @arg is caller context.
 * A non-zero return aborts the iteration.
 */
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map *map,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);
1582
1583 static int
1584 foreach_key_array_all(map_config_func_t func,
1585                       void *arg, const char *name,
1586                       int map_fd, const struct bpf_map *map,
1587                       struct bpf_map_op *op)
1588 {
1589         unsigned int i;
1590         int err;
1591
1592         for (i = 0; i < bpf_map__max_entries(map); i++) {
1593                 err = func(name, map_fd, map, op, &i, arg);
1594                 if (err) {
1595                         pr_debug("ERROR: failed to insert value to %s[%u]\n",
1596                                  name, i);
1597                         return err;
1598                 }
1599         }
1600         return 0;
1601 }
1602
1603 static int
1604 foreach_key_array_ranges(map_config_func_t func, void *arg,
1605                          const char *name, int map_fd,
1606                          const struct bpf_map *map,
1607                          struct bpf_map_op *op)
1608 {
1609         unsigned int i, j;
1610         int err;
1611
1612         for (i = 0; i < op->k.array.nr_ranges; i++) {
1613                 unsigned int start = op->k.array.ranges[i].start;
1614                 size_t length = op->k.array.ranges[i].length;
1615
1616                 for (j = 0; j < length; j++) {
1617                         unsigned int idx = start + j;
1618
1619                         err = func(name, map_fd, map, op, &idx, arg);
1620                         if (err) {
1621                                 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1622                                          name, idx);
1623                                 return err;
1624                         }
1625                 }
1626         }
1627         return 0;
1628 }
1629
1630 static int
1631 bpf_map_config_foreach_key(struct bpf_map *map,
1632                            map_config_func_t func,
1633                            void *arg)
1634 {
1635         int err, map_fd, type;
1636         struct bpf_map_op *op;
1637         const char *name = bpf_map__name(map);
1638         struct bpf_map_priv *priv = map_priv(map);
1639
1640         if (IS_ERR(priv)) {
1641                 pr_debug("ERROR: failed to get private from map %s\n", name);
1642                 return -BPF_LOADER_ERRNO__INTERNAL;
1643         }
1644         if (!priv || list_empty(&priv->ops_list)) {
1645                 pr_debug("INFO: nothing to config for map %s\n", name);
1646                 return 0;
1647         }
1648
1649         if (!map) {
1650                 pr_debug("Map '%s' is invalid\n", name);
1651                 return -BPF_LOADER_ERRNO__INTERNAL;
1652         }
1653         map_fd = bpf_map__fd(map);
1654         if (map_fd < 0) {
1655                 pr_debug("ERROR: failed to get fd from map %s\n", name);
1656                 return map_fd;
1657         }
1658
1659         type = bpf_map__type(map);
1660         list_for_each_entry(op, &priv->ops_list, list) {
1661                 switch (type) {
1662                 case BPF_MAP_TYPE_ARRAY:
1663                 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1664                         switch (op->key_type) {
1665                         case BPF_MAP_KEY_ALL:
1666                                 err = foreach_key_array_all(func, arg, name,
1667                                                             map_fd, map, op);
1668                                 break;
1669                         case BPF_MAP_KEY_RANGES:
1670                                 err = foreach_key_array_ranges(func, arg, name,
1671                                                                map_fd, map, op);
1672                                 break;
1673                         default:
1674                                 pr_debug("ERROR: keytype for map '%s' invalid\n",
1675                                          name);
1676                                 return -BPF_LOADER_ERRNO__INTERNAL;
1677                         }
1678                         if (err)
1679                                 return err;
1680                         break;
1681                 default:
1682                         pr_debug("ERROR: type of '%s' incorrect\n", name);
1683                         return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1684                 }
1685         }
1686
1687         return 0;
1688 }
1689
1690 static int
1691 apply_config_value_for_key(int map_fd, void *pkey,
1692                            size_t val_size, u64 val)
1693 {
1694         int err = 0;
1695
1696         switch (val_size) {
1697         case 1: {
1698                 u8 _val = (u8)(val);
1699                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1700                 break;
1701         }
1702         case 2: {
1703                 u16 _val = (u16)(val);
1704                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1705                 break;
1706         }
1707         case 4: {
1708                 u32 _val = (u32)(val);
1709                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1710                 break;
1711         }
1712         case 8: {
1713                 err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1714                 break;
1715         }
1716         default:
1717                 pr_debug("ERROR: invalid value size\n");
1718                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1719         }
1720         if (err && errno)
1721                 err = -errno;
1722         return err;
1723 }
1724
/*
 * Store the perf event fd of @evsel into @map_fd at key @pkey (an
 * unsigned int index).  The evsel must already be opened (fds in
 * evsel->core.fd), be one-dimensional (a single fd per row), not use
 * attr.inherit, and be of an accepted type (bpf-output, raw or
 * hardware).  Returns 0 or a negative loader/errno code.
 */
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->core.fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	/* No fd array yet: the event has not been opened by perf. */
	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	/* One entry per row, i.e. exactly one fd per event instance. */
	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->core.attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	/* The key indexes the evsel's rows; reject out-of-range keys. */
	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}
1777
1778 static int
1779 apply_obj_config_map_for_key(const char *name, int map_fd,
1780                              const struct bpf_map *map,
1781                              struct bpf_map_op *op,
1782                              void *pkey, void *arg __maybe_unused)
1783 {
1784         int err;
1785
1786         switch (op->op_type) {
1787         case BPF_MAP_OP_SET_VALUE:
1788                 err = apply_config_value_for_key(map_fd, pkey,
1789                                                  bpf_map__value_size(map),
1790                                                  op->v.value);
1791                 break;
1792         case BPF_MAP_OP_SET_EVSEL:
1793                 err = apply_config_evsel_for_key(name, map_fd, pkey,
1794                                                  op->v.evsel);
1795                 break;
1796         default:
1797                 pr_debug("ERROR: unknown value type for '%s'\n", name);
1798                 err = -BPF_LOADER_ERRNO__INTERNAL;
1799         }
1800         return err;
1801 }
1802
1803 static int
1804 apply_obj_config_map(struct bpf_map *map)
1805 {
1806         return bpf_map_config_foreach_key(map,
1807                                           apply_obj_config_map_for_key,
1808                                           NULL);
1809 }
1810
1811 static int
1812 apply_obj_config_object(struct bpf_object *obj)
1813 {
1814         struct bpf_map *map;
1815         int err;
1816
1817         bpf_object__for_each_map(map, obj) {
1818                 err = apply_obj_config_map(map);
1819                 if (err)
1820                         return err;
1821         }
1822         return 0;
1823 }
1824
1825 int bpf__apply_obj_config(void)
1826 {
1827         struct bpf_perf_object *perf_obj, *tmp;
1828         int err;
1829
1830         bpf_perf_object__for_each(perf_obj, tmp) {
1831                 err = apply_obj_config_object(perf_obj->obj);
1832                 if (err)
1833                         return err;
1834         }
1835
1836         return 0;
1837 }
1838
/*
 * Iterate over every map of every loaded bpf_perf_object.  Expands to a
 * nested loop; follow with a single statement or a braced block.
 */
#define bpf__perf_for_each_map(map, pobj, tmp)                  \
	bpf_perf_object__for_each(pobj, tmp)                    \
		bpf_object__for_each_map(map, pobj->obj)

/*
 * As bpf__perf_for_each_map(), but the attached statement only runs for
 * maps whose name compares equal to 'name' (maps with a NULL name are
 * skipped).
 */
#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)  \
	bpf__perf_for_each_map(map, pobj, pobjtmp)              \
		if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
1846
/*
 * Make sure every map called 'name' (across all loaded BPF objects) has
 * an event attached for bpf-output style reporting.
 *
 * Two passes over the matching maps:
 *   1. Find out whether any map still lacks a priv (need_init) and grab
 *      the first existing priv as a template (tmpl_priv).
 *   2. For each map without a priv, either clone the template priv or,
 *      if none existed, attach a freshly created "bpf-output" evsel via
 *      a BPF_MAP_OP_SET_EVSEL op.
 *
 * Returns the created evsel, NULL when nothing needed initialization,
 * or an ERR_PTR() on failure.
 */
struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_perf_object *perf_obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	/* Pass 1: detect uninitialized maps and pick a template priv. */
	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
		struct bpf_map_priv *priv = map_priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	/* No template to clone: create a dedicated bpf-output event. */
	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_event(evlist, event_definition);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		/* parse_event() appended the new evsel at the tail. */
		evsel = evlist__last(evlist);
	}

	/* Pass 2: initialize every map that still has no priv. */
	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
		struct bpf_map_priv *priv = map_priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = map_set_priv(map, priv);
			if (err) {
				/* Ownership was not taken; release the clone. */
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			/* Queue an op that binds the new evsel to this map. */
			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}
1923
/*
 * Set up the conventional "__bpf_stdout__" output map for all loaded
 * BPF objects.  Returns 0 on success or a negative error code.
 */
int bpf__setup_stdout(struct evlist *evlist)
{
	struct evsel *evsel;

	evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
	return PTR_ERR_OR_ZERO(evsel);
}
1929
/* Map a BPF_LOADER_ERRNO__* code to its index in the strerror table. */
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
/* Number of entries the strerror table must hold. */
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
1933
/*
 * Human-readable messages for the bpf-loader private error codes,
 * indexed by ERRNO_OFFSET()/ERRCODE_OFFSET().  Consumed by
 * bpf_loader_strerror().
 */
static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};
1959
1960 static int
1961 bpf_loader_strerror(int err, char *buf, size_t size)
1962 {
1963         char sbuf[STRERR_BUFSIZE];
1964         const char *msg;
1965
1966         if (!buf || !size)
1967                 return -1;
1968
1969         err = err > 0 ? err : -err;
1970
1971         if (err >= __LIBBPF_ERRNO__START)
1972                 return libbpf_strerror(err, buf, size);
1973
1974         if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
1975                 msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
1976                 snprintf(buf, size, "%s", msg);
1977                 buf[size - 1] = '\0';
1978                 return 0;
1979         }
1980
1981         if (err >= __BPF_LOADER_ERRNO__END)
1982                 snprintf(buf, size, "Unknown bpf loader error %d", err);
1983         else
1984                 snprintf(buf, size, "%s",
1985                          str_error_r(err, sbuf, sizeof(sbuf)));
1986
1987         buf[size - 1] = '\0';
1988         return -1;
1989 }
1990
/*
 * Helper macros for the bpf__strerror_*() functions below.  head opens
 * a switch on the (absolute value of the) error code with a default
 * case that prints the generic message; entry adds a case with a custom
 * message; end closes the switch and guarantees NUL termination.  They
 * must be used together, in that order, within one function body.
 */
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';
2013
2014 int bpf__strerror_prepare_load(const char *filename, bool source,
2015                                int err, char *buf, size_t size)
2016 {
2017         size_t n;
2018         int ret;
2019
2020         n = snprintf(buf, size, "Failed to load %s%s: ",
2021                          filename, source ? " from source" : "");
2022         if (n >= size) {
2023                 buf[size - 1] = '\0';
2024                 return 0;
2025         }
2026         buf += n;
2027         size -= n;
2028
2029         ret = bpf_loader_strerror(err, buf, size);
2030         buf[size - 1] = '\0';
2031         return ret;
2032 }
2033
/*
 * Produce a user-facing message for errors from probe setup.  Built on
 * the bpf__strerror_* macros: known codes get actionable hints, any
 * other code falls through to the generic strerror text.  Always
 * returns 0 with buf NUL-terminated (or untouched when size == 0).
 */
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}
2049
/*
 * Produce a user-facing message for errors from loading a BPF object.
 * For LIBBPF_ERRNO__KVER, compare the object's recorded kernel version
 * against the running kernel to give a precise mismatch diagnosis.
 * Always returns 0 with buf NUL-terminated (or untouched when size == 0).
 */
int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		/* Versions match, yet the kernel still refused the load. */
		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}
2077
/*
 * Produce a user-facing message for errors from configuring a BPF
 * object (map config terms).  Always returns 0 with buf NUL-terminated
 * (or untouched when size == 0).
 */
int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}
2090
/*
 * Produce a user-facing message for errors from bpf__apply_obj_config():
 * map/event binding problems get specific hints.  Always returns 0 with
 * buf NUL-terminated (or untouched when size == 0).
 */
int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}
2103
/*
 * Produce a user-facing message for errors from
 * bpf__setup_output_event().  No code-specific hints: every error goes
 * through the generic strerror text.  Always returns 0.
 */
int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}