libbpf: Free up resources used by inner map definition
tools/lib/bpf/libbpf.c
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC            0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))
/* vfprintf() in __base_pr() uses a nonliteral format string, which may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)  __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);

static int __base_pr(enum libbpf_print_level level, const char *format,
                     va_list args)
{
        if (level == LIBBPF_DEBUG)
                return 0;

        return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
        libbpf_print_fn_t old_print_fn = __libbpf_pr;

        __libbpf_pr = fn;
        return old_print_fn;
}
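
/* Example usage (a sketch, not from this file): passing NULL disables all
 * libbpf output, since libbpf_print() below returns early when the callback
 * is NULL; the previous callback is returned so it can be restored later:
 *
 *      libbpf_print_fn_t old_fn = libbpf_set_print(NULL);
 *      ... run noisy operations ...
 *      libbpf_set_print(old_fn);
 */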

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
        va_list args;

        if (!__libbpf_pr)
                return;

        va_start(args, format);
        __libbpf_pr(level, format, args);
        va_end(args);
}

static void pr_perm_msg(int err)
{
        struct rlimit limit;
        char buf[100];

        if (err != -EPERM || geteuid() != 0)
                return;

        err = getrlimit(RLIMIT_MEMLOCK, &limit);
        if (err)
                return;

        if (limit.rlim_cur == RLIM_INFINITY)
                return;

        if (limit.rlim_cur < 1024)
                snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
        else if (limit.rlim_cur < 1024*1024)
                snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
        else
                snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

        pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
                buf);
}
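
/* Illustration (value assumed): with RLIMIT_MEMLOCK's rlim_cur at 65536, the
 * middle branch above formats 65536 / 1024 as "64.0 KiB", so the warning
 * reads "... try raising 'ulimit -l'? current value: 64.0 KiB".
 */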

#define STRERR_BUFSIZE  128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({                  \
        int ___err = 0;                 \
        if ((fd) >= 0)                  \
                ___err = close((fd));   \
        fd = -1;                        \
        ___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
        return (__u64) (unsigned long) ptr;
}

/* this goes away in libbpf 1.0 */
enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
        /* __LIBBPF_STRICT_LAST is one past the last power-of-2 flag value
         * used, so to get a mask of all possible flag bits we compensate
         * for that +1 and then compute (2*x - 1)
         */
        if (mode != LIBBPF_STRICT_ALL
            && (mode & ~((__LIBBPF_STRICT_LAST - 1) * 2 - 1)))
                return errno = EINVAL, -EINVAL;

        libbpf_mode = mode;
        return 0;
}
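
/* Worked example (flag values assumed for illustration): if the highest
 * individual flag is 0x04, then __LIBBPF_STRICT_LAST == 0x05 and the mask is
 * (0x05 - 1) * 2 - 1 == 0x07, i.e. bits 0x01|0x02|0x04. Any mode other than
 * LIBBPF_STRICT_ALL with bits outside that mask is rejected with -EINVAL.
 */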

enum kern_feature_id {
        /* v4.14: kernel support for program & map names. */
        FEAT_PROG_NAME,
        /* v5.2: kernel support for global data sections. */
        FEAT_GLOBAL_DATA,
        /* BTF support */
        FEAT_BTF,
        /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
        FEAT_BTF_FUNC,
        /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
        FEAT_BTF_DATASEC,
        /* BTF_FUNC_GLOBAL is supported */
        FEAT_BTF_GLOBAL_FUNC,
        /* BPF_F_MMAPABLE is supported for arrays */
        FEAT_ARRAY_MMAP,
        /* kernel support for expected_attach_type in BPF_PROG_LOAD */
        FEAT_EXP_ATTACH_TYPE,
        /* bpf_probe_read_{kernel,user}[_str] helpers */
        FEAT_PROBE_READ_KERN,
        /* BPF_PROG_BIND_MAP is supported */
        FEAT_PROG_BIND_MAP,
        /* Kernel support for module BTFs */
        FEAT_MODULE_BTF,
        /* BTF_KIND_FLOAT support */
        FEAT_BTF_FLOAT,
        /* BPF perf link support */
        FEAT_PERF_LINK,
        __FEAT_CNT,
};

static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);

enum reloc_type {
        RELO_LD64,
        RELO_CALL,
        RELO_DATA,
        RELO_EXTERN_VAR,
        RELO_EXTERN_FUNC,
        RELO_SUBPROG_ADDR,
};

struct reloc_desc {
        enum reloc_type type;
        int insn_idx;
        int map_idx;
        int sym_off;
};

struct bpf_sec_def;

typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
                                        struct bpf_program *prog);

struct bpf_sec_def {
        const char *sec;
        size_t len;
        enum bpf_prog_type prog_type;
        enum bpf_attach_type expected_attach_type;
        bool is_exp_attach_type_optional;
        bool is_attachable;
        bool is_attach_btf;
        bool is_sleepable;
        attach_fn_t attach_fn;
};

/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
        const struct bpf_sec_def *sec_def;
        char *sec_name;
        size_t sec_idx;
        /* this program's instruction offset (in number of instructions)
         * within its containing ELF section
         */
        size_t sec_insn_off;
        /* number of original instructions in ELF section belonging to this
         * program, not taking into account subprogram instructions possibly
         * appended later during relocation
         */
        size_t sec_insn_cnt;
        /* Offset (in number of instructions) of the start of instructions
         * belonging to this BPF program within its containing main BPF
         * program. For the entry-point (main) BPF program, this is always
         * zero. For a sub-program, this gets reset before each main BPF
         * program is processed and relocated, and is used to determine
         * whether the sub-program was already appended to the main program,
         * and if so, at which instruction offset.
         */
        size_t sub_insn_off;

        char *name;
        /* sec_name with / replaced by _; makes recursive pinning
         * in bpf_object__pin_programs easier
         */
        char *pin_name;

        /* instructions that belong to BPF program; insns[0] is located at
         * sec_insn_off instruction within its ELF section in ELF file, so
         * when mapping ELF file instruction index to the local instruction,
         * one needs to subtract sec_insn_off; and vice versa.
         */
        struct bpf_insn *insns;
        /* actual number of instructions in this BPF program's image; for
         * entry-point BPF programs this includes the size of the main program
         * itself plus all the used sub-programs, appended at the end
         */
        size_t insns_cnt;

        struct reloc_desc *reloc_desc;
        int nr_reloc;
        int log_level;

        struct {
                int nr;
                int *fds;
        } instances;
        bpf_program_prep_t preprocessor;

        struct bpf_object *obj;
        void *priv;
        bpf_program_clear_priv_t clear_priv;

        bool load;
        bool mark_btf_static;
        enum bpf_prog_type type;
        enum bpf_attach_type expected_attach_type;
        int prog_ifindex;
        __u32 attach_btf_obj_fd;
        __u32 attach_btf_id;
        __u32 attach_prog_fd;
        void *func_info;
        __u32 func_info_rec_size;
        __u32 func_info_cnt;

        void *line_info;
        __u32 line_info_rec_size;
        __u32 line_info_cnt;
        __u32 prog_flags;
};

struct bpf_struct_ops {
        const char *tname;
        const struct btf_type *type;
        struct bpf_program **progs;
        __u32 *kern_func_off;
        /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
        void *data;
        /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
         *      btf_vmlinux's format.
         * struct bpf_struct_ops_tcp_congestion_ops {
         *      [... some other kernel fields ...]
         *      struct tcp_congestion_ops data;
         * }
         * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops).
         * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
         * from "data".
         */
        void *kern_vdata;
        __u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
        LIBBPF_MAP_UNSPEC,
        LIBBPF_MAP_DATA,
        LIBBPF_MAP_BSS,
        LIBBPF_MAP_RODATA,
        LIBBPF_MAP_KCONFIG,
};

static const char * const libbpf_type_to_btf_name[] = {
        [LIBBPF_MAP_DATA]       = DATA_SEC,
        [LIBBPF_MAP_BSS]        = BSS_SEC,
        [LIBBPF_MAP_RODATA]     = RODATA_SEC,
        [LIBBPF_MAP_KCONFIG]    = KCONFIG_SEC,
};

struct bpf_map {
        char *name;
        int fd;
        int sec_idx;
        size_t sec_offset;
        int map_ifindex;
        int inner_map_fd;
        struct bpf_map_def def;
        __u32 numa_node;
        __u32 btf_var_idx;
        __u32 btf_key_type_id;
        __u32 btf_value_type_id;
        __u32 btf_vmlinux_value_type_id;
        void *priv;
        bpf_map_clear_priv_t clear_priv;
        enum libbpf_map_type libbpf_type;
        void *mmaped;
        struct bpf_struct_ops *st_ops;
        struct bpf_map *inner_map;
        void **init_slots;
        int init_slots_sz;
        char *pin_path;
        bool pinned;
        bool reused;
};

enum extern_type {
        EXT_UNKNOWN,
        EXT_KCFG,
        EXT_KSYM,
};

enum kcfg_type {
        KCFG_UNKNOWN,
        KCFG_CHAR,
        KCFG_BOOL,
        KCFG_INT,
        KCFG_TRISTATE,
        KCFG_CHAR_ARR,
};

struct extern_desc {
        enum extern_type type;
        int sym_idx;
        int btf_id;
        int sec_btf_id;
        const char *name;
        bool is_set;
        bool is_weak;
        union {
                struct {
                        enum kcfg_type type;
                        int sz;
                        int align;
                        int data_off;
                        bool is_signed;
                } kcfg;
                struct {
                        unsigned long long addr;

                        /* target btf_id of the corresponding kernel var. */
                        int kernel_btf_obj_fd;
                        int kernel_btf_id;

                        /* local btf_id of the ksym extern's type. */
                        __u32 type_id;
                } ksym;
        };
};

static LIST_HEAD(bpf_objects_list);

struct module_btf {
        struct btf *btf;
        char *name;
        __u32 id;
        int fd;
};

struct bpf_object {
        char name[BPF_OBJ_NAME_LEN];
        char license[64];
        __u32 kern_version;

        struct bpf_program *programs;
        size_t nr_programs;
        struct bpf_map *maps;
        size_t nr_maps;
        size_t maps_cap;

        char *kconfig;
        struct extern_desc *externs;
        int nr_extern;
        int kconfig_map_idx;
        int rodata_map_idx;

        bool loaded;
        bool has_subcalls;

        struct bpf_gen *gen_loader;

        /*
         * Information used when doing ELF-related work. Only valid if fd
         * is valid.
         */
        struct {
                int fd;
                const void *obj_buf;
                size_t obj_buf_sz;
                Elf *elf;
                GElf_Ehdr ehdr;
                Elf_Data *symbols;
                Elf_Data *data;
                Elf_Data *rodata;
                Elf_Data *bss;
                Elf_Data *st_ops_data;
                size_t shstrndx; /* section index for section name strings */
                size_t strtabidx;
                struct {
                        GElf_Shdr shdr;
                        Elf_Data *data;
                } *reloc_sects;
                int nr_reloc_sects;
                int maps_shndx;
                int btf_maps_shndx;
                __u32 btf_maps_sec_btf_id;
                int text_shndx;
                int symbols_shndx;
                int data_shndx;
                int rodata_shndx;
                int bss_shndx;
                int st_ops_shndx;
        } efile;
        /*
         * All loaded bpf_objects are linked in a list, which is
         * hidden from the caller. bpf_objects__<func> handlers deal with
         * all objects.
         */
        struct list_head list;

        struct btf *btf;
        struct btf_ext *btf_ext;

        /* Parse and load BTF vmlinux if any of the programs in the object need
         * it at load time.
         */
        struct btf *btf_vmlinux;
        /* Path to the custom BTF to be used for BPF CO-RE relocations as an
         * override for vmlinux BTF.
         */
        char *btf_custom_path;
        /* vmlinux BTF override for CO-RE relocations */
        struct btf *btf_vmlinux_override;
        /* Lazily initialized kernel module BTFs */
        struct module_btf *btf_modules;
        bool btf_modules_loaded;
        size_t btf_module_cnt;
        size_t btf_module_cap;

        void *priv;
        bpf_object_clear_priv_t clear_priv;

        char path[];
};
#define obj_elf_valid(o)        ((o)->efile.elf)

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);

void bpf_program__unload(struct bpf_program *prog)
{
        int i;

        if (!prog)
                return;

        /*
         * If the object is opened but the program was never loaded,
         * it is possible that prog->instances.nr == -1.
         */
        if (prog->instances.nr > 0) {
                for (i = 0; i < prog->instances.nr; i++)
                        zclose(prog->instances.fds[i]);
        } else if (prog->instances.nr != -1) {
                pr_warn("Internal error: instances.nr is %d\n",
                        prog->instances.nr);
        }

        prog->instances.nr = -1;
        zfree(&prog->instances.fds);

        zfree(&prog->func_info);
        zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
        if (!prog)
                return;

        if (prog->clear_priv)
                prog->clear_priv(prog, prog->priv);

        prog->priv = NULL;
        prog->clear_priv = NULL;

        bpf_program__unload(prog);
        zfree(&prog->name);
        zfree(&prog->sec_name);
        zfree(&prog->pin_name);
        zfree(&prog->insns);
        zfree(&prog->reloc_desc);

        prog->nr_reloc = 0;
        prog->insns_cnt = 0;
        prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
        char *name, *p;

        name = p = strdup(prog->sec_name);
        while ((p = strchr(p, '/')))
                *p = '_';

        return name;
}
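
/* For example, a program in section "cgroup_skb/ingress" gets the pin name
 * "cgroup_skb_ingress", so recursive pinning in bpf_object__pin_programs()
 * yields a flat, valid file name under the pin directory.
 */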

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
        return BPF_CLASS(insn->code) == BPF_JMP &&
               BPF_OP(insn->code) == BPF_CALL &&
               BPF_SRC(insn->code) == BPF_K &&
               insn->src_reg == BPF_PSEUDO_CALL &&
               insn->dst_reg == 0 &&
               insn->off == 0;
}
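
/* A sketch of the encoding this matches: a call into another BPF function
 * in the same object is emitted as
 *
 *      .code    = BPF_JMP | BPF_CALL,
 *      .src_reg = BPF_PSEUDO_CALL,
 *      .imm     = <instruction-relative offset of the callee>,
 *
 * while a kernel helper call has src_reg == 0 and .imm == <helper ID>.
 */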

static bool is_call_insn(const struct bpf_insn *insn)
{
        return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
        return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
                      const char *name, size_t sec_idx, const char *sec_name,
                      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
        if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
                pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
                        sec_name, name, sec_off, insn_data_sz);
                return -EINVAL;
        }

        memset(prog, 0, sizeof(*prog));
        prog->obj = obj;

        prog->sec_idx = sec_idx;
        prog->sec_insn_off = sec_off / BPF_INSN_SZ;
        prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
        /* insns_cnt can later be increased by appending used subprograms */
        prog->insns_cnt = prog->sec_insn_cnt;

        prog->type = BPF_PROG_TYPE_UNSPEC;
        prog->load = true;

        prog->instances.fds = NULL;
        prog->instances.nr = -1;

        prog->sec_name = strdup(sec_name);
        if (!prog->sec_name)
                goto errout;

        prog->name = strdup(name);
        if (!prog->name)
                goto errout;

        prog->pin_name = __bpf_program__pin_name(prog);
        if (!prog->pin_name)
                goto errout;

        prog->insns = malloc(insn_data_sz);
        if (!prog->insns)
                goto errout;
        memcpy(prog->insns, insn_data, insn_data_sz);

        return 0;
errout:
        pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
        bpf_program__exit(prog);
        return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
                         const char *sec_name, int sec_idx)
{
        Elf_Data *symbols = obj->efile.symbols;
        struct bpf_program *prog, *progs;
        void *data = sec_data->d_buf;
        size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
        int nr_progs, err, i;
        const char *name;
        GElf_Sym sym;

        progs = obj->programs;
        nr_progs = obj->nr_programs;
        nr_syms = symbols->d_size / sizeof(GElf_Sym);
        sec_off = 0;

        for (i = 0; i < nr_syms; i++) {
                if (!gelf_getsym(symbols, i, &sym))
                        continue;
                if (sym.st_shndx != sec_idx)
                        continue;
                if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
                        continue;

                prog_sz = sym.st_size;
                sec_off = sym.st_value;

                name = elf_sym_str(obj, sym.st_name);
                if (!name) {
                        pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                if (sec_off + prog_sz > sec_sz) {
                        pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
                        pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
                        return -ENOTSUP;
                }

                pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
                         sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
                progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
                if (!progs) {
                        /*
                         * In this case the original obj->programs
                         * is still valid, so no special treatment is
                         * needed in bpf_object__close().
                         */
                        pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
                                sec_name, name);
                        return -ENOMEM;
                }
                obj->programs = progs;

                prog = &progs[nr_progs];

                err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
                                            sec_off, data + sec_off, prog_sz);
                if (err)
                        return err;

                /* if function is a global/weak symbol, but has restricted
                 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
                 * as static to enable more permissive BPF verification mode
                 * with more outside context available to BPF verifier
                 */
                if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
                    && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
                        || GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
                        prog->mark_btf_static = true;

                nr_progs++;
                obj->nr_programs = nr_progs;
        }

        return 0;
}

static __u32 get_kernel_version(void)
{
        __u32 major, minor, patch;
        struct utsname info;

        uname(&info);
        if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
                return 0;
        return KERNEL_VERSION(major, minor, patch);
}
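
/* For example, uname() reporting release "5.10.92" yields
 * KERNEL_VERSION(5, 10, 92) == (5 << 16) + (10 << 8) + 92 == 0x50a5c,
 * while a release string that doesn't match "%u.%u.%u" makes this
 * return 0.
 */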

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
        struct btf_member *m;
        int i;

        for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
                if (btf_member_bit_offset(t, i) == bit_offset)
                        return m;
        }

        return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
                    const char *name)
{
        struct btf_member *m;
        int i;

        for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
                if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
                        return m;
        }

        return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
                                   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
                           const struct btf_type **type, __u32 *type_id,
                           const struct btf_type **vtype, __u32 *vtype_id,
                           const struct btf_member **data_member)
{
        const struct btf_type *kern_type, *kern_vtype;
        const struct btf_member *kern_data_member;
        __s32 kern_vtype_id, kern_type_id;
        __u32 i;

        kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
        if (kern_type_id < 0) {
                pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
                        tname);
                return kern_type_id;
        }
        kern_type = btf__type_by_id(btf, kern_type_id);

        /* Find the corresponding "map_value" type that will be used
         * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
         * find "struct bpf_struct_ops_tcp_congestion_ops" from the
         * btf_vmlinux.
         */
        kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
                                                tname, BTF_KIND_STRUCT);
        if (kern_vtype_id < 0) {
                pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
                        STRUCT_OPS_VALUE_PREFIX, tname);
                return kern_vtype_id;
        }
        kern_vtype = btf__type_by_id(btf, kern_vtype_id);

        /* Find "struct tcp_congestion_ops" from
         * struct bpf_struct_ops_tcp_congestion_ops {
         *      [ ... ]
         *      struct tcp_congestion_ops data;
         * }
         */
        kern_data_member = btf_members(kern_vtype);
        for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
                if (kern_data_member->type == kern_type_id)
                        break;
        }
        if (i == btf_vlen(kern_vtype)) {
                pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
                        tname, STRUCT_OPS_VALUE_PREFIX, tname);
                return -EINVAL;
        }

        *type = kern_type;
        *type_id = kern_type_id;
        *vtype = kern_vtype;
        *vtype_id = kern_vtype_id;
        *data_member = kern_data_member;

        return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
        return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
                                         const struct btf *btf,
                                         const struct btf *kern_btf)
{
        const struct btf_member *member, *kern_member, *kern_data_member;
        const struct btf_type *type, *kern_type, *kern_vtype;
        __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
        struct bpf_struct_ops *st_ops;
        void *data, *kern_data;
        const char *tname;
        int err;

        st_ops = map->st_ops;
        type = st_ops->type;
        tname = st_ops->tname;
        err = find_struct_ops_kern_types(kern_btf, tname,
                                         &kern_type, &kern_type_id,
                                         &kern_vtype, &kern_vtype_id,
                                         &kern_data_member);
        if (err)
                return err;

        pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
                 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

        map->def.value_size = kern_vtype->size;
        map->btf_vmlinux_value_type_id = kern_vtype_id;

        st_ops->kern_vdata = calloc(1, kern_vtype->size);
        if (!st_ops->kern_vdata)
                return -ENOMEM;

        data = st_ops->data;
        kern_data_off = kern_data_member->offset / 8;
        kern_data = st_ops->kern_vdata + kern_data_off;

        member = btf_members(type);
        for (i = 0; i < btf_vlen(type); i++, member++) {
                const struct btf_type *mtype, *kern_mtype;
                __u32 mtype_id, kern_mtype_id;
                void *mdata, *kern_mdata;
                __s64 msize, kern_msize;
                __u32 moff, kern_moff;
                __u32 kern_member_idx;
                const char *mname;

                mname = btf__name_by_offset(btf, member->name_off);
                kern_member = find_member_by_name(kern_btf, kern_type, mname);
                if (!kern_member) {
                        pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
                                map->name, mname);
                        return -ENOTSUP;
                }

                kern_member_idx = kern_member - btf_members(kern_type);
                if (btf_member_bitfield_size(type, i) ||
                    btf_member_bitfield_size(kern_type, kern_member_idx)) {
                        pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
                                map->name, mname);
                        return -ENOTSUP;
                }

                moff = member->offset / 8;
                kern_moff = kern_member->offset / 8;

                mdata = data + moff;
                kern_mdata = kern_data + kern_moff;

                mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
                kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
                                                    &kern_mtype_id);
                if (BTF_INFO_KIND(mtype->info) !=
                    BTF_INFO_KIND(kern_mtype->info)) {
                        pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
                                map->name, mname, BTF_INFO_KIND(mtype->info),
                                BTF_INFO_KIND(kern_mtype->info));
                        return -ENOTSUP;
                }

                if (btf_is_ptr(mtype)) {
                        struct bpf_program *prog;

                        prog = st_ops->progs[i];
                        if (!prog)
                                continue;

                        kern_mtype = skip_mods_and_typedefs(kern_btf,
                                                            kern_mtype->type,
                                                            &kern_mtype_id);

                        /* mtype->type must be a func_proto which was
                         * guaranteed in bpf_object__collect_st_ops_relos(),
                         * so only check kern_mtype for func_proto here.
                         */
                        if (!btf_is_func_proto(kern_mtype)) {
                                pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
                                        map->name, mname);
                                return -ENOTSUP;
                        }

                        prog->attach_btf_id = kern_type_id;
                        prog->expected_attach_type = kern_member_idx;

                        st_ops->kern_func_off[i] = kern_data_off + kern_moff;

                        pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
                                 map->name, mname, prog->name, moff,
                                 kern_moff);

                        continue;
                }

                msize = btf__resolve_size(btf, mtype_id);
                kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
                if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
                        pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
                                map->name, mname, (ssize_t)msize,
                                (ssize_t)kern_msize);
                        return -ENOTSUP;
                }

                pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
                         map->name, mname, (unsigned int)msize,
                         moff, kern_moff);
                memcpy(kern_mdata, mdata, msize);
        }

        return 0;
}
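
/* Net effect, sketched for tcp_congestion_ops: plain data members written by
 * the BPF object into st_ops->data are copied member-by-member into
 * st_ops->kern_vdata at kern_data_off, while each function-pointer member
 * instead records its program's attach_btf_id/expected_attach_type and its
 * kernel-side offset in st_ops->kern_func_off[] for later use.
 */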

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
        struct bpf_map *map;
        size_t i;
        int err;

        for (i = 0; i < obj->nr_maps; i++) {
                map = &obj->maps[i];

                if (!bpf_map__is_struct_ops(map))
                        continue;

                err = bpf_map__init_kern_struct_ops(map, obj->btf,
                                                    obj->btf_vmlinux);
                if (err)
                        return err;
        }

        return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
        const struct btf_type *type, *datasec;
        const struct btf_var_secinfo *vsi;
        struct bpf_struct_ops *st_ops;
        const char *tname, *var_name;
        __s32 type_id, datasec_id;
        const struct btf *btf;
        struct bpf_map *map;
        __u32 i;

        if (obj->efile.st_ops_shndx == -1)
                return 0;

        btf = obj->btf;
        datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
                                            BTF_KIND_DATASEC);
        if (datasec_id < 0) {
                pr_warn("struct_ops init: DATASEC %s not found\n",
                        STRUCT_OPS_SEC);
                return -EINVAL;
        }

        datasec = btf__type_by_id(btf, datasec_id);
        vsi = btf_var_secinfos(datasec);
        for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
                type = btf__type_by_id(obj->btf, vsi->type);
                var_name = btf__name_by_offset(obj->btf, type->name_off);

                type_id = btf__resolve_type(obj->btf, vsi->type);
                if (type_id < 0) {
                        pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
                                vsi->type, STRUCT_OPS_SEC);
                        return -EINVAL;
                }

                type = btf__type_by_id(obj->btf, type_id);
                tname = btf__name_by_offset(obj->btf, type->name_off);
                if (!tname[0]) {
                        pr_warn("struct_ops init: anonymous type is not supported\n");
                        return -ENOTSUP;
                }
                if (!btf_is_struct(type)) {
                        pr_warn("struct_ops init: %s is not a struct\n", tname);
                        return -EINVAL;
                }

                map = bpf_object__add_map(obj);
                if (IS_ERR(map))
                        return PTR_ERR(map);

                map->sec_idx = obj->efile.st_ops_shndx;
                map->sec_offset = vsi->offset;
                map->name = strdup(var_name);
                if (!map->name)
                        return -ENOMEM;

                map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
                map->def.key_size = sizeof(int);
                map->def.value_size = type->size;
                map->def.max_entries = 1;

                map->st_ops = calloc(1, sizeof(*map->st_ops));
                if (!map->st_ops)
                        return -ENOMEM;
                st_ops = map->st_ops;
                st_ops->data = malloc(type->size);
                st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
                st_ops->kern_func_off = malloc(btf_vlen(type) *
                                               sizeof(*st_ops->kern_func_off));
                if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
                        return -ENOMEM;

                if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
                        pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
                                var_name, STRUCT_OPS_SEC);
                        return -EINVAL;
                }

                memcpy(st_ops->data,
                       obj->efile.st_ops_data->d_buf + vsi->offset,
                       type->size);
                st_ops->tname = tname;
                st_ops->type = type;
                st_ops->type_id = type_id;

                pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
                         tname, type_id, var_name, vsi->offset);
        }

        return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
                                          const void *obj_buf,
                                          size_t obj_buf_sz,
                                          const char *obj_name)
{
        struct bpf_object *obj;
        char *end;

        obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
        if (!obj) {
                pr_warn("alloc memory failed for %s\n", path);
                return ERR_PTR(-ENOMEM);
        }

        strcpy(obj->path, path);
        if (obj_name) {
                strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
                obj->name[sizeof(obj->name) - 1] = 0;
        } else {
                /* Use the GNU version of basename(), which doesn't modify its arg. */
                strncpy(obj->name, basename((void *)path),
                        sizeof(obj->name) - 1);
                end = strchr(obj->name, '.');
                if (end)
                        *end = 0;
        }

        obj->efile.fd = -1;
        /*
         * The caller of this function should also call
         * bpf_object__elf_finish() after data collection to return
         * obj_buf to the user. Otherwise we would have to duplicate the
         * buffer to avoid the user freeing it before ELF processing is done.
         */
        obj->efile.obj_buf = obj_buf;
        obj->efile.obj_buf_sz = obj_buf_sz;
        obj->efile.maps_shndx = -1;
        obj->efile.btf_maps_shndx = -1;
        obj->efile.data_shndx = -1;
        obj->efile.rodata_shndx = -1;
        obj->efile.bss_shndx = -1;
        obj->efile.st_ops_shndx = -1;
        obj->kconfig_map_idx = -1;
        obj->rodata_map_idx = -1;

        obj->kern_version = get_kernel_version();
        obj->loaded = false;

        INIT_LIST_HEAD(&obj->list);
        list_add(&obj->list, &bpf_objects_list);
        return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
        if (!obj_elf_valid(obj))
                return;

        if (obj->efile.elf) {
                elf_end(obj->efile.elf);
                obj->efile.elf = NULL;
        }
        obj->efile.symbols = NULL;
        obj->efile.data = NULL;
        obj->efile.rodata = NULL;
        obj->efile.bss = NULL;
        obj->efile.st_ops_data = NULL;

        zfree(&obj->efile.reloc_sects);
        obj->efile.nr_reloc_sects = 0;
        zclose(obj->efile.fd);
        obj->efile.obj_buf = NULL;
        obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
        int err = 0;
        GElf_Ehdr *ep;

        if (obj_elf_valid(obj)) {
                pr_warn("elf: init internal error\n");
                return -LIBBPF_ERRNO__LIBELF;
        }

        if (obj->efile.obj_buf_sz > 0) {
                /*
                 * obj_buf should have been validated by
                 * bpf_object__open_buffer().
                 */
                obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
                                            obj->efile.obj_buf_sz);
        } else {
                obj->efile.fd = open(obj->path, O_RDONLY);
                if (obj->efile.fd < 0) {
                        char errmsg[STRERR_BUFSIZE], *cp;

                        err = -errno;
                        cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
                        pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
                        return err;
                }

                obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
        }

        if (!obj->efile.elf) {
                pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__LIBELF;
                goto errout;
        }

        if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
                pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }
        ep = &obj->efile.ehdr;

        if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
                pr_warn("elf: failed to get section names section index for %s: %s\n",
                        obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        /* ELF is corrupted/truncated; avoid calling elf_strptr. */
        if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
                pr_warn("elf: failed to get section names strings from %s: %s\n",
                        obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        /* Old LLVM versions set e_machine to EM_NONE */
        if (ep->e_type != ET_REL ||
            (ep->e_machine && ep->e_machine != EM_BPF)) {
                pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        return 0;
errout:
        bpf_object__elf_finish(obj);
        return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
        if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
                return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
        if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
                return 0;
#else
# error "Unrecognized __BYTE_ORDER"
#endif
        pr_warn("elf: endianness mismatch in %s.\n", obj->path);
        return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
        memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
        pr_debug("license of %s is %s\n", obj->path, obj->license);
        return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
        __u32 kver;

        if (size != sizeof(kver)) {
                pr_warn("invalid kver section in %s\n", obj->path);
                return -LIBBPF_ERRNO__FORMAT;
        }
        memcpy(&kver, data, sizeof(kver));
        obj->kern_version = kver;
        pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
        return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
        if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
            type == BPF_MAP_TYPE_HASH_OF_MAPS)
                return true;
        return false;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
                             __u32 *size)
{
        int ret = -ENOENT;

        *size = 0;
        if (!name) {
                return -EINVAL;
        } else if (!strcmp(name, DATA_SEC)) {
                if (obj->efile.data)
                        *size = obj->efile.data->d_size;
        } else if (!strcmp(name, BSS_SEC)) {
                if (obj->efile.bss)
                        *size = obj->efile.bss->d_size;
        } else if (!strcmp(name, RODATA_SEC)) {
                if (obj->efile.rodata)
                        *size = obj->efile.rodata->d_size;
        } else if (!strcmp(name, STRUCT_OPS_SEC)) {
                if (obj->efile.st_ops_data)
                        *size = obj->efile.st_ops_data->d_size;
        } else {
                Elf_Scn *scn = elf_sec_by_name(obj, name);
                Elf_Data *data = elf_sec_data(obj, scn);

                if (data) {
                        ret = 0; /* found it */
                        *size = data->d_size;
                }
        }

        return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
                                __u32 *off)
{
        Elf_Data *symbols = obj->efile.symbols;
        const char *sname;
        size_t si;

        if (!name || !off)
                return -EINVAL;

        for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
                GElf_Sym sym;

                if (!gelf_getsym(symbols, si, &sym))
                        continue;
                if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
                    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
                        continue;

                sname = elf_sym_str(obj, sym.st_name);
                if (!sname) {
                        pr_warn("failed to get sym name string for var %s\n",
                                name);
                        return -EIO;
                }
                if (strcmp(name, sname) == 0) {
                        *off = sym.st_value;
                        return 0;
                }
        }

        return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
        struct bpf_map *new_maps;
        size_t new_cap;
        int i;

        if (obj->nr_maps < obj->maps_cap)
                return &obj->maps[obj->nr_maps++];

        new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
        new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
        if (!new_maps) {
                pr_warn("alloc maps for object failed\n");
                return ERR_PTR(-ENOMEM);
        }

        obj->maps_cap = new_cap;
        obj->maps = new_maps;

        /* zero out new maps */
        memset(obj->maps + obj->nr_maps, 0,
               (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
        /*
         * Fill all fds with -1 so we won't close an incorrect fd (fd=0 is
         * stdin) on failure (zclose won't close a negative fd).
         */
        for (i = obj->nr_maps; i < obj->maps_cap; i++) {
                obj->maps[i].fd = -1;
                obj->maps[i].inner_map_fd = -1;
        }

        return &obj->maps[obj->nr_maps++];
}
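
/* Note on the growth strategy above: capacity grows by a factor of 3/2 with
 * a floor of 4 (so 4, 6, 9, 13, ... entries), and libbpf_reallocarray() may
 * move the array, so a struct bpf_map pointer returned by an earlier call
 * must not be cached across a later bpf_object__add_map() call.
 */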

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
        long page_sz = sysconf(_SC_PAGE_SIZE);
        size_t map_sz;

        map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
        map_sz = roundup(map_sz, page_sz);
        return map_sz;
}
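
/* Worked example (illustrative values): value_size = 7, max_entries = 100,
 * 4KB pages: roundup(7, 8) * 100 = 800 bytes, rounded up to the page size,
 * gives a 4096-byte mmap() region.
 */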

static char *internal_map_name(struct bpf_object *obj,
                               enum libbpf_map_type type)
{
        char map_name[BPF_OBJ_NAME_LEN], *p;
        const char *sfx = libbpf_type_to_btf_name[type];
        int sfx_len = max((size_t)7, strlen(sfx));
        int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
                          strlen(obj->name));

        snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
                 sfx_len, libbpf_type_to_btf_name[type]);

        /* sanitise map name to characters allowed by kernel */
        for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
                if (!isalnum(*p) && *p != '_' && *p != '.')
                        *p = '_';

        return strdup(map_name);
}
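
/* Sketch of the truncation above, assuming BPF_OBJ_NAME_LEN == 16: for an
 * object named "my_object" and type LIBBPF_MAP_RODATA, sfx_len is
 * max(7, strlen(".rodata")) == 7 and pfx_len is min(16 - 7 - 1, 9) == 8,
 * so the result is "my_objec.rodata" (15 chars plus NUL fits the limit).
 */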

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
                              int sec_idx, void *data, size_t data_sz)
{
        struct bpf_map_def *def;
        struct bpf_map *map;
        int err;

        map = bpf_object__add_map(obj);
        if (IS_ERR(map))
                return PTR_ERR(map);

        map->libbpf_type = type;
        map->sec_idx = sec_idx;
        map->sec_offset = 0;
        map->name = internal_map_name(obj, type);
        if (!map->name) {
                pr_warn("failed to alloc map name\n");
                return -ENOMEM;
        }

        def = &map->def;
        def->type = BPF_MAP_TYPE_ARRAY;
        def->key_size = sizeof(int);
        def->value_size = data_sz;
        def->max_entries = 1;
        def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
                         ? BPF_F_RDONLY_PROG : 0;
        def->map_flags |= BPF_F_MMAPABLE;

        pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
                 map->name, map->sec_idx, map->sec_offset, def->map_flags);

        map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (map->mmaped == MAP_FAILED) {
                err = -errno;
                map->mmaped = NULL;
                pr_warn("failed to alloc map '%s' content buffer: %d\n",
                        map->name, err);
                zfree(&map->name);
                return err;
        }

        if (data)
                memcpy(map->mmaped, data, data_sz);

        pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
        return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
        int err;

        /*
         * Populate obj->maps with libbpf internal maps.
         */
        if (obj->efile.data_shndx >= 0) {
                err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
                                                    obj->efile.data_shndx,
                                                    obj->efile.data->d_buf,
                                                    obj->efile.data->d_size);
                if (err)
                        return err;
        }
        if (obj->efile.rodata_shndx >= 0) {
                err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
                                                    obj->efile.rodata_shndx,
                                                    obj->efile.rodata->d_buf,
                                                    obj->efile.rodata->d_size);
                if (err)
                        return err;

                obj->rodata_map_idx = obj->nr_maps - 1;
        }
        if (obj->efile.bss_shndx >= 0) {
                err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
                                                    obj->efile.bss_shndx,
                                                    NULL,
                                                    obj->efile.bss->d_size);
                if (err)
                        return err;
        }
        return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
                                               const void *name)
{
        int i;

        for (i = 0; i < obj->nr_extern; i++) {
                if (strcmp(obj->externs[i].name, name) == 0)
                        return &obj->externs[i];
        }
        return NULL;
}
1523
1524 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1525                               char value)
1526 {
1527         switch (ext->kcfg.type) {
1528         case KCFG_BOOL:
1529                 if (value == 'm') {
1530                         pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
1531                                 ext->name, value);
1532                         return -EINVAL;
1533                 }
1534                 *(bool *)ext_val = value == 'y';
1535                 break;
1536         case KCFG_TRISTATE:
1537                 if (value == 'y')
1538                         *(enum libbpf_tristate *)ext_val = TRI_YES;
1539                 else if (value == 'm')
1540                         *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1541                 else /* value == 'n' */
1542                         *(enum libbpf_tristate *)ext_val = TRI_NO;
1543                 break;
1544         case KCFG_CHAR:
1545                 *(char *)ext_val = value;
1546                 break;
1547         case KCFG_UNKNOWN:
1548         case KCFG_INT:
1549         case KCFG_CHAR_ARR:
1550         default:
1551                 pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
1552                         ext->name, value);
1553                 return -EINVAL;
1554         }
1555         ext->is_set = true;
1556         return 0;
1557 }
1558
1559 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1560                               const char *value)
1561 {
1562         size_t len;
1563
1564         if (ext->kcfg.type != KCFG_CHAR_ARR) {
1565                 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
1566                 return -EINVAL;
1567         }
1568
1569         len = strlen(value);
1570         if (len < 2 || value[len - 1] != '"') {
1571                 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1572                         ext->name, value);
1573                 return -EINVAL;
1574         }
1575
1576         /* strip quotes */
1577         len -= 2;
1578         if (len >= ext->kcfg.sz) {
1579                 pr_warn("extern (kcfg) '%s': long string config '%s' (%zu bytes) truncated to %d bytes\n",
1580                         ext->name, value, len, ext->kcfg.sz - 1);
1581                 len = ext->kcfg.sz - 1;
1582         }
1583         memcpy(ext_val, value + 1, len);
1584         ext_val[len] = '\0';
1585         ext->is_set = true;
1586         return 0;
1587 }
1588
1589 static int parse_u64(const char *value, __u64 *res)
1590 {
1591         char *value_end;
1592         int err;
1593
1594         errno = 0;
1595         *res = strtoull(value, &value_end, 0);
1596         if (errno) {
1597                 err = -errno;
1598                 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1599                 return err;
1600         }
1601         if (*value_end) {
1602                 pr_warn("failed to fully parse '%s' as integer\n", value);
1603                 return -EINVAL;
1604         }
1605         return 0;
1606 }
1607
1608 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
1609 {
1610         int bit_sz = ext->kcfg.sz * 8;
1611
1612         if (ext->kcfg.sz == 8)
1613                 return true;
1614
1615         /* Validate that a value stored in u64 fits in an integer of
1616          * ext->kcfg.sz bytes without any loss of information. If the target
1617          * integer is signed, we rely on the following limits of an integer
1618          * type of Y bits and the subsequent transformation:
1619          *
1620          *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
1621          *            0 <= X + 2^(Y-1) <= 2^Y - 1
1622          *            0 <= X + 2^(Y-1) <  2^Y
1623          *
1624          * For an unsigned target integer, check that all the (64 - Y) upper
1625          * bits are zero.
1626          */
1627         if (ext->kcfg.is_signed)
1628                 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1629         else
1630                 return (v >> bit_sz) == 0;
1631 }
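/* A worked example, for illustration, with a signed 1-byte target (Y = 8):
 * v = 0xffffffffffffff80 (-128): v + 0x80 wraps to 0x0, which is < 0x100, fits;
 * v = 0xffffffffffffff7f (-129): v + 0x80 = 0xffffffffffffffff, rejected.
 * With an unsigned 2-byte target, v = 0x10000 has (v >> 16) == 1, rejected.
 */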
1632
1633 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1634                               __u64 value)
1635 {
1636         if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
1637                 pr_warn("extern (kcfg) %s=%llu should be integer\n",
1638                         ext->name, (unsigned long long)value);
1639                 return -EINVAL;
1640         }
1641         if (!is_kcfg_value_in_range(ext, value)) {
1642                 pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
1643                         ext->name, (unsigned long long)value, ext->kcfg.sz);
1644                 return -ERANGE;
1645         }
1646         switch (ext->kcfg.sz) {
1647         case 1: *(__u8 *)ext_val = value; break;
1648         case 2: *(__u16 *)ext_val = value; break;
1649         case 4: *(__u32 *)ext_val = value; break;
1650         case 8: *(__u64 *)ext_val = value; break;
1651         default:
1652                 return -EINVAL;
1653         }
1654         ext->is_set = true;
1655         return 0;
1656 }
1657
1658 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1659                                             char *buf, void *data)
1660 {
1661         struct extern_desc *ext;
1662         char *sep, *value;
1663         int len, err = 0;
1664         void *ext_val;
1665         __u64 num;
1666
1667         if (strncmp(buf, "CONFIG_", 7))
1668                 return 0;
1669
1670         sep = strchr(buf, '=');
1671         if (!sep) {
1672                 pr_warn("failed to parse '%s': no separator\n", buf);
1673                 return -EINVAL;
1674         }
1675
1676         /* Trim trailing '\n' */
1677         len = strlen(buf);
1678         if (buf[len - 1] == '\n')
1679                 buf[len - 1] = '\0';
1680         /* Split on '=' and ensure that a value is present. */
1681         *sep = '\0';
1682         if (!sep[1]) {
1683                 *sep = '=';
1684                 pr_warn("failed to parse '%s': no value\n", buf);
1685                 return -EINVAL;
1686         }
1687
1688         ext = find_extern_by_name(obj, buf);
1689         if (!ext || ext->is_set)
1690                 return 0;
1691
1692         ext_val = data + ext->kcfg.data_off;
1693         value = sep + 1;
1694
1695         switch (*value) {
1696         case 'y': case 'n': case 'm':
1697                 err = set_kcfg_value_tri(ext, ext_val, *value);
1698                 break;
1699         case '"':
1700                 err = set_kcfg_value_str(ext, ext_val, value);
1701                 break;
1702         default:
1703                 /* assume integer */
1704                 err = parse_u64(value, &num);
1705                 if (err) {
1706                         pr_warn("extern (kcfg) %s=%s should be integer\n",
1707                                 ext->name, value);
1708                         return err;
1709                 }
1710                 err = set_kcfg_value_num(ext, ext_val, num);
1711                 break;
1712         }
1713         if (err)
1714                 return err;
1715         pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
1716         return 0;
1717 }
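/* For illustration, Kconfig lines like
 *
 *     CONFIG_BPF_SYSCALL=y
 *     CONFIG_HZ=250
 *     CONFIG_DEFAULT_HOSTNAME="(none)"
 *
 * would satisfy externs declared in a BPF object as (a sketch; __kconfig
 * comes from bpf_helpers.h):
 *
 *     extern bool CONFIG_BPF_SYSCALL __kconfig;
 *     extern int CONFIG_HZ __kconfig;
 *     extern char CONFIG_DEFAULT_HOSTNAME[8] __kconfig;
 *
 * 'y'/'n'/'m' values go through set_kcfg_value_tri(), quoted strings through
 * set_kcfg_value_str(), and everything else is parsed as an integer.
 */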
1718
1719 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1720 {
1721         char buf[PATH_MAX];
1722         struct utsname uts;
1723         int len, err = 0;
1724         gzFile file;
1725
1726         uname(&uts);
1727         len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1728         if (len < 0)
1729                 return -EINVAL;
1730         else if (len >= PATH_MAX)
1731                 return -ENAMETOOLONG;
1732
1733         /* gzopen also accepts uncompressed files. */
1734         file = gzopen(buf, "r");
1735         if (!file)
1736                 file = gzopen("/proc/config.gz", "r");
1737
1738         if (!file) {
1739                 pr_warn("failed to open system Kconfig\n");
1740                 return -ENOENT;
1741         }
1742
1743         while (gzgets(file, buf, sizeof(buf))) {
1744                 err = bpf_object__process_kconfig_line(obj, buf, data);
1745                 if (err) {
1746                         pr_warn("error parsing system Kconfig line '%s': %d\n",
1747                                 buf, err);
1748                         goto out;
1749                 }
1750         }
1751
1752 out:
1753         gzclose(file);
1754         return err;
1755 }
1756
1757 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1758                                         const char *config, void *data)
1759 {
1760         char buf[PATH_MAX];
1761         int err = 0;
1762         FILE *file;
1763
1764         file = fmemopen((void *)config, strlen(config), "r");
1765         if (!file) {
1766                 err = -errno;
1767                 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1768                 return err;
1769         }
1770
1771         while (fgets(buf, sizeof(buf), file)) {
1772                 err = bpf_object__process_kconfig_line(obj, buf, data);
1773                 if (err) {
1774                         pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1775                                 buf, err);
1776                         break;
1777                 }
1778         }
1779
1780         fclose(file);
1781         return err;
1782 }
1783
1784 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1785 {
1786         struct extern_desc *last_ext = NULL, *ext;
1787         size_t map_sz;
1788         int i, err;
1789
1790         for (i = 0; i < obj->nr_extern; i++) {
1791                 ext = &obj->externs[i];
1792                 if (ext->type == EXT_KCFG)
1793                         last_ext = ext;
1794         }
1795
1796         if (!last_ext)
1797                 return 0;
1798
1799         map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
1800         err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1801                                             obj->efile.symbols_shndx,
1802                                             NULL, map_sz);
1803         if (err)
1804                 return err;
1805
1806         obj->kconfig_map_idx = obj->nr_maps - 1;
1807
1808         return 0;
1809 }
1810
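/* Legacy (pre-BTF) map definitions are plain structs placed in the "maps"
 * ELF section, e.g. (illustrative):
 *
 *     struct bpf_map_def SEC("maps") my_map = {
 *             .type = BPF_MAP_TYPE_HASH,
 *             .key_size = sizeof(__u32),
 *             .value_size = sizeof(__u64),
 *             .max_entries = 1024,
 *     };
 *
 * Each symbol in that section is one map; its st_value is the offset of the
 * definition within the section.
 */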
1811 static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
1812 {
1813         Elf_Data *symbols = obj->efile.symbols;
1814         int i, map_def_sz = 0, nr_maps = 0, nr_syms;
1815         Elf_Data *data = NULL;
1816         Elf_Scn *scn;
1817
1818         if (obj->efile.maps_shndx < 0)
1819                 return 0;
1820
1821         if (!symbols)
1822                 return -EINVAL;
1823
1824         scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
1825         data = elf_sec_data(obj, scn);
1826         if (!scn || !data) {
1827                 pr_warn("elf: failed to get legacy map definitions for %s\n",
1828                         obj->path);
1829                 return -EINVAL;
1830         }
1831
1832         /*
1833          * Count number of maps. Each map has a name.
1834          * Array of maps is not supported: only the first element is
1835          * considered.
1836          *
1837          * TODO: Detect array of map and report error.
1838          */
1839         nr_syms = symbols->d_size / sizeof(GElf_Sym);
1840         for (i = 0; i < nr_syms; i++) {
1841                 GElf_Sym sym;
1842
1843                 if (!gelf_getsym(symbols, i, &sym))
1844                         continue;
1845                 if (sym.st_shndx != obj->efile.maps_shndx)
1846                         continue;
1847                 nr_maps++;
1848         }
1849         /* Assume equally sized map definitions */
1850         pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
1851                  nr_maps, data->d_size, obj->path);
1852
1853         if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
1854                 pr_warn("elf: unable to determine legacy map definition size in %s\n",
1855                         obj->path);
1856                 return -EINVAL;
1857         }
1858         map_def_sz = data->d_size / nr_maps;
1859
1860         /* Fill obj->maps using data in "maps" section.  */
1861         for (i = 0; i < nr_syms; i++) {
1862                 GElf_Sym sym;
1863                 const char *map_name;
1864                 struct bpf_map_def *def;
1865                 struct bpf_map *map;
1866
1867                 if (!gelf_getsym(symbols, i, &sym))
1868                         continue;
1869                 if (sym.st_shndx != obj->efile.maps_shndx)
1870                         continue;
1871
1872                 map = bpf_object__add_map(obj);
1873                 if (IS_ERR(map))
1874                         return PTR_ERR(map);
1875
1876                 map_name = elf_sym_str(obj, sym.st_name);
1877                 if (!map_name) {
1878                         pr_warn("failed to get map #%d name sym string for obj %s\n",
1879                                 i, obj->path);
1880                         return -LIBBPF_ERRNO__FORMAT;
1881                 }
1882
1883                 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION ||
1884                     GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
1885                         pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
1886                         return -ENOTSUP;
1887                 }
1888
1889                 map->libbpf_type = LIBBPF_MAP_UNSPEC;
1890                 map->sec_idx = sym.st_shndx;
1891                 map->sec_offset = sym.st_value;
1892                 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
1893                          map_name, map->sec_idx, map->sec_offset);
1894                 if (sym.st_value + map_def_sz > data->d_size) {
1895                         pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
1896                                 obj->path, map_name);
1897                         return -EINVAL;
1898                 }
1899
1900                 map->name = strdup(map_name);
1901                 if (!map->name) {
1902                         pr_warn("failed to alloc map name\n");
1903                         return -ENOMEM;
1904                 }
1905                 pr_debug("map %d is \"%s\"\n", i, map->name);
1906                 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
1907                 /*
1908                  * If the definition of the map in the object file fits in
1909                  * bpf_map_def, copy it.  Any extra fields in our version of
1910                  * bpf_map_def will default to zero, since the map was
1911                  * zero-initialized in bpf_object__add_map().
1912                  */
1913                 if (map_def_sz <= sizeof(struct bpf_map_def)) {
1914                         memcpy(&map->def, def, map_def_sz);
1915                 } else {
1916                         /*
1917                          * Here the map structure being read is bigger than what
1918                          * we expect, truncate if the excess bytes are all zero.
1919                          * If they are not zero, reject this map as
1920                          * incompatible.
1921                          */
1922                         char *b;
1923
1924                         for (b = ((char *)def) + sizeof(struct bpf_map_def);
1925                              b < ((char *)def) + map_def_sz; b++) {
1926                                 if (*b != 0) {
1927                                         pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
1928                                                 obj->path, map_name);
1929                                         if (strict)
1930                                                 return -EINVAL;
1931                                 }
1932                         }
1933                         memcpy(&map->def, def, sizeof(struct bpf_map_def));
1934                 }
1935         }
1936         return 0;
1937 }
1938
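/* Peel off type modifiers (const/volatile/restrict) and typedefs until a
 * "real" type is reached. E.g., for a hypothetical chain
 *
 *     typedef volatile int vint_t;   // TYPEDEF -> VOLATILE -> INT
 *
 * skip_mods_and_typedefs() on vint_t's type ID returns the underlying INT
 * type and, if res_id is provided, stores that INT's type ID in it.
 */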
1939 const struct btf_type *
1940 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1941 {
1942         const struct btf_type *t = btf__type_by_id(btf, id);
1943
1944         if (res_id)
1945                 *res_id = id;
1946
1947         while (btf_is_mod(t) || btf_is_typedef(t)) {
1948                 if (res_id)
1949                         *res_id = t->type;
1950                 t = btf__type_by_id(btf, t->type);
1951         }
1952
1953         return t;
1954 }
1955
1956 static const struct btf_type *
1957 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
1958 {
1959         const struct btf_type *t;
1960
1961         t = skip_mods_and_typedefs(btf, id, NULL);
1962         if (!btf_is_ptr(t))
1963                 return NULL;
1964
1965         t = skip_mods_and_typedefs(btf, t->type, res_id);
1966
1967         return btf_is_func_proto(t) ? t : NULL;
1968 }
1969
1970 static const char *__btf_kind_str(__u16 kind)
1971 {
1972         switch (kind) {
1973         case BTF_KIND_UNKN: return "void";
1974         case BTF_KIND_INT: return "int";
1975         case BTF_KIND_PTR: return "ptr";
1976         case BTF_KIND_ARRAY: return "array";
1977         case BTF_KIND_STRUCT: return "struct";
1978         case BTF_KIND_UNION: return "union";
1979         case BTF_KIND_ENUM: return "enum";
1980         case BTF_KIND_FWD: return "fwd";
1981         case BTF_KIND_TYPEDEF: return "typedef";
1982         case BTF_KIND_VOLATILE: return "volatile";
1983         case BTF_KIND_CONST: return "const";
1984         case BTF_KIND_RESTRICT: return "restrict";
1985         case BTF_KIND_FUNC: return "func";
1986         case BTF_KIND_FUNC_PROTO: return "func_proto";
1987         case BTF_KIND_VAR: return "var";
1988         case BTF_KIND_DATASEC: return "datasec";
1989         case BTF_KIND_FLOAT: return "float";
1990         default: return "unknown";
1991         }
1992 }
1993
1994 const char *btf_kind_str(const struct btf_type *t)
1995 {
1996         return __btf_kind_str(btf_kind(t));
1997 }
1998
1999 /*
2000  * Fetch integer attribute of BTF map definition. Such attributes are
2001  * represented using a pointer to an array, in which dimensionality of array
2002  * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2003  * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
2004  * type definition, while using only sizeof(void *) space in ELF data section.
2005  */
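/* In BPF source such fields are normally written with the __uint() helper
 * from bpf_helpers.h, which expands to exactly this pointer-to-array
 * encoding, e.g. (illustrative):
 *
 *     __uint(max_entries, 4096);   // int (*max_entries)[4096];
 */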
2006 static bool get_map_field_int(const char *map_name, const struct btf *btf,
2007                               const struct btf_member *m, __u32 *res)
2008 {
2009         const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2010         const char *name = btf__name_by_offset(btf, m->name_off);
2011         const struct btf_array *arr_info;
2012         const struct btf_type *arr_t;
2013
2014         if (!btf_is_ptr(t)) {
2015                 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2016                         map_name, name, btf_kind_str(t));
2017                 return false;
2018         }
2019
2020         arr_t = btf__type_by_id(btf, t->type);
2021         if (!arr_t) {
2022                 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2023                         map_name, name, t->type);
2024                 return false;
2025         }
2026         if (!btf_is_array(arr_t)) {
2027                 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2028                         map_name, name, btf_kind_str(arr_t));
2029                 return false;
2030         }
2031         arr_info = btf_array(arr_t);
2032         *res = arr_info->nelems;
2033         return true;
2034 }
2035
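/* E.g., with the default pin root path, a map named "my_map" gets the pin
 * path "/sys/fs/bpf/my_map".
 */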
2036 static int build_map_pin_path(struct bpf_map *map, const char *path)
2037 {
2038         char buf[PATH_MAX];
2039         int len;
2040
2041         if (!path)
2042                 path = "/sys/fs/bpf";
2043
2044         len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
2045         if (len < 0)
2046                 return -EINVAL;
2047         else if (len >= PATH_MAX)
2048                 return -ENAMETOOLONG;
2049
2050         return bpf_map__set_pin_path(map, buf);
2051 }
2052
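/* For illustration, a BTF-defined map-in-map that this parser accepts
 * (using the __uint/__type/__array helpers from bpf_helpers.h):
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *             __uint(max_entries, 8);
 *             __type(key, __u32);
 *             __array(values, struct {
 *                     __uint(type, BPF_MAP_TYPE_ARRAY);
 *                     __uint(max_entries, 1);
 *                     __type(key, __u32);
 *                     __type(value, __u64);
 *             });
 *     } outer_map SEC(".maps");
 *
 * The zero-sized "values" array member must come last and carries the inner
 * map definition, which is parsed recursively into *inner_def.
 */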
2053 int parse_btf_map_def(const char *map_name, struct btf *btf,
2054                       const struct btf_type *def_t, bool strict,
2055                       struct btf_map_def *map_def, struct btf_map_def *inner_def)
2056 {
2057         const struct btf_type *t;
2058         const struct btf_member *m;
2059         bool is_inner = inner_def == NULL;
2060         int vlen, i;
2061
2062         vlen = btf_vlen(def_t);
2063         m = btf_members(def_t);
2064         for (i = 0; i < vlen; i++, m++) {
2065                 const char *name = btf__name_by_offset(btf, m->name_off);
2066
2067                 if (!name) {
2068                         pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2069                         return -EINVAL;
2070                 }
2071                 if (strcmp(name, "type") == 0) {
2072                         if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2073                                 return -EINVAL;
2074                         map_def->parts |= MAP_DEF_MAP_TYPE;
2075                 } else if (strcmp(name, "max_entries") == 0) {
2076                         if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2077                                 return -EINVAL;
2078                         map_def->parts |= MAP_DEF_MAX_ENTRIES;
2079                 } else if (strcmp(name, "map_flags") == 0) {
2080                         if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2081                                 return -EINVAL;
2082                         map_def->parts |= MAP_DEF_MAP_FLAGS;
2083                 } else if (strcmp(name, "numa_node") == 0) {
2084                         if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2085                                 return -EINVAL;
2086                         map_def->parts |= MAP_DEF_NUMA_NODE;
2087                 } else if (strcmp(name, "key_size") == 0) {
2088                         __u32 sz;
2089
2090                         if (!get_map_field_int(map_name, btf, m, &sz))
2091                                 return -EINVAL;
2092                         if (map_def->key_size && map_def->key_size != sz) {
2093                                 pr_warn("map '%s': conflicting key size %u != %u.\n",
2094                                         map_name, map_def->key_size, sz);
2095                                 return -EINVAL;
2096                         }
2097                         map_def->key_size = sz;
2098                         map_def->parts |= MAP_DEF_KEY_SIZE;
2099                 } else if (strcmp(name, "key") == 0) {
2100                         __s64 sz;
2101
2102                         t = btf__type_by_id(btf, m->type);
2103                         if (!t) {
2104                                 pr_warn("map '%s': key type [%d] not found.\n",
2105                                         map_name, m->type);
2106                                 return -EINVAL;
2107                         }
2108                         if (!btf_is_ptr(t)) {
2109                                 pr_warn("map '%s': key spec is not PTR: %s.\n",
2110                                         map_name, btf_kind_str(t));
2111                                 return -EINVAL;
2112                         }
2113                         sz = btf__resolve_size(btf, t->type);
2114                         if (sz < 0) {
2115                                 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2116                                         map_name, t->type, (ssize_t)sz);
2117                                 return sz;
2118                         }
2119                         if (map_def->key_size && map_def->key_size != sz) {
2120                                 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2121                                         map_name, map_def->key_size, (ssize_t)sz);
2122                                 return -EINVAL;
2123                         }
2124                         map_def->key_size = sz;
2125                         map_def->key_type_id = t->type;
2126                         map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2127                 } else if (strcmp(name, "value_size") == 0) {
2128                         __u32 sz;
2129
2130                         if (!get_map_field_int(map_name, btf, m, &sz))
2131                                 return -EINVAL;
2132                         if (map_def->value_size && map_def->value_size != sz) {
2133                                 pr_warn("map '%s': conflicting value size %u != %u.\n",
2134                                         map_name, map_def->value_size, sz);
2135                                 return -EINVAL;
2136                         }
2137                         map_def->value_size = sz;
2138                         map_def->parts |= MAP_DEF_VALUE_SIZE;
2139                 } else if (strcmp(name, "value") == 0) {
2140                         __s64 sz;
2141
2142                         t = btf__type_by_id(btf, m->type);
2143                         if (!t) {
2144                                 pr_warn("map '%s': value type [%d] not found.\n",
2145                                         map_name, m->type);
2146                                 return -EINVAL;
2147                         }
2148                         if (!btf_is_ptr(t)) {
2149                                 pr_warn("map '%s': value spec is not PTR: %s.\n",
2150                                         map_name, btf_kind_str(t));
2151                                 return -EINVAL;
2152                         }
2153                         sz = btf__resolve_size(btf, t->type);
2154                         if (sz < 0) {
2155                                 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2156                                         map_name, t->type, (ssize_t)sz);
2157                                 return sz;
2158                         }
2159                         if (map_def->value_size && map_def->value_size != sz) {
2160                                 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2161                                         map_name, map_def->value_size, (ssize_t)sz);
2162                                 return -EINVAL;
2163                         }
2164                         map_def->value_size = sz;
2165                         map_def->value_type_id = t->type;
2166                         map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2167                 } else if (strcmp(name, "values") == 0) {
2169                         char inner_map_name[128];
2170                         int err;
2171
2172                         if (is_inner) {
2173                                 pr_warn("map '%s': multi-level inner maps not supported.\n",
2174                                         map_name);
2175                                 return -ENOTSUP;
2176                         }
2177                         if (i != vlen - 1) {
2178                                 pr_warn("map '%s': '%s' member should be last.\n",
2179                                         map_name, name);
2180                                 return -EINVAL;
2181                         }
2182                         if (!bpf_map_type__is_map_in_map(map_def->map_type)) {
2183                                 pr_warn("map '%s': should be map-in-map.\n",
2184                                         map_name);
2185                                 return -ENOTSUP;
2186                         }
2187                         if (map_def->value_size && map_def->value_size != 4) {
2188                                 pr_warn("map '%s': conflicting value size %u != 4.\n",
2189                                         map_name, map_def->value_size);
2190                                 return -EINVAL;
2191                         }
2192                         map_def->value_size = 4;
2193                         t = btf__type_by_id(btf, m->type);
2194                         if (!t) {
2195                                 pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
2196                                         map_name, m->type);
2197                                 return -EINVAL;
2198                         }
2199                         if (!btf_is_array(t) || btf_array(t)->nelems) {
2200                                 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
2201                                         map_name);
2202                                 return -EINVAL;
2203                         }
2204                         t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2205                         if (!btf_is_ptr(t)) {
2206                                 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2207                                         map_name, btf_kind_str(t));
2208                                 return -EINVAL;
2209                         }
2210                         t = skip_mods_and_typedefs(btf, t->type, NULL);
2211                         if (!btf_is_struct(t)) {
2212                                 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2213                                         map_name, btf_kind_str(t));
2214                                 return -EINVAL;
2215                         }
2216
2217                         snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2218                         err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2219                         if (err)
2220                                 return err;
2221
2222                         map_def->parts |= MAP_DEF_INNER_MAP;
2223                 } else if (strcmp(name, "pinning") == 0) {
2224                         __u32 val;
2225
2226                         if (is_inner) {
2227                                 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2228                                 return -EINVAL;
2229                         }
2230                         if (!get_map_field_int(map_name, btf, m, &val))
2231                                 return -EINVAL;
2232                         if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2233                                 pr_warn("map '%s': invalid pinning value %u.\n",
2234                                         map_name, val);
2235                                 return -EINVAL;
2236                         }
2237                         map_def->pinning = val;
2238                         map_def->parts |= MAP_DEF_PINNING;
2239                 } else {
2240                         if (strict) {
2241                                 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2242                                 return -ENOTSUP;
2243                         }
2244                         pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2245                 }
2246         }
2247
2248         if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2249                 pr_warn("map '%s': map type isn't specified.\n", map_name);
2250                 return -EINVAL;
2251         }
2252
2253         return 0;
2254 }
2255
2256 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2257 {
2258         map->def.type = def->map_type;
2259         map->def.key_size = def->key_size;
2260         map->def.value_size = def->value_size;
2261         map->def.max_entries = def->max_entries;
2262         map->def.map_flags = def->map_flags;
2263
2264         map->numa_node = def->numa_node;
2265         map->btf_key_type_id = def->key_type_id;
2266         map->btf_value_type_id = def->value_type_id;
2267
2268         if (def->parts & MAP_DEF_MAP_TYPE)
2269                 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2270
2271         if (def->parts & MAP_DEF_KEY_TYPE)
2272                 pr_debug("map '%s': found key [%u], sz = %u.\n",
2273                          map->name, def->key_type_id, def->key_size);
2274         else if (def->parts & MAP_DEF_KEY_SIZE)
2275                 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2276
2277         if (def->parts & MAP_DEF_VALUE_TYPE)
2278                 pr_debug("map '%s': found value [%u], sz = %u.\n",
2279                          map->name, def->value_type_id, def->value_size);
2280         else if (def->parts & MAP_DEF_VALUE_SIZE)
2281                 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2282
2283         if (def->parts & MAP_DEF_MAX_ENTRIES)
2284                 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2285         if (def->parts & MAP_DEF_MAP_FLAGS)
2286                 pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
2287         if (def->parts & MAP_DEF_PINNING)
2288                 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2289         if (def->parts & MAP_DEF_NUMA_NODE)
2290                 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2291
2292         if (def->parts & MAP_DEF_INNER_MAP)
2293                 pr_debug("map '%s': found inner map definition.\n", map->name);
2294 }
2295
2296 static const char *btf_var_linkage_str(__u32 linkage)
2297 {
2298         switch (linkage) {
2299         case BTF_VAR_STATIC: return "static";
2300         case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2301         case BTF_VAR_GLOBAL_EXTERN: return "extern";
2302         default: return "unknown";
2303         }
2304 }
2305
2306 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2307                                          const struct btf_type *sec,
2308                                          int var_idx, int sec_idx,
2309                                          const Elf_Data *data, bool strict,
2310                                          const char *pin_root_path)
2311 {
2312         struct btf_map_def map_def = {}, inner_def = {};
2313         const struct btf_type *var, *def;
2314         const struct btf_var_secinfo *vi;
2315         const struct btf_var *var_extra;
2316         const char *map_name;
2317         struct bpf_map *map;
2318         int err;
2319
2320         vi = btf_var_secinfos(sec) + var_idx;
2321         var = btf__type_by_id(obj->btf, vi->type);
2322         var_extra = btf_var(var);
2323         map_name = btf__name_by_offset(obj->btf, var->name_off);
2324
2325         if (map_name == NULL || map_name[0] == '\0') {
2326                 pr_warn("map #%d: empty name.\n", var_idx);
2327                 return -EINVAL;
2328         }
2329         if ((__u64)vi->offset + vi->size > data->d_size) {
2330                 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2331                 return -EINVAL;
2332         }
2333         if (!btf_is_var(var)) {
2334                 pr_warn("map '%s': unexpected var kind %s.\n",
2335                         map_name, btf_kind_str(var));
2336                 return -EINVAL;
2337         }
2338         if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2339                 pr_warn("map '%s': unsupported map linkage %s.\n",
2340                         map_name, btf_var_linkage_str(var_extra->linkage));
2341                 return -EOPNOTSUPP;
2342         }
2343
2344         def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2345         if (!btf_is_struct(def)) {
2346                 pr_warn("map '%s': unexpected def kind %s.\n",
2347                         map_name, btf_kind_str(def));
2348                 return -EINVAL;
2349         }
2350         if (def->size > vi->size) {
2351                 pr_warn("map '%s': invalid def size.\n", map_name);
2352                 return -EINVAL;
2353         }
2354
2355         map = bpf_object__add_map(obj);
2356         if (IS_ERR(map))
2357                 return PTR_ERR(map);
2358         map->name = strdup(map_name);
2359         if (!map->name) {
2360                 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2361                 return -ENOMEM;
2362         }
2363         map->libbpf_type = LIBBPF_MAP_UNSPEC;
2364         map->def.type = BPF_MAP_TYPE_UNSPEC;
2365         map->sec_idx = sec_idx;
2366         map->sec_offset = vi->offset;
2367         map->btf_var_idx = var_idx;
2368         pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2369                  map_name, map->sec_idx, map->sec_offset);
2370
2371         err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2372         if (err)
2373                 return err;
2374
2375         fill_map_from_def(map, &map_def);
2376
2377         if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2378                 err = build_map_pin_path(map, pin_root_path);
2379                 if (err) {
2380                         pr_warn("map '%s': couldn't build pin path.\n", map->name);
2381                         return err;
2382                 }
2383         }
2384
2385         if (map_def.parts & MAP_DEF_INNER_MAP) {
2386                 map->inner_map = calloc(1, sizeof(*map->inner_map));
2387                 if (!map->inner_map)
2388                         return -ENOMEM;
2389                 map->inner_map->fd = -1;
2390                 map->inner_map->sec_idx = sec_idx;
2391                 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2392                 if (!map->inner_map->name)
2393                         return -ENOMEM;
2394                 sprintf(map->inner_map->name, "%s.inner", map_name);
2395
2396                 fill_map_from_def(map->inner_map, &inner_def);
2397         }
2398
2399         return 0;
2400 }
2401
2402 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2403                                           const char *pin_root_path)
2404 {
2405         const struct btf_type *sec = NULL;
2406         int nr_types, i, vlen, err;
2407         const struct btf_type *t;
2408         const char *name;
2409         Elf_Data *data;
2410         Elf_Scn *scn;
2411
2412         if (obj->efile.btf_maps_shndx < 0)
2413                 return 0;
2414
2415         scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2416         data = elf_sec_data(obj, scn);
2417         if (!scn || !data) {
2418                 pr_warn("elf: failed to get %s map definitions for %s\n",
2419                         MAPS_ELF_SEC, obj->path);
2420                 return -EINVAL;
2421         }
2422
2423         nr_types = btf__get_nr_types(obj->btf);
2424         for (i = 1; i <= nr_types; i++) {
2425                 t = btf__type_by_id(obj->btf, i);
2426                 if (!btf_is_datasec(t))
2427                         continue;
2428                 name = btf__name_by_offset(obj->btf, t->name_off);
2429                 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2430                         sec = t;
2431                         obj->efile.btf_maps_sec_btf_id = i;
2432                         break;
2433                 }
2434         }
2435
2436         if (!sec) {
2437                 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2438                 return -ENOENT;
2439         }
2440
2441         vlen = btf_vlen(sec);
2442         for (i = 0; i < vlen; i++) {
2443                 err = bpf_object__init_user_btf_map(obj, sec, i,
2444                                                     obj->efile.btf_maps_shndx,
2445                                                     data, strict,
2446                                                     pin_root_path);
2447                 if (err)
2448                         return err;
2449         }
2450
2451         return 0;
2452 }
2453
2454 static int bpf_object__init_maps(struct bpf_object *obj,
2455                                  const struct bpf_object_open_opts *opts)
2456 {
2457         const char *pin_root_path;
2458         bool strict;
2459         int err;
2460
2461         strict = !OPTS_GET(opts, relaxed_maps, false);
2462         pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2463
2464         err = bpf_object__init_user_maps(obj, strict);
2465         err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2466         err = err ?: bpf_object__init_global_data_maps(obj);
2467         err = err ?: bpf_object__init_kconfig_map(obj);
2468         err = err ?: bpf_object__init_struct_ops_maps(obj);
2469
2470         return err;
2471 }
2472
2473 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2474 {
2475         GElf_Shdr sh;
2476
2477         if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
2478                 return false;
2479
2480         return sh.sh_flags & SHF_EXECINSTR;
2481 }
2482
2483 static bool btf_needs_sanitization(struct bpf_object *obj)
2484 {
2485         bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2486         bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2487         bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2488         bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2489
2490         return !has_func || !has_datasec || !has_func_global || !has_float;
2491 }
2492
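/* In-place downgrade of BTF kinds the running kernel doesn't support yet
 * into older, size-compatible stand-ins (a summary of the logic below):
 *
 *     VAR          -> 1-byte INT
 *     DATASEC      -> STRUCT ('.' in the name replaced with '_')
 *     FUNC_PROTO   -> ENUM
 *     FUNC         -> TYPEDEF
 *     global FUNC  -> static FUNC
 *     FLOAT        -> anonymous STRUCT of the same size
 */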
2493 static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2494 {
2495         bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2496         bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2497         bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2498         bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2499         struct btf_type *t;
2500         int i, j, vlen;
2501
2502         for (i = 1; i <= btf__get_nr_types(btf); i++) {
2503                 t = (struct btf_type *)btf__type_by_id(btf, i);
2504
2505                 if (!has_datasec && btf_is_var(t)) {
2506                         /* replace VAR with INT */
2507                         t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2508                         /*
2509                          * using size = 1 is the safest choice; 4 would be too
2510                          * big and cause a kernel BTF validation failure if
2511                          * the original variable took fewer than 4 bytes
2512                          */
2513                         t->size = 1;
2514                         *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2515                 } else if (!has_datasec && btf_is_datasec(t)) {
2516                         /* replace DATASEC with STRUCT */
2517                         const struct btf_var_secinfo *v = btf_var_secinfos(t);
2518                         struct btf_member *m = btf_members(t);
2519                         struct btf_type *vt;
2520                         char *name;
2521
2522                         name = (char *)btf__name_by_offset(btf, t->name_off);
2523                         while (*name) {
2524                                 if (*name == '.')
2525                                         *name = '_';
2526                                 name++;
2527                         }
2528
2529                         vlen = btf_vlen(t);
2530                         t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2531                         for (j = 0; j < vlen; j++, v++, m++) {
2532                                 /* order of field assignments is important */
2533                                 m->offset = v->offset * 8;
2534                                 m->type = v->type;
2535                                 /* preserve variable name as member name */
2536                                 vt = (void *)btf__type_by_id(btf, v->type);
2537                                 m->name_off = vt->name_off;
2538                         }
2539                 } else if (!has_func && btf_is_func_proto(t)) {
2540                         /* replace FUNC_PROTO with ENUM */
2541                         vlen = btf_vlen(t);
2542                         t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2543                         t->size = sizeof(__u32); /* kernel enforced */
2544                 } else if (!has_func && btf_is_func(t)) {
2545                         /* replace FUNC with TYPEDEF */
2546                         t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2547                 } else if (!has_func_global && btf_is_func(t)) {
2548                         /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2549                         t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2550                 } else if (!has_float && btf_is_float(t)) {
2551                         /* replace FLOAT with an equally-sized empty STRUCT;
2552                          * since C compilers do not accept e.g. "float" as a
2553                          * valid struct name, make it anonymous
2554                          */
2555                         t->name_off = 0;
2556                         t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2557                 }
2558         }
2559 }
2560
2561 static bool libbpf_needs_btf(const struct bpf_object *obj)
2562 {
2563         return obj->efile.btf_maps_shndx >= 0 ||
2564                obj->efile.st_ops_shndx >= 0 ||
2565                obj->nr_extern > 0;
2566 }
2567
2568 static bool kernel_needs_btf(const struct bpf_object *obj)
2569 {
2570         return obj->efile.st_ops_shndx >= 0;
2571 }
2572
2573 static int bpf_object__init_btf(struct bpf_object *obj,
2574                                 Elf_Data *btf_data,
2575                                 Elf_Data *btf_ext_data)
2576 {
2577         int err = -ENOENT;
2578
2579         if (btf_data) {
2580                 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2581                 err = libbpf_get_error(obj->btf);
2582                 if (err) {
2583                         obj->btf = NULL;
2584                         pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
2585                         goto out;
2586                 }
2587                 /* enforce 8-byte pointers for BPF-targeted BTFs */
2588                 btf__set_pointer_size(obj->btf, 8);
2589         }
2590         if (btf_ext_data) {
2591                 if (!obj->btf) {
2592                         pr_debug("Ignoring ELF section %s: the ELF section %s it depends on is missing.\n",
2593                                  BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2594                         goto out;
2595                 }
2596                 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2597                 err = libbpf_get_error(obj->btf_ext);
2598                 if (err) {
2599                         pr_warn("Error loading ELF section %s: %d. Ignoring and continuing.\n",
2600                                 BTF_EXT_ELF_SEC, err);
2601                         obj->btf_ext = NULL;
2602                         goto out;
2603                 }
2604         }
2605 out:
2606         if (err && libbpf_needs_btf(obj)) {
2607                 pr_warn("BTF is required, but is missing or corrupted.\n");
2608                 return err;
2609         }
2610         return 0;
2611 }
2612
2613 static int bpf_object__finalize_btf(struct bpf_object *obj)
2614 {
2615         int err;
2616
2617         if (!obj->btf)
2618                 return 0;
2619
2620         err = btf__finalize_data(obj, obj->btf);
2621         if (err) {
2622                 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2623                 return err;
2624         }
2625
2626         return 0;
2627 }
2628
2629 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2630 {
2631         if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2632             prog->type == BPF_PROG_TYPE_LSM)
2633                 return true;
2634
2635         /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2636          * also need vmlinux BTF
2637          */
2638         if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2639                 return true;
2640
2641         return false;
2642 }
2643
2644 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2645 {
2646         struct bpf_program *prog;
2647         int i;
2648
2649         /* CO-RE relocations need kernel BTF, but only if a custom BTF
2650          * (btf_custom_path) is not specified
2651          */
2652         if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
2653                 return true;
2654
2655         /* Support for typed ksyms needs kernel BTF */
2656         for (i = 0; i < obj->nr_extern; i++) {
2657                 const struct extern_desc *ext;
2658
2659                 ext = &obj->externs[i];
2660                 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2661                         return true;
2662         }
2663
2664         bpf_object__for_each_program(prog, obj) {
2665                 if (!prog->load)
2666                         continue;
2667                 if (prog_needs_vmlinux_btf(prog))
2668                         return true;
2669         }
2670
2671         return false;
2672 }
2673
2674 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2675 {
2676         int err;
2677
2678         /* btf_vmlinux could be loaded earlier */
2679         if (obj->btf_vmlinux || obj->gen_loader)
2680                 return 0;
2681
2682         if (!force && !obj_needs_vmlinux_btf(obj))
2683                 return 0;
2684
2685         obj->btf_vmlinux = btf__load_vmlinux_btf();
2686         err = libbpf_get_error(obj->btf_vmlinux);
2687         if (err) {
2688                 pr_warn("Error loading vmlinux BTF: %d\n", err);
2689                 obj->btf_vmlinux = NULL;
2690                 return err;
2691         }
2692         return 0;
2693 }
2694
2695 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2696 {
2697         struct btf *kern_btf = obj->btf;
2698         bool btf_mandatory, sanitize;
2699         int i, err = 0;
2700
2701         if (!obj->btf)
2702                 return 0;
2703
2704         if (!kernel_supports(obj, FEAT_BTF)) {
2705                 if (kernel_needs_btf(obj)) {
2706                         err = -EOPNOTSUPP;
2707                         goto report;
2708                 }
2709                 pr_debug("Kernel doesn't support BTF, skipping BTF upload.\n");
2710                 return 0;
2711         }
2712
2713         /* Even though some subprogs are global/weak, a user might prefer the
2714          * more permissive BPF verification process that the BPF verifier
2715          * performs for static functions, taking into account more context from
2716          * the caller functions. In such a case, they need to mark such subprogs
2717          * with __attribute__((visibility("hidden"))) and libbpf will adjust the
2718          * corresponding FUNC BTF type to be marked as static, triggering the
2719          * more involved BPF verification process.
2720          */
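        /* E.g., in BPF source (an illustrative subprog, not a real API):
         *
         *     __attribute__((visibility("hidden")))
         *     int my_subprog(void *ctx) { ... }
         */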
2721         for (i = 0; i < obj->nr_programs; i++) {
2722                 struct bpf_program *prog = &obj->programs[i];
2723                 struct btf_type *t;
2724                 const char *name;
2725                 int j, n;
2726
2727                 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
2728                         continue;
2729
2730                 n = btf__get_nr_types(obj->btf);
2731                 for (j = 1; j <= n; j++) {
2732                         t = btf_type_by_id(obj->btf, j);
2733                         if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
2734                                 continue;
2735
2736                         name = btf__str_by_offset(obj->btf, t->name_off);
2737                         if (strcmp(name, prog->name) != 0)
2738                                 continue;
2739
2740                         t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
2741                         break;
2742                 }
2743         }
2744
2745         sanitize = btf_needs_sanitization(obj);
2746         if (sanitize) {
2747                 const void *raw_data;
2748                 __u32 sz;
2749
2750                 /* clone BTF to sanitize a copy and leave the original intact */
2751                 raw_data = btf__get_raw_data(obj->btf, &sz);
2752                 kern_btf = btf__new(raw_data, sz);
2753                 err = libbpf_get_error(kern_btf);
2754                 if (err)
2755                         return err;
2756
2757                 /* enforce 8-byte pointers for BPF-targeted BTFs */
2758                 btf__set_pointer_size(obj->btf, 8);
2759                 bpf_object__sanitize_btf(obj, kern_btf);
2760         }
2761
2762         if (obj->gen_loader) {
2763                 __u32 raw_size = 0;
2764                 const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
2765
2766                 if (!raw_data)
2767                         return -ENOMEM;
2768                 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
2769                 /* Pretend to have a valid FD to pass various fd >= 0 checks.
2770                  * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
2771                  */
2772                 btf__set_fd(kern_btf, 0);
2773         } else {
2774                 err = btf__load_into_kernel(kern_btf);
2775         }
2776         if (sanitize) {
2777                 if (!err) {
2778                         /* move fd to libbpf's BTF */
2779                         btf__set_fd(obj->btf, btf__fd(kern_btf));
2780                         btf__set_fd(kern_btf, -1);
2781                 }
2782                 btf__free(kern_btf);
2783         }
2784 report:
2785         if (err) {
2786                 btf_mandatory = kernel_needs_btf(obj);
2787                 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
2788                         btf_mandatory ? "BTF is mandatory, can't proceed."
2789                                       : "BTF is optional, ignoring.");
2790                 if (!btf_mandatory)
2791                         err = 0;
2792         }
2793         return err;
2794 }
2795
2796 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2797 {
2798         const char *name;
2799
2800         name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2801         if (!name) {
2802                 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2803                         off, obj->path, elf_errmsg(-1));
2804                 return NULL;
2805         }
2806
2807         return name;
2808 }
2809
2810 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2811 {
2812         const char *name;
2813
2814         name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2815         if (!name) {
2816                 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2817                         off, obj->path, elf_errmsg(-1));
2818                 return NULL;
2819         }
2820
2821         return name;
2822 }
2823
2824 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2825 {
2826         Elf_Scn *scn;
2827
2828         scn = elf_getscn(obj->efile.elf, idx);
2829         if (!scn) {
2830                 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
2831                         idx, obj->path, elf_errmsg(-1));
2832                 return NULL;
2833         }
2834         return scn;
2835 }
2836
2837 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2838 {
2839         Elf_Scn *scn = NULL;
2840         Elf *elf = obj->efile.elf;
2841         const char *sec_name;
2842
2843         while ((scn = elf_nextscn(elf, scn)) != NULL) {
2844                 sec_name = elf_sec_name(obj, scn);
2845                 if (!sec_name)
2846                         return NULL;
2847
2848                 if (strcmp(sec_name, name) != 0)
2849                         continue;
2850
2851                 return scn;
2852         }
2853         return NULL;
2854 }
2855
2856 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2857 {
2858         if (!scn)
2859                 return -EINVAL;
2860
2861         if (gelf_getshdr(scn, hdr) != hdr) {
2862                 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
2863                         elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2864                 return -EINVAL;
2865         }
2866
2867         return 0;
2868 }
2869
2870 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2871 {
2872         const char *name;
2873         GElf_Shdr sh;
2874
2875         if (!scn)
2876                 return NULL;
2877
2878         if (elf_sec_hdr(obj, scn, &sh))
2879                 return NULL;
2880
2881         name = elf_sec_str(obj, sh.sh_name);
2882         if (!name) {
2883                 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
2884                         elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2885                 return NULL;
2886         }
2887
2888         return name;
2889 }
2890
2891 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2892 {
2893         Elf_Data *data;
2894
2895         if (!scn)
2896                 return NULL;
2897
2898         data = elf_getdata(scn, 0);
2899         if (!data) {
2900                 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
2901                         elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2902                         obj->path, elf_errmsg(-1));
2903                 return NULL;
2904         }
2905
2906         return data;
2907 }
2908
2909 static bool is_sec_name_dwarf(const char *name)
2910 {
2911         /* approximation, but the actual list is too long */
2912         return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
2913 }
2914
2915 static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
2916 {
2917         /* no special handling of .strtab */
2918         if (hdr->sh_type == SHT_STRTAB)
2919                 return true;
2920
2921         /* ignore .llvm_addrsig section as well */
2922         if (hdr->sh_type == SHT_LLVM_ADDRSIG)
2923                 return true;
2924
2925         /* having no subprograms leaves an empty .text section, ignore it */
2926         if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
2927             strcmp(name, ".text") == 0)
2928                 return true;
2929
2930         /* DWARF sections */
2931         if (is_sec_name_dwarf(name))
2932                 return true;
2933
2934         if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
2935                 name += sizeof(".rel") - 1;
2936                 /* DWARF section relocations */
2937                 if (is_sec_name_dwarf(name))
2938                         return true;
2939
2940                 /* .BTF and .BTF.ext don't need relocations */
2941                 if (strcmp(name, BTF_ELF_SEC) == 0 ||
2942                     strcmp(name, BTF_EXT_ELF_SEC) == 0)
2943                         return true;
2944         }
2945
2946         return false;
2947 }
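
/* Editorial note: for a typical clang-compiled BPF object the filter above
 * drops .strtab, .llvm_addrsig, an empty .text (no subprograms), all
 * .debug_* DWARF sections, their .rel.debug_* relocations, and
 * .rel.BTF/.rel.BTF.ext -- i.e. everything that carries no state libbpf
 * needs at load time.
 */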
2948
2949 static int cmp_progs(const void *_a, const void *_b)
2950 {
2951         const struct bpf_program *a = _a;
2952         const struct bpf_program *b = _b;
2953
2954         if (a->sec_idx != b->sec_idx)
2955                 return a->sec_idx < b->sec_idx ? -1 : 1;
2956
2957         /* sec_insn_off can't be the same for two programs within the same section */
2958         return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
2959 }
2960
2961 static int bpf_object__elf_collect(struct bpf_object *obj)
2962 {
2963         Elf *elf = obj->efile.elf;
2964         Elf_Data *btf_ext_data = NULL;
2965         Elf_Data *btf_data = NULL;
2966         int idx = 0, err = 0;
2967         const char *name;
2968         Elf_Data *data;
2969         Elf_Scn *scn;
2970         GElf_Shdr sh;
2971
2972         /* a bunch of ELF parsing functionality depends on processing symbols,
2973          * so do a first pass just to find the symbol table
2974          */
2975         scn = NULL;
2976         while ((scn = elf_nextscn(elf, scn)) != NULL) {
2977                 if (elf_sec_hdr(obj, scn, &sh))
2978                         return -LIBBPF_ERRNO__FORMAT;
2979
2980                 if (sh.sh_type == SHT_SYMTAB) {
2981                         if (obj->efile.symbols) {
2982                                 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
2983                                 return -LIBBPF_ERRNO__FORMAT;
2984                         }
2985
2986                         data = elf_sec_data(obj, scn);
2987                         if (!data)
2988                                 return -LIBBPF_ERRNO__FORMAT;
2989
2990                         obj->efile.symbols = data;
2991                         obj->efile.symbols_shndx = elf_ndxscn(scn);
2992                         obj->efile.strtabidx = sh.sh_link;
2993                 }
2994         }
2995
2996         if (!obj->efile.symbols) {
2997                 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
2998                         obj->path);
2999                 return -ENOENT;
3000         }
3001
3002         scn = NULL;
3003         while ((scn = elf_nextscn(elf, scn)) != NULL) {
3004                 idx++;
3005
3006                 if (elf_sec_hdr(obj, scn, &sh))
3007                         return -LIBBPF_ERRNO__FORMAT;
3008
3009                 name = elf_sec_str(obj, sh.sh_name);
3010                 if (!name)
3011                         return -LIBBPF_ERRNO__FORMAT;
3012
3013                 if (ignore_elf_section(&sh, name))
3014                         continue;
3015
3016                 data = elf_sec_data(obj, scn);
3017                 if (!data)
3018                         return -LIBBPF_ERRNO__FORMAT;
3019
3020                 pr_debug("elf: section(%d) %s, size %lu, link %d, flags %lx, type=%d\n",
3021                          idx, name, (unsigned long)data->d_size,
3022                          (int)sh.sh_link, (unsigned long)sh.sh_flags,
3023                          (int)sh.sh_type);
3024
3025                 if (strcmp(name, "license") == 0) {
3026                         err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3027                         if (err)
3028                                 return err;
3029                 } else if (strcmp(name, "version") == 0) {
3030                         err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3031                         if (err)
3032                                 return err;
3033                 } else if (strcmp(name, "maps") == 0) {
3034                         obj->efile.maps_shndx = idx;
3035                 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3036                         obj->efile.btf_maps_shndx = idx;
3037                 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3038                         btf_data = data;
3039                 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3040                         btf_ext_data = data;
3041                 } else if (sh.sh_type == SHT_SYMTAB) {
3042                         /* already processed during the first pass above */
3043                 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
3044                         if (sh.sh_flags & SHF_EXECINSTR) {
3045                                 if (strcmp(name, ".text") == 0)
3046                                         obj->efile.text_shndx = idx;
3047                                 err = bpf_object__add_programs(obj, data, name, idx);
3048                                 if (err)
3049                                         return err;
3050                         } else if (strcmp(name, DATA_SEC) == 0) {
3051                                 obj->efile.data = data;
3052                                 obj->efile.data_shndx = idx;
3053                         } else if (strcmp(name, RODATA_SEC) == 0) {
3054                                 obj->efile.rodata = data;
3055                                 obj->efile.rodata_shndx = idx;
3056                         } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
3057                                 obj->efile.st_ops_data = data;
3058                                 obj->efile.st_ops_shndx = idx;
3059                         } else {
3060                                 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3061                                         idx, name);
3062                         }
3063                 } else if (sh.sh_type == SHT_REL) {
3064                         int nr_sects = obj->efile.nr_reloc_sects;
3065                         void *sects = obj->efile.reloc_sects;
3066                         int sec = sh.sh_info; /* points to other section */
3067
3068                         /* Only do relo for section with exec instructions */
3069                         if (!section_have_execinstr(obj, sec) &&
3070                             strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3071                             strcmp(name, ".rel" MAPS_ELF_SEC)) {
3072                                 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3073                                         idx, name, sec,
3074                                         elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
3075                                 continue;
3076                         }
3077
3078                         sects = libbpf_reallocarray(sects, nr_sects + 1,
3079                                                     sizeof(*obj->efile.reloc_sects));
3080                         if (!sects)
3081                                 return -ENOMEM;
3082
3083                         obj->efile.reloc_sects = sects;
3084                         obj->efile.nr_reloc_sects++;
3085
3086                         obj->efile.reloc_sects[nr_sects].shdr = sh;
3087                         obj->efile.reloc_sects[nr_sects].data = data;
3088                 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
3089                         obj->efile.bss = data;
3090                         obj->efile.bss_shndx = idx;
3091                 } else {
3092                         pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3093                                 (size_t)sh.sh_size);
3094                 }
3095         }
3096
3097         if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3098                 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3099                 return -LIBBPF_ERRNO__FORMAT;
3100         }
3101
3102         /* sort BPF programs by section index and in-section instruction
3103          * offset for faster search */
3104         qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3105
3106         return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3107 }
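
/* Editorial sketch (not part of libbpf): the same section walk, reduced to
 * plain libelf for readers unfamiliar with the API. Error handling is
 * trimmed and fd is assumed to reference a valid ELF object file.
 *
 *	#include <gelf.h>
 *	#include <libelf.h>
 *	#include <stdio.h>
 *
 *	static void list_sections(int fd)
 *	{
 *		Elf *elf;
 *		Elf_Scn *scn = NULL;
 *		GElf_Shdr sh;
 *		size_t shstrndx;
 *
 *		elf_version(EV_CURRENT);	// mandatory libelf handshake
 *		elf = elf_begin(fd, ELF_C_READ, NULL);
 *		elf_getshdrstrndx(elf, &shstrndx);
 *		while ((scn = elf_nextscn(elf, scn)) != NULL) {
 *			if (gelf_getshdr(scn, &sh) != &sh)
 *				continue;
 *			printf("section(%zu) %s type=%d size=%llu\n",
 *			       elf_ndxscn(scn),
 *			       elf_strptr(elf, shstrndx, sh.sh_name),
 *			       (int)sh.sh_type,
 *			       (unsigned long long)sh.sh_size);
 *		}
 *		elf_end(elf);
 *	}
 */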
3108
3109 static bool sym_is_extern(const GElf_Sym *sym)
3110 {
3111         int bind = GELF_ST_BIND(sym->st_info);
3112         /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3113         return sym->st_shndx == SHN_UNDEF &&
3114                (bind == STB_GLOBAL || bind == STB_WEAK) &&
3115                GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
3116 }
3117
3118 static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx)
3119 {
3120         int bind = GELF_ST_BIND(sym->st_info);
3121         int type = GELF_ST_TYPE(sym->st_info);
3122
3123         /* in .text section */
3124         if (sym->st_shndx != text_shndx)
3125                 return false;
3126
3127         /* local function */
3128         if (bind == STB_LOCAL && type == STT_SECTION)
3129                 return true;
3130
3131         /* global function */
3132         return bind == STB_GLOBAL && type == STT_FUNC;
3133 }
3134
3135 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3136 {
3137         const struct btf_type *t;
3138         const char *tname;
3139         int i, n;
3140
3141         if (!btf)
3142                 return -ESRCH;
3143
3144         n = btf__get_nr_types(btf);
3145         for (i = 1; i <= n; i++) {
3146                 t = btf__type_by_id(btf, i);
3147
3148                 if (!btf_is_var(t) && !btf_is_func(t))
3149                         continue;
3150
3151                 tname = btf__name_by_offset(btf, t->name_off);
3152                 if (strcmp(tname, ext_name))
3153                         continue;
3154
3155                 if (btf_is_var(t) &&
3156                     btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3157                         return -EINVAL;
3158
3159                 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3160                         return -EINVAL;
3161
3162                 return i;
3163         }
3164
3165         return -ENOENT;
3166 }
3167
3168 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3169         const struct btf_var_secinfo *vs;
3170         const struct btf_type *t;
3171         int i, j, n;
3172
3173         if (!btf)
3174                 return -ESRCH;
3175
3176         n = btf__get_nr_types(btf);
3177         for (i = 1; i <= n; i++) {
3178                 t = btf__type_by_id(btf, i);
3179
3180                 if (!btf_is_datasec(t))
3181                         continue;
3182
3183                 vs = btf_var_secinfos(t);
3184                 for (j = 0; j < btf_vlen(t); j++, vs++) {
3185                         if (vs->type == ext_btf_id)
3186                                 return i;
3187                 }
3188         }
3189
3190         return -ENOENT;
3191 }
3192
3193 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3194                                      bool *is_signed)
3195 {
3196         const struct btf_type *t;
3197         const char *name;
3198
3199         t = skip_mods_and_typedefs(btf, id, NULL);
3200         name = btf__name_by_offset(btf, t->name_off);
3201
3202         if (is_signed)
3203                 *is_signed = false;
3204         switch (btf_kind(t)) {
3205         case BTF_KIND_INT: {
3206                 int enc = btf_int_encoding(t);
3207
3208                 if (enc & BTF_INT_BOOL)
3209                         return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3210                 if (is_signed)
3211                         *is_signed = enc & BTF_INT_SIGNED;
3212                 if (t->size == 1)
3213                         return KCFG_CHAR;
3214                 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3215                         return KCFG_UNKNOWN;
3216                 return KCFG_INT;
3217         }
3218         case BTF_KIND_ENUM:
3219                 if (t->size != 4)
3220                         return KCFG_UNKNOWN;
3221                 if (strcmp(name, "libbpf_tristate"))
3222                         return KCFG_UNKNOWN;
3223                 return KCFG_TRISTATE;
3224         case BTF_KIND_ARRAY:
3225                 if (btf_array(t)->nelems == 0)
3226                         return KCFG_UNKNOWN;
3227                 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3228                         return KCFG_UNKNOWN;
3229                 return KCFG_CHAR_ARR;
3230         default:
3231                 return KCFG_UNKNOWN;
3232         }
3233 }
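
/* Editorial sketch: the BPF-C declarations that land in each kcfg class
 * classified above (__kconfig and __weak come from bpf_helpers.h; the
 * config names are illustrative, not a fixed list):
 *
 *	extern unsigned int CONFIG_HZ __kconfig;		// KCFG_INT
 *	extern enum libbpf_tristate CONFIG_BPF_LSM __kconfig;	// KCFG_TRISTATE
 *	extern _Bool CONFIG_SOME_BOOL __kconfig __weak;		// KCFG_BOOL
 *	extern char CONFIG_SOME_STR[64] __kconfig __weak;	// KCFG_CHAR_ARR
 */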
3234
3235 static int cmp_externs(const void *_a, const void *_b)
3236 {
3237         const struct extern_desc *a = _a;
3238         const struct extern_desc *b = _b;
3239
3240         if (a->type != b->type)
3241                 return a->type < b->type ? -1 : 1;
3242
3243         if (a->type == EXT_KCFG) {
3244                 /* descending order by alignment requirements */
3245                 if (a->kcfg.align != b->kcfg.align)
3246                         return a->kcfg.align > b->kcfg.align ? -1 : 1;
3247                 /* ascending order by size, within same alignment class */
3248                 if (a->kcfg.sz != b->kcfg.sz)
3249                         return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3250         }
3251
3252         /* resolve ties by name */
3253         return strcmp(a->name, b->name);
3254 }
3255
3256 static int find_int_btf_id(const struct btf *btf)
3257 {
3258         const struct btf_type *t;
3259         int i, n;
3260
3261         n = btf__get_nr_types(btf);
3262         for (i = 1; i <= n; i++) {
3263                 t = btf__type_by_id(btf, i);
3264
3265                 if (btf_is_int(t) && btf_int_bits(t) == 32)
3266                         return i;
3267         }
3268
3269         return 0;
3270 }
3271
3272 static int add_dummy_ksym_var(struct btf *btf)
3273 {
3274         int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3275         const struct btf_var_secinfo *vs;
3276         const struct btf_type *sec;
3277
3278         if (!btf)
3279                 return 0;
3280
3281         sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
3282                                             BTF_KIND_DATASEC);
3283         if (sec_btf_id < 0)
3284                 return 0;
3285
3286         sec = btf__type_by_id(btf, sec_btf_id);
3287         vs = btf_var_secinfos(sec);
3288         for (i = 0; i < btf_vlen(sec); i++, vs++) {
3289                 const struct btf_type *vt;
3290
3291                 vt = btf__type_by_id(btf, vs->type);
3292                 if (btf_is_func(vt))
3293                         break;
3294         }
3295
3296         /* No func in ksyms sec.  No need to add dummy var. */
3297         if (i == btf_vlen(sec))
3298                 return 0;
3299
3300         int_btf_id = find_int_btf_id(btf);
3301         dummy_var_btf_id = btf__add_var(btf,
3302                                         "dummy_ksym",
3303                                         BTF_VAR_GLOBAL_ALLOCATED,
3304                                         int_btf_id);
3305         if (dummy_var_btf_id < 0)
3306                 pr_warn("cannot create a dummy_ksym var\n");
3307
3308         return dummy_var_btf_id;
3309 }
3310
3311 static int bpf_object__collect_externs(struct bpf_object *obj)
3312 {
3313         struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3314         const struct btf_type *t;
3315         struct extern_desc *ext;
3316         int i, n, off, dummy_var_btf_id;
3317         const char *ext_name, *sec_name;
3318         Elf_Scn *scn;
3319         GElf_Shdr sh;
3320
3321         if (!obj->efile.symbols)
3322                 return 0;
3323
3324         scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3325         if (elf_sec_hdr(obj, scn, &sh))
3326                 return -LIBBPF_ERRNO__FORMAT;
3327
3328         dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
3329         if (dummy_var_btf_id < 0)
3330                 return dummy_var_btf_id;
3331
3332         n = sh.sh_size / sh.sh_entsize;
3333         pr_debug("looking for externs among %d symbols...\n", n);
3334
3335         for (i = 0; i < n; i++) {
3336                 GElf_Sym sym;
3337
3338                 if (!gelf_getsym(obj->efile.symbols, i, &sym))
3339                         return -LIBBPF_ERRNO__FORMAT;
3340                 if (!sym_is_extern(&sym))
3341                         continue;
3342                 ext_name = elf_sym_str(obj, sym.st_name);
3343                 if (!ext_name || !ext_name[0])
3344                         continue;
3345
3346                 ext = obj->externs;
3347                 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3348                 if (!ext)
3349                         return -ENOMEM;
3350                 obj->externs = ext;
3351                 ext = &ext[obj->nr_extern];
3352                 memset(ext, 0, sizeof(*ext));
3353                 obj->nr_extern++;
3354
3355                 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3356                 if (ext->btf_id <= 0) {
3357                         pr_warn("failed to find BTF for extern '%s': %d\n",
3358                                 ext_name, ext->btf_id);
3359                         return ext->btf_id;
3360                 }
3361                 t = btf__type_by_id(obj->btf, ext->btf_id);
3362                 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3363                 ext->sym_idx = i;
3364                 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
3365
3366                 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3367                 if (ext->sec_btf_id <= 0) {
3368                         pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3369                                 ext_name, ext->btf_id, ext->sec_btf_id);
3370                         return ext->sec_btf_id;
3371                 }
3372                 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3373                 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3374
3375                 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3376                         if (btf_is_func(t)) {
3377                                 pr_warn("extern function %s is unsupported under %s section\n",
3378                                         ext->name, KCONFIG_SEC);
3379                                 return -ENOTSUP;
3380                         }
3381                         kcfg_sec = sec;
3382                         ext->type = EXT_KCFG;
3383                         ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3384                         if (ext->kcfg.sz <= 0) {
3385                                 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3386                                         ext_name, ext->kcfg.sz);
3387                                 return ext->kcfg.sz;
3388                         }
3389                         ext->kcfg.align = btf__align_of(obj->btf, t->type);
3390                         if (ext->kcfg.align <= 0) {
3391                                 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3392                                         ext_name, ext->kcfg.align);
3393                                 return -EINVAL;
3394                         }
3395                         ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3396                                                         &ext->kcfg.is_signed);
3397                         if (ext->kcfg.type == KCFG_UNKNOWN) {
3398                                 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3399                                 return -ENOTSUP;
3400                         }
3401                 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3402                         if (btf_is_func(t) && ext->is_weak) {
3403                                 pr_warn("extern weak function %s is unsupported\n",
3404                                         ext->name);
3405                                 return -ENOTSUP;
3406                         }
3407                         ksym_sec = sec;
3408                         ext->type = EXT_KSYM;
3409                         skip_mods_and_typedefs(obj->btf, t->type,
3410                                                &ext->ksym.type_id);
3411                 } else {
3412                         pr_warn("unrecognized extern section '%s'\n", sec_name);
3413                         return -ENOTSUP;
3414                 }
3415         }
3416         pr_debug("collected %d externs total\n", obj->nr_extern);
3417
3418         if (!obj->nr_extern)
3419                 return 0;
3420
3421         /* sort externs by type, for kcfg ones also by (align, size, name) */
3422         qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3423
3424         /* for .ksyms section, we need to turn all externs into allocated
3425          * variables in BTF to pass kernel verification; we do this by
3426          * pretending that each extern is an int-sized (4-byte) variable
3427          */
3428         if (ksym_sec) {
3429                 /* find existing 4-byte integer type in BTF to use for fake
3430                  * extern variables in DATASEC
3431                  */
3432                 int int_btf_id = find_int_btf_id(obj->btf);
3433                 /* For an extern function, the dummy_var added earlier
3434                  * replaces vs->type, and the dummy_var's name string
3435                  * is reused to fill in any missing parameter names
3436                  * in the function prototype.
3437                  */
3438                 const struct btf_type *dummy_var;
3439
3440                 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
3441                 for (i = 0; i < obj->nr_extern; i++) {
3442                         ext = &obj->externs[i];
3443                         if (ext->type != EXT_KSYM)
3444                                 continue;
3445                         pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3446                                  i, ext->sym_idx, ext->name);
3447                 }
3448
3449                 sec = ksym_sec;
3450                 n = btf_vlen(sec);
3451                 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3452                         struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3453                         struct btf_type *vt;
3454
3455                         vt = (void *)btf__type_by_id(obj->btf, vs->type);
3456                         ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3457                         ext = find_extern_by_name(obj, ext_name);
3458                         if (!ext) {
3459                                 pr_warn("failed to find extern definition for BTF %s '%s'\n",
3460                                         btf_kind_str(vt), ext_name);
3461                                 return -ESRCH;
3462                         }
3463                         if (btf_is_func(vt)) {
3464                                 const struct btf_type *func_proto;
3465                                 struct btf_param *param;
3466                                 int j;
3467
3468                                 func_proto = btf__type_by_id(obj->btf,
3469                                                              vt->type);
3470                                 param = btf_params(func_proto);
3471                                 /* Reuse the dummy_var string if the
3472                                  * func proto does not have a param name.
3473                                  */
3474                                 for (j = 0; j < btf_vlen(func_proto); j++)
3475                                         if (param[j].type && !param[j].name_off)
3476                                                 param[j].name_off =
3477                                                         dummy_var->name_off;
3478                                 vs->type = dummy_var_btf_id;
3479                                 vt->info &= ~0xffff;
3480                                 vt->info |= BTF_FUNC_GLOBAL;
3481                         } else {
3482                                 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3483                                 vt->type = int_btf_id;
3484                         }
3485                         vs->offset = off;
3486                         vs->size = sizeof(int);
3487                 }
3488                 sec->size = off;
3489         }
3490
3491         if (kcfg_sec) {
3492                 sec = kcfg_sec;
3493                 /* for kcfg externs calculate their offsets within a .kconfig map */
3494                 off = 0;
3495                 for (i = 0; i < obj->nr_extern; i++) {
3496                         ext = &obj->externs[i];
3497                         if (ext->type != EXT_KCFG)
3498                                 continue;
3499
3500                         ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3501                         off = ext->kcfg.data_off + ext->kcfg.sz;
3502                         pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3503                                  i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3504                 }
3505                 sec->size = off;
3506                 n = btf_vlen(sec);
3507                 for (i = 0; i < n; i++) {
3508                         struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3509
3510                         t = btf__type_by_id(obj->btf, vs->type);
3511                         ext_name = btf__name_by_offset(obj->btf, t->name_off);
3512                         ext = find_extern_by_name(obj, ext_name);
3513                         if (!ext) {
3514                                 pr_warn("failed to find extern definition for BTF var '%s'\n",
3515                                         ext_name);
3516                                 return -ESRCH;
3517                         }
3518                         btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3519                         vs->offset = ext->kcfg.data_off;
3520                 }
3521         }
3522         return 0;
3523 }
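
/* Editorial sketch: the kind of .ksyms externs the rewriting above applies
 * to (__ksym comes from bpf_helpers.h; the symbol names are illustrative):
 *
 *	extern const int bpf_prog_active __ksym;	// EXT_KSYM variable;
 *							// var becomes GLOBAL_ALLOCATED int
 *	extern __u64 some_kernel_func(void) __ksym;	// EXT_KSYM function;
 *							// vs->type replaced by dummy_var
 */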
3524
3525 struct bpf_program *
3526 bpf_object__find_program_by_title(const struct bpf_object *obj,
3527                                   const char *title)
3528 {
3529         struct bpf_program *pos;
3530
3531         bpf_object__for_each_program(pos, obj) {
3532                 if (pos->sec_name && !strcmp(pos->sec_name, title))
3533                         return pos;
3534         }
3535         return errno = ENOENT, NULL;
3536 }
3537
3538 static bool prog_is_subprog(const struct bpf_object *obj,
3539                             const struct bpf_program *prog)
3540 {
3541         /* For legacy reasons, libbpf supports entry-point BPF programs
3542          * without SEC() attribute, i.e., those in the .text section. But if
3543          * there are 2 or more such programs in the .text section, they all
3544          * must be subprograms called from entry-point BPF programs in
3545          * designated SEC()'tions, otherwise there is no way to distinguish
3546          * which of those programs should be loaded vs which are subprograms.
3547          * Similarly, if there is a function/program in .text and at least one
3548          * other BPF program with custom SEC() attribute, then we just assume
3549          * .text programs are subprograms (even if they are not called from
3550          * other programs), because libbpf never explicitly supported mixing
3551          * SEC()-designated BPF programs and .text entry-point BPF programs.
3552          */
3553         return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3554 }
3555
3556 struct bpf_program *
3557 bpf_object__find_program_by_name(const struct bpf_object *obj,
3558                                  const char *name)
3559 {
3560         struct bpf_program *prog;
3561
3562         bpf_object__for_each_program(prog, obj) {
3563                 if (prog_is_subprog(obj, prog))
3564                         continue;
3565                 if (!strcmp(prog->name, name))
3566                         return prog;
3567         }
3568         return errno = ENOENT, NULL;
3569 }
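
/* Editorial sketch: typical lookup by C function name rather than by SEC()
 * title; "prog.bpf.o" and "handle_exec" are illustrative, error handling
 * is trimmed:
 *
 *	#include <bpf/libbpf.h>
 *
 *	static struct bpf_program *get_prog(void)
 *	{
 *		struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *
 *		if (libbpf_get_error(obj))
 *			return NULL;
 *		// on failure this returns NULL and sets errno to ENOENT
 *		return bpf_object__find_program_by_name(obj, "handle_exec");
 *	}
 */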
3570
3571 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3572                                       int shndx)
3573 {
3574         return shndx == obj->efile.data_shndx ||
3575                shndx == obj->efile.bss_shndx ||
3576                shndx == obj->efile.rodata_shndx;
3577 }
3578
3579 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3580                                       int shndx)
3581 {
3582         return shndx == obj->efile.maps_shndx ||
3583                shndx == obj->efile.btf_maps_shndx;
3584 }
3585
3586 static enum libbpf_map_type
3587 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3588 {
3589         if (shndx == obj->efile.data_shndx)
3590                 return LIBBPF_MAP_DATA;
3591         else if (shndx == obj->efile.bss_shndx)
3592                 return LIBBPF_MAP_BSS;
3593         else if (shndx == obj->efile.rodata_shndx)
3594                 return LIBBPF_MAP_RODATA;
3595         else if (shndx == obj->efile.symbols_shndx)
3596                 return LIBBPF_MAP_KCONFIG;
3597         else
3598                 return LIBBPF_MAP_UNSPEC;
3599 }
3600
3601 static int bpf_program__record_reloc(struct bpf_program *prog,
3602                                      struct reloc_desc *reloc_desc,
3603                                      __u32 insn_idx, const char *sym_name,
3604                                      const GElf_Sym *sym, const GElf_Rel *rel)
3605 {
3606         struct bpf_insn *insn = &prog->insns[insn_idx];
3607         size_t map_idx, nr_maps = prog->obj->nr_maps;
3608         struct bpf_object *obj = prog->obj;
3609         __u32 shdr_idx = sym->st_shndx;
3610         enum libbpf_map_type type;
3611         const char *sym_sec_name;
3612         struct bpf_map *map;
3613
3614         if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
3615                 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3616                         prog->name, sym_name, insn_idx, insn->code);
3617                 return -LIBBPF_ERRNO__RELOC;
3618         }
3619
3620         if (sym_is_extern(sym)) {
3621                 int sym_idx = GELF_R_SYM(rel->r_info);
3622                 int i, n = obj->nr_extern;
3623                 struct extern_desc *ext;
3624
3625                 for (i = 0; i < n; i++) {
3626                         ext = &obj->externs[i];
3627                         if (ext->sym_idx == sym_idx)
3628                                 break;
3629                 }
3630                 if (i >= n) {
3631                         pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3632                                 prog->name, sym_name, sym_idx);
3633                         return -LIBBPF_ERRNO__RELOC;
3634                 }
3635                 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3636                          prog->name, i, ext->name, ext->sym_idx, insn_idx);
3637                 if (insn->code == (BPF_JMP | BPF_CALL))
3638                         reloc_desc->type = RELO_EXTERN_FUNC;
3639                 else
3640                         reloc_desc->type = RELO_EXTERN_VAR;
3641                 reloc_desc->insn_idx = insn_idx;
3642                 reloc_desc->sym_off = i; /* sym_off stores extern index */
3643                 return 0;
3644         }
3645
3646         /* sub-program call relocation */
3647         if (is_call_insn(insn)) {
3648                 if (insn->src_reg != BPF_PSEUDO_CALL) {
3649                         pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3650                         return -LIBBPF_ERRNO__RELOC;
3651                 }
3652                 /* text_shndx can be 0 if no default "main" program exists */
3653                 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3654                         sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3655                         pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3656                                 prog->name, sym_name, sym_sec_name);
3657                         return -LIBBPF_ERRNO__RELOC;
3658                 }
3659                 if (sym->st_value % BPF_INSN_SZ) {
3660                         pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3661                                 prog->name, sym_name, (size_t)sym->st_value);
3662                         return -LIBBPF_ERRNO__RELOC;
3663                 }
3664                 reloc_desc->type = RELO_CALL;
3665                 reloc_desc->insn_idx = insn_idx;
3666                 reloc_desc->sym_off = sym->st_value;
3667                 return 0;
3668         }
3669
3670         if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3671                 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3672                         prog->name, sym_name, shdr_idx);
3673                 return -LIBBPF_ERRNO__RELOC;
3674         }
3675
3676         /* loading subprog addresses */
3677         if (sym_is_subprog(sym, obj->efile.text_shndx)) {
3678                 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
3679                  * local_func: sym->st_value = 0, insn->imm = offset in the section.
3680                  */
3681                 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
3682                         pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
3683                                 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
3684                         return -LIBBPF_ERRNO__RELOC;
3685                 }
3686
3687                 reloc_desc->type = RELO_SUBPROG_ADDR;
3688                 reloc_desc->insn_idx = insn_idx;
3689                 reloc_desc->sym_off = sym->st_value;
3690                 return 0;
3691         }
3692
3693         type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
3694         sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3695
3696         /* generic map reference relocation */
3697         if (type == LIBBPF_MAP_UNSPEC) {
3698                 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3699                         pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
3700                                 prog->name, sym_name, sym_sec_name);
3701                         return -LIBBPF_ERRNO__RELOC;
3702                 }
3703                 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3704                         map = &obj->maps[map_idx];
3705                         if (map->libbpf_type != type ||
3706                             map->sec_idx != sym->st_shndx ||
3707                             map->sec_offset != sym->st_value)
3708                                 continue;
3709                         pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
3710                                  prog->name, map_idx, map->name, map->sec_idx,
3711                                  map->sec_offset, insn_idx);
3712                         break;
3713                 }
3714                 if (map_idx >= nr_maps) {
3715                         pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
3716                                 prog->name, sym_sec_name, (size_t)sym->st_value);
3717                         return -LIBBPF_ERRNO__RELOC;
3718                 }
3719                 reloc_desc->type = RELO_LD64;
3720                 reloc_desc->insn_idx = insn_idx;
3721                 reloc_desc->map_idx = map_idx;
3722                 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
3723                 return 0;
3724         }
3725
3726         /* global data map relocation */
3727         if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
3728                 pr_warn("prog '%s': bad data relo against section '%s'\n",
3729                         prog->name, sym_sec_name);
3730                 return -LIBBPF_ERRNO__RELOC;
3731         }
3732         for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3733                 map = &obj->maps[map_idx];
3734                 if (map->libbpf_type != type)
3735                         continue;
3736                 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
3737                          prog->name, map_idx, map->name, map->sec_idx,
3738                          map->sec_offset, insn_idx);
3739                 break;
3740         }
3741         if (map_idx >= nr_maps) {
3742                 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
3743                         prog->name, sym_sec_name);
3744                 return -LIBBPF_ERRNO__RELOC;
3745         }
3746
3747         reloc_desc->type = RELO_DATA;
3748         reloc_desc->insn_idx = insn_idx;
3749         reloc_desc->map_idx = map_idx;
3750         reloc_desc->sym_off = sym->st_value;
3751         return 0;
3752 }
3753
3754 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
3755 {
3756         return insn_idx >= prog->sec_insn_off &&
3757                insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
3758 }
3759
3760 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3761                                                  size_t sec_idx, size_t insn_idx)
3762 {
3763         int l = 0, r = obj->nr_programs - 1, m;
3764         struct bpf_program *prog;
3765
3766         while (l < r) {
3767                 m = l + (r - l + 1) / 2;
3768                 prog = &obj->programs[m];
3769
3770                 if (prog->sec_idx < sec_idx ||
3771                     (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
3772                         l = m;
3773                 else
3774                         r = m - 1;
3775         }
3776         /* the matching program could be at index l, but it still might be
3777          * the wrong one, so double-check the conditions one last time
3778          */
3779         prog = &obj->programs[l];
3780         if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
3781                 return prog;
3782         return NULL;
3783 }
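
/* Editorial note: this is the "find last element <= key" variant of binary
 * search: m is biased upward (l + (r - l + 1) / 2) so that l = m always
 * makes progress and the loop can't spin. The same pattern over a plain
 * sorted array, as a standalone sketch:
 *
 *	static int find_last_le(const int *arr, int n, int key)
 *	{
 *		int l = 0, r = n - 1, m;
 *
 *		if (n == 0 || arr[0] > key)
 *			return -1;		// no element <= key
 *		while (l < r) {
 *			m = l + (r - l + 1) / 2;
 *			if (arr[m] <= key)
 *				l = m;		// candidate, search right half
 *			else
 *				r = m - 1;
 *		}
 *		return l;
 *	}
 */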
3784
3785 static int
3786 bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
3787 {
3788         Elf_Data *symbols = obj->efile.symbols;
3789         const char *relo_sec_name, *sec_name;
3790         size_t sec_idx = shdr->sh_info;
3791         struct bpf_program *prog;
3792         struct reloc_desc *relos;
3793         int err, i, nrels;
3794         const char *sym_name;
3795         __u32 insn_idx;
3796         Elf_Scn *scn;
3797         Elf_Data *scn_data;
3798         GElf_Sym sym;
3799         GElf_Rel rel;
3800
3801         scn = elf_sec_by_idx(obj, sec_idx);
3802         scn_data = elf_sec_data(obj, scn);
3803
3804         relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3805         sec_name = elf_sec_name(obj, scn);
3806         if (!relo_sec_name || !sec_name)
3807                 return -EINVAL;
3808
3809         pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
3810                  relo_sec_name, sec_idx, sec_name);
3811         nrels = shdr->sh_size / shdr->sh_entsize;
3812
3813         for (i = 0; i < nrels; i++) {
3814                 if (!gelf_getrel(data, i, &rel)) {
3815                         pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
3816                         return -LIBBPF_ERRNO__FORMAT;
3817                 }
3818                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
3819                         pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
3820                                 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
3821                         return -LIBBPF_ERRNO__FORMAT;
3822                 }
3823
3824                 if (rel.r_offset % BPF_INSN_SZ || rel.r_offset >= scn_data->d_size) {
3825                         pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
3826                                 relo_sec_name, (size_t)rel.r_offset, i);
3827                         return -LIBBPF_ERRNO__FORMAT;
3828                 }
3829
3830                 insn_idx = rel.r_offset / BPF_INSN_SZ;
3831                 /* relocations against static functions are recorded as
3832                  * relocations against the section that contains the function;
3833                  * in such a case, the symbol will be STT_SECTION and sym.st_name
3834                  * will point to an empty string (0), so fetch the section name
3835                  * instead
3836                  */
3837                 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
3838                         sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3839                 else
3840                         sym_name = elf_sym_str(obj, sym.st_name);
3841                 sym_name = sym_name ?: "<?>";
3842
3843                 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
3844                          relo_sec_name, i, insn_idx, sym_name);
3845
3846                 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3847                 if (!prog) {
3848                         pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
3849                                 relo_sec_name, i, sec_name, insn_idx);
3850                         continue;
3851                 }
3852
3853                 relos = libbpf_reallocarray(prog->reloc_desc,
3854                                             prog->nr_reloc + 1, sizeof(*relos));
3855                 if (!relos)
3856                         return -ENOMEM;
3857                 prog->reloc_desc = relos;
3858
3859                 /* adjust insn_idx to local BPF program frame of reference */
3860                 insn_idx -= prog->sec_insn_off;
3861                 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
3862                                                 insn_idx, sym_name, &sym, &rel);
3863                 if (err)
3864                         return err;
3865
3866                 prog->nr_reloc++;
3867         }
3868         return 0;
3869 }
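
/* Worked example (editorial): a relocation with r_offset 0x58 targets
 * instruction 0x58 / 8 = 11 within its section; if the containing program
 * starts at sec_insn_off 8, the recorded insn_idx becomes 11 - 8 = 3,
 * i.e. relative to that program's own first instruction.
 */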
3870
3871 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
3872 {
3873         struct bpf_map_def *def = &map->def;
3874         __u32 key_type_id = 0, value_type_id = 0;
3875         int ret;
3876
3877         /* if it's a BTF-defined map, we don't need to search for type IDs.
3878          * A struct_ops map also doesn't need btf_key_type_id or
3879          * btf_value_type_id.
3880          */
3881         if (map->sec_idx == obj->efile.btf_maps_shndx ||
3882             bpf_map__is_struct_ops(map))
3883                 return 0;
3884
3885         if (!bpf_map__is_internal(map)) {
3886                 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
3887                                            def->value_size, &key_type_id,
3888                                            &value_type_id);
3889         } else {
3890                 /*
3891                  * LLVM annotates global data differently in BTF, that is,
3892                  * only as '.data', '.bss' or '.rodata'.
3893                  */
3894                 ret = btf__find_by_name(obj->btf,
3895                                 libbpf_type_to_btf_name[map->libbpf_type]);
3896         }
3897         if (ret < 0)
3898                 return ret;
3899
3900         map->btf_key_type_id = key_type_id;
3901         map->btf_value_type_id = bpf_map__is_internal(map) ?
3902                                  ret : value_type_id;
3903         return 0;
3904 }
3905
3906 static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
3907 {
3908         char file[PATH_MAX], buff[4096];
3909         FILE *fp;
3910         __u32 val;
3911         int err;
3912
3913         snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
3914         memset(info, 0, sizeof(*info));
3915
3916         fp = fopen(file, "r");
3917         if (!fp) {
3918                 err = -errno;
3919                 pr_warn("failed to open %s: %d. No procfs support?\n", file,
3920                         err);
3921                 return err;
3922         }
3923
3924         while (fgets(buff, sizeof(buff), fp)) {
3925                 if (sscanf(buff, "map_type:\t%u", &val) == 1)
3926                         info->type = val;
3927                 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
3928                         info->key_size = val;
3929                 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
3930                         info->value_size = val;
3931                 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
3932                         info->max_entries = val;
3933                 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
3934                         info->map_flags = val;
3935         }
3936
3937         fclose(fp);
3938
3939         return 0;
3940 }
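
/* Editorial example: the fdinfo lines the parser above matches look like
 * this (values are illustrative, from a hypothetical hash map fd):
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1024
 *	map_flags:	0x0
 */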
3941
3942 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3943 {
3944         struct bpf_map_info info = {};
3945         __u32 len = sizeof(info);
3946         int new_fd, err;
3947         char *new_name;
3948
3949         err = bpf_obj_get_info_by_fd(fd, &info, &len);
3950         if (err && errno == EINVAL)
3951                 err = bpf_get_map_info_from_fdinfo(fd, &info);
3952         if (err)
3953                 return libbpf_err(err);
3954
3955         new_name = strdup(info.name);
3956         if (!new_name)
3957                 return libbpf_err(-errno);
3958
3959         new_fd = open("/", O_RDONLY | O_CLOEXEC);
3960         if (new_fd < 0) {
3961                 err = -errno;
3962                 goto err_free_new_name;
3963         }
3964
3965         new_fd = dup3(fd, new_fd, O_CLOEXEC);
3966         if (new_fd < 0) {
3967                 err = -errno;
3968                 goto err_close_new_fd;
3969         }
3970
3971         err = zclose(map->fd);
3972         if (err) {
3973                 err = -errno;
3974                 goto err_close_new_fd;
3975         }
3976         free(map->name);
3977
3978         map->fd = new_fd;
3979         map->name = new_name;
3980         map->def.type = info.type;
3981         map->def.key_size = info.key_size;
3982         map->def.value_size = info.value_size;
3983         map->def.max_entries = info.max_entries;
3984         map->def.map_flags = info.map_flags;
3985         map->btf_key_type_id = info.btf_key_type_id;
3986         map->btf_value_type_id = info.btf_value_type_id;
3987         map->reused = true;
3988
3989         return 0;
3990
3991 err_close_new_fd:
3992         close(new_fd);
3993 err_free_new_name:
3994         free(new_name);
3995         return libbpf_err(err);
3996 }
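
/* Editorial sketch: the typical use of bpf_map__reuse_fd() is sharing one
 * pinned map across object loads; the pin path and map name are
 * illustrative:
 *
 *	#include <bpf/bpf.h>
 *	#include <bpf/libbpf.h>
 *
 *	static int share_map(struct bpf_object *obj)
 *	{
 *		struct bpf_map *map = bpf_object__find_map_by_name(obj, "shared_map");
 *		int pin_fd = bpf_obj_get("/sys/fs/bpf/shared_map");
 *
 *		if (!map || pin_fd < 0)
 *			return -1;
 *		// must run after bpf_object__open*(), before bpf_object__load()
 *		return bpf_map__reuse_fd(map, pin_fd);
 *	}
 */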
3997
3998 __u32 bpf_map__max_entries(const struct bpf_map *map)
3999 {
4000         return map->def.max_entries;
4001 }
4002
4003 struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4004 {
4005         if (!bpf_map_type__is_map_in_map(map->def.type))
4006                 return errno = EINVAL, NULL;
4007
4008         return map->inner_map;
4009 }
4010
4011 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4012 {
4013         if (map->fd >= 0)
4014                 return libbpf_err(-EBUSY);
4015         map->def.max_entries = max_entries;
4016         return 0;
4017 }
4018
4019 int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
4020 {
4021         if (!map || !max_entries)
4022                 return libbpf_err(-EINVAL);
4023
4024         return bpf_map__set_max_entries(map, max_entries);
4025 }
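
/* Editorial note: because bpf_map__set_max_entries() rejects maps that
 * already have an FD, resizing only works between open and load:
 *
 *	bpf_map__set_max_entries(map, 8192);	// OK before bpf_object__load()
 *	bpf_object__load(obj);
 *	bpf_map__set_max_entries(map, 16384);	// now fails with -EBUSY
 */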
4026
4027 static int
4028 bpf_object__probe_loading(struct bpf_object *obj)
4029 {
4030         struct bpf_load_program_attr attr;
4031         char *cp, errmsg[STRERR_BUFSIZE];
4032         struct bpf_insn insns[] = {
4033                 BPF_MOV64_IMM(BPF_REG_0, 0),
4034                 BPF_EXIT_INSN(),
4035         };
4036         int ret;
4037
4038         if (obj->gen_loader)
4039                 return 0;
4040
4041         /* make sure basic loading works */
4042
4043         memset(&attr, 0, sizeof(attr));
4044         attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4045         attr.insns = insns;
4046         attr.insns_cnt = ARRAY_SIZE(insns);
4047         attr.license = "GPL";
4048
4049         ret = bpf_load_program_xattr(&attr, NULL, 0);
4050         if (ret < 0) {
4051                 attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
4052                 ret = bpf_load_program_xattr(&attr, NULL, 0);
4053         }
4054         if (ret < 0) {
4055                 ret = errno;
4056                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4057                 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4058                         "program. Make sure your kernel supports BPF "
4059                         "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4060                         "set to a big enough value.\n", __func__, cp, ret);
4061                 return -ret;
4062         }
4063         close(ret);
4064
4065         return 0;
4066 }
4067
4068 static int probe_fd(int fd)
4069 {
4070         if (fd >= 0)
4071                 close(fd);
4072         return fd >= 0;
4073 }
4074
4075 static int probe_kern_prog_name(void)
4076 {
4077         struct bpf_load_program_attr attr;
4078         struct bpf_insn insns[] = {
4079                 BPF_MOV64_IMM(BPF_REG_0, 0),
4080                 BPF_EXIT_INSN(),
4081         };
4082         int ret;
4083
4084         /* make sure loading with name works */
4085
4086         memset(&attr, 0, sizeof(attr));
4087         attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4088         attr.insns = insns;
4089         attr.insns_cnt = ARRAY_SIZE(insns);
4090         attr.license = "GPL";
4091         attr.name = "test";
4092         ret = bpf_load_program_xattr(&attr, NULL, 0);
4093         return probe_fd(ret);
4094 }
4095
4096 static int probe_kern_global_data(void)
4097 {
4098         struct bpf_load_program_attr prg_attr;
4099         struct bpf_create_map_attr map_attr;
4100         char *cp, errmsg[STRERR_BUFSIZE];
4101         struct bpf_insn insns[] = {
4102                 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
4103                 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
4104                 BPF_MOV64_IMM(BPF_REG_0, 0),
4105                 BPF_EXIT_INSN(),
4106         };
4107         int ret, map;
4108
4109         memset(&map_attr, 0, sizeof(map_attr));
4110         map_attr.map_type = BPF_MAP_TYPE_ARRAY;
4111         map_attr.key_size = sizeof(int);
4112         map_attr.value_size = 32;
4113         map_attr.max_entries = 1;
4114
4115         map = bpf_create_map_xattr(&map_attr);
4116         if (map < 0) {
4117                 ret = -errno;
4118                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4119                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4120                         __func__, cp, -ret);
4121                 return ret;
4122         }
4123
4124         insns[0].imm = map;
4125
4126         memset(&prg_attr, 0, sizeof(prg_attr));
4127         prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4128         prg_attr.insns = insns;
4129         prg_attr.insns_cnt = ARRAY_SIZE(insns);
4130         prg_attr.license = "GPL";
4131
4132         ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
4133         close(map);
4134         return probe_fd(ret);
4135 }
4136
4137 static int probe_kern_btf(void)
4138 {
4139         static const char strs[] = "\0int";
4140         __u32 types[] = {
4141                 /* int */
4142                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4143         };
4144
4145         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4146                                              strs, sizeof(strs)));
4147 }
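
/* Editorial note: these probes hand-assemble the raw type section of a
 * minimal BTF blob. Here, BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4)
 * encodes a signed, 32-bit, 4-byte integer whose name starts at offset 1
 * in strs (-> "int"); libbpf__load_raw_btf() prepends a BTF header and
 * attempts BPF_BTF_LOAD, so a valid fd means the kernel understands BTF.
 */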
4148
4149 static int probe_kern_btf_func(void)
4150 {
4151         static const char strs[] = "\0int\0x\0a";
4152         /* void x(int a) {} */
4153         __u32 types[] = {
4154                 /* int */
4155                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4156                 /* FUNC_PROTO */                                /* [2] */
4157                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4158                 BTF_PARAM_ENC(7, 1),
4159                 /* FUNC x */                                    /* [3] */
4160                 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
4161         };
4162
4163         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4164                                              strs, sizeof(strs)));
4165 }
4166
4167 static int probe_kern_btf_func_global(void)
4168 {
4169         static const char strs[] = "\0int\0x\0a";
4170         /* void x(int a) {} -- non-static, i.e. global linkage */
4171         __u32 types[] = {
4172                 /* int */
4173                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4174                 /* FUNC_PROTO */                                /* [2] */
4175                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4176                 BTF_PARAM_ENC(7, 1),
4177                 /* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
4178                 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
4179         };
4180
4181         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4182                                              strs, sizeof(strs)));
4183 }
4184
4185 static int probe_kern_btf_datasec(void)
4186 {
4187         static const char strs[] = "\0x\0.data";
4188         /* static int x; */
4189         __u32 types[] = {
4190                 /* int */
4191                 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4192                 /* VAR x */                                     /* [2] */
4193                 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4194                 BTF_VAR_STATIC,
4195                 /* DATASEC .data */                             /* [3] */
4196                 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
4197                 BTF_VAR_SECINFO_ENC(2, 0, 4),
4198         };
4199
4200         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4201                                              strs, sizeof(strs)));
4202 }
4203
4204 static int probe_kern_btf_float(void)
4205 {
4206         static const char strs[] = "\0float";
4207         __u32 types[] = {
4208                 /* float */
4209                 BTF_TYPE_FLOAT_ENC(1, 4),
4210         };
4211
4212         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4213                                              strs, sizeof(strs)));
4214 }
4215
4216 static int probe_kern_array_mmap(void)
4217 {
4218         struct bpf_create_map_attr attr = {
4219                 .map_type = BPF_MAP_TYPE_ARRAY,
4220                 .map_flags = BPF_F_MMAPABLE,
4221                 .key_size = sizeof(int),
4222                 .value_size = sizeof(int),
4223                 .max_entries = 1,
4224         };
4225
4226         return probe_fd(bpf_create_map_xattr(&attr));
4227 }
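
/* Editor's sketch (not upstream code): once BPF_F_MMAPABLE is probed as
 * supported, the value area of such an array map can be mapped straight into
 * user memory, avoiding lookup/update syscalls. Assumes the map's single
 * value fits within one page.
 */
static void *example_mmap_array_value(int map_fd)
{
        /* offset 0 maps the start of the value area; length must be
         * page-aligned
         */
        return mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE,
                    MAP_SHARED, map_fd, 0);
}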
4228
4229 static int probe_kern_exp_attach_type(void)
4230 {
4231         struct bpf_load_program_attr attr;
4232         struct bpf_insn insns[] = {
4233                 BPF_MOV64_IMM(BPF_REG_0, 0),
4234                 BPF_EXIT_INSN(),
4235         };
4236
4237         memset(&attr, 0, sizeof(attr));
4238         /* use any valid combination of program type and (optional)
4239          * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS,
4240          * which is zero) to see if the kernel supports the
4241          * expected_attach_type field for the BPF_PROG_LOAD command
4242          */
4243         attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
4244         attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
4245         attr.insns = insns;
4246         attr.insns_cnt = ARRAY_SIZE(insns);
4247         attr.license = "GPL";
4248
4249         return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
4250 }
4251
4252 static int probe_kern_probe_read_kernel(void)
4253 {
4254         struct bpf_load_program_attr attr;
4255         struct bpf_insn insns[] = {
4256                 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),   /* r1 = r10 (fp) */
4257                 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),  /* r1 += -8 */
4258                 BPF_MOV64_IMM(BPF_REG_2, 8),            /* r2 = 8 */
4259                 BPF_MOV64_IMM(BPF_REG_3, 0),            /* r3 = 0 */
4260                 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
4261                 BPF_EXIT_INSN(),
4262         };
4263
4264         memset(&attr, 0, sizeof(attr));
4265         attr.prog_type = BPF_PROG_TYPE_KPROBE;
4266         attr.insns = insns;
4267         attr.insns_cnt = ARRAY_SIZE(insns);
4268         attr.license = "GPL";
4269
4270         return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
4271 }
4272
4273 static int probe_prog_bind_map(void)
4274 {
4275         struct bpf_load_program_attr prg_attr;
4276         struct bpf_create_map_attr map_attr;
4277         char *cp, errmsg[STRERR_BUFSIZE];
4278         struct bpf_insn insns[] = {
4279                 BPF_MOV64_IMM(BPF_REG_0, 0),
4280                 BPF_EXIT_INSN(),
4281         };
4282         int ret, map, prog;
4283
4284         memset(&map_attr, 0, sizeof(map_attr));
4285         map_attr.map_type = BPF_MAP_TYPE_ARRAY;
4286         map_attr.key_size = sizeof(int);
4287         map_attr.value_size = 32;
4288         map_attr.max_entries = 1;
4289
4290         map = bpf_create_map_xattr(&map_attr);
4291         if (map < 0) {
4292                 ret = -errno;
4293                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4294                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4295                         __func__, cp, -ret);
4296                 return ret;
4297         }
4298
4299         memset(&prg_attr, 0, sizeof(prg_attr));
4300         prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4301         prg_attr.insns = insns;
4302         prg_attr.insns_cnt = ARRAY_SIZE(insns);
4303         prg_attr.license = "GPL";
4304
4305         prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
4306         if (prog < 0) {
4307                 close(map);
4308                 return 0;
4309         }
4310
4311         ret = bpf_prog_bind_map(prog, map, NULL);
4312
4313         close(map);
4314         close(prog);
4315
4316         return ret >= 0;
4317 }
4318
4319 static int probe_module_btf(void)
4320 {
4321         static const char strs[] = "\0int";
4322         __u32 types[] = {
4323                 /* int */
4324                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4325         };
4326         struct bpf_btf_info info;
4327         __u32 len = sizeof(info);
4328         char name[16];
4329         int fd, err;
4330
4331         fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4332         if (fd < 0)
4333                 return 0; /* BTF not supported at all */
4334
4335         memset(&info, 0, sizeof(info));
4336         info.name = ptr_to_u64(name);
4337         info.name_len = sizeof(name);
4338
4339         /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
4340          * kernel's module BTF support coincides with support for
4341          * name/name_len fields in struct bpf_btf_info.
4342          */
4343         err = bpf_obj_get_info_by_fd(fd, &info, &len);
4344         close(fd);
4345         return !err;
4346 }
4347
4348 static int probe_perf_link(void)
4349 {
4350         struct bpf_load_program_attr attr;
4351         struct bpf_insn insns[] = {
4352                 BPF_MOV64_IMM(BPF_REG_0, 0),
4353                 BPF_EXIT_INSN(),
4354         };
4355         int prog_fd, link_fd, err;
4356
4357         memset(&attr, 0, sizeof(attr));
4358         attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
4359         attr.insns = insns;
4360         attr.insns_cnt = ARRAY_SIZE(insns);
4361         attr.license = "GPL";
4362         prog_fd = bpf_load_program_xattr(&attr, NULL, 0);
4363         if (prog_fd < 0)
4364                 return -errno;
4365
4366         /* use invalid perf_event FD to get EBADF, if link is supported;
4367          * otherwise EINVAL should be returned
4368          */
4369         link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
4370         err = -errno; /* close() can clobber errno */
4371
4372         if (link_fd >= 0)
4373                 close(link_fd);
4374         close(prog_fd);
4375
4376         return link_fd < 0 && err == -EBADF;
4377 }
4378
4379 enum kern_feature_result {
4380         FEAT_UNKNOWN = 0,
4381         FEAT_SUPPORTED = 1,
4382         FEAT_MISSING = 2,
4383 };
4384
4385 typedef int (*feature_probe_fn)(void);
4386
4387 static struct kern_feature_desc {
4388         const char *desc;
4389         feature_probe_fn probe;
4390         enum kern_feature_result res;
4391 } feature_probes[__FEAT_CNT] = {
4392         [FEAT_PROG_NAME] = {
4393                 "BPF program name", probe_kern_prog_name,
4394         },
4395         [FEAT_GLOBAL_DATA] = {
4396                 "global variables", probe_kern_global_data,
4397         },
4398         [FEAT_BTF] = {
4399                 "minimal BTF", probe_kern_btf,
4400         },
4401         [FEAT_BTF_FUNC] = {
4402                 "BTF functions", probe_kern_btf_func,
4403         },
4404         [FEAT_BTF_GLOBAL_FUNC] = {
4405                 "BTF global function", probe_kern_btf_func_global,
4406         },
4407         [FEAT_BTF_DATASEC] = {
4408                 "BTF data section and variable", probe_kern_btf_datasec,
4409         },
4410         [FEAT_ARRAY_MMAP] = {
4411                 "ARRAY map mmap()", probe_kern_array_mmap,
4412         },
4413         [FEAT_EXP_ATTACH_TYPE] = {
4414                 "BPF_PROG_LOAD expected_attach_type attribute",
4415                 probe_kern_exp_attach_type,
4416         },
4417         [FEAT_PROBE_READ_KERN] = {
4418                 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4419         },
4420         [FEAT_PROG_BIND_MAP] = {
4421                 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4422         },
4423         [FEAT_MODULE_BTF] = {
4424                 "module BTF support", probe_module_btf,
4425         },
4426         [FEAT_BTF_FLOAT] = {
4427                 "BTF_KIND_FLOAT support", probe_kern_btf_float,
4428         },
4429         [FEAT_PERF_LINK] = {
4430                 "BPF perf link support", probe_perf_link,
4431         },
4432 };
4433
4434 static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4435 {
4436         struct kern_feature_desc *feat = &feature_probes[feat_id];
4437         int ret;
4438
4439         if (obj->gen_loader)
4440                 /* To generate the loader program, assume the latest kernel
4441                  * to avoid doing extra prog_load and map_create syscalls.
4442                  */
4443                 return true;
4444
4445         if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4446                 ret = feat->probe();
4447                 if (ret > 0) {
4448                         WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4449                 } else if (ret == 0) {
4450                         WRITE_ONCE(feat->res, FEAT_MISSING);
4451                 } else {
4452                         pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4453                         WRITE_ONCE(feat->res, FEAT_MISSING);
4454                 }
4455         }
4456
4457         return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4458 }
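
/* Editor's usage sketch (illustrative only): callers gate optional kernel
 * functionality on these cached probe results, e.g.:
 *
 *      if (!kernel_supports(obj, FEAT_GLOBAL_DATA))
 *              return -ENOTSUP;
 *
 * The first call for a feature runs its probe; subsequent calls return the
 * cached FEAT_SUPPORTED/FEAT_MISSING verdict.
 */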
4459
4460 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4461 {
4462         struct bpf_map_info map_info = {};
4463         char msg[STRERR_BUFSIZE];
4464         __u32 map_info_len;
4465         int err;
4466
4467         map_info_len = sizeof(map_info);
4468
4469         err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
4470         if (err && errno == EINVAL)
4471                 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4472         if (err) {
4473                 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4474                         libbpf_strerror_r(errno, msg, sizeof(msg)));
4475                 return false;
4476         }
4477
4478         return (map_info.type == map->def.type &&
4479                 map_info.key_size == map->def.key_size &&
4480                 map_info.value_size == map->def.value_size &&
4481                 map_info.max_entries == map->def.max_entries &&
4482                 map_info.map_flags == map->def.map_flags);
4483 }
4484
4485 static int
4486 bpf_object__reuse_map(struct bpf_map *map)
4487 {
4488         char *cp, errmsg[STRERR_BUFSIZE];
4489         int err, pin_fd;
4490
4491         pin_fd = bpf_obj_get(map->pin_path);
4492         if (pin_fd < 0) {
4493                 err = -errno;
4494                 if (err == -ENOENT) {
4495                         pr_debug("found no pinned map to reuse at '%s'\n",
4496                                  map->pin_path);
4497                         return 0;
4498                 }
4499
4500                 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4501                 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4502                         map->pin_path, cp);
4503                 return err;
4504         }
4505
4506         if (!map_is_reuse_compat(map, pin_fd)) {
4507                 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4508                         map->pin_path);
4509                 close(pin_fd);
4510                 return -EINVAL;
4511         }
4512
4513         err = bpf_map__reuse_fd(map, pin_fd);
4514         if (err) {
4515                 close(pin_fd);
4516                 return err;
4517         }
4518         map->pinned = true;
4519         pr_debug("reused pinned map at '%s'\n", map->pin_path);
4520
4521         return 0;
4522 }
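
/* Editor's sketch (not upstream code): applications opt into the reuse path
 * above by assigning a pin path before load; bpf_map__set_pin_path() is the
 * public API for that. The path used here is purely hypothetical.
 */
static int example_opt_into_map_reuse(struct bpf_map *map)
{
        /* on the next load, bpf_object__reuse_map() checks this path before
         * creating a fresh map
         */
        return bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
}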
4523
4524 static int
4525 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4526 {
4527         enum libbpf_map_type map_type = map->libbpf_type;
4528         char *cp, errmsg[STRERR_BUFSIZE];
4529         int err, zero = 0;
4530
4531         if (obj->gen_loader) {
4532                 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
4533                                          map->mmaped, map->def.value_size);
4534                 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
4535                         bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
4536                 return 0;
4537         }
4538         err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4539         if (err) {
4540                 err = -errno;
4541                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4542                 pr_warn("Error setting initial map(%s) contents: %s\n",
4543                         map->name, cp);
4544                 return err;
4545         }
4546
4547         /* Freeze .rodata and .kconfig maps as read-only from the syscall side. */
4548         if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4549                 err = bpf_map_freeze(map->fd);
4550                 if (err) {
4551                         err = -errno;
4552                         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4553                         pr_warn("Error freezing map(%s) as read-only: %s\n",
4554                                 map->name, cp);
4555                         return err;
4556                 }
4557         }
4558         return 0;
4559 }
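
/* Editor's note (illustrative): once bpf_map_freeze() succeeds, writes from
 * the syscall side are rejected, e.g. a later bpf_map_update_elem() against
 * a frozen .rodata map fails with -EPERM, while reads keep working.
 */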
4560
4561 static void bpf_map__destroy(struct bpf_map *map);
4562
4563 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
4564 {
4565         struct bpf_create_map_attr create_attr;
4566         struct bpf_map_def *def = &map->def;
4567         int err = 0;
4568
4569         memset(&create_attr, 0, sizeof(create_attr));
4570
4571         if (kernel_supports(obj, FEAT_PROG_NAME))
4572                 create_attr.name = map->name;
4573         create_attr.map_ifindex = map->map_ifindex;
4574         create_attr.map_type = def->type;
4575         create_attr.map_flags = def->map_flags;
4576         create_attr.key_size = def->key_size;
4577         create_attr.value_size = def->value_size;
4578         create_attr.numa_node = map->numa_node;
4579
4580         if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
4581                 int nr_cpus;
4582
4583                 nr_cpus = libbpf_num_possible_cpus();
4584                 if (nr_cpus < 0) {
4585                         pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
4586                                 map->name, nr_cpus);
4587                         return nr_cpus;
4588                 }
4589                 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
4590                 create_attr.max_entries = nr_cpus;
4591         } else {
4592                 create_attr.max_entries = def->max_entries;
4593         }
4594
4595         if (bpf_map__is_struct_ops(map))
4596                 create_attr.btf_vmlinux_value_type_id =
4597                         map->btf_vmlinux_value_type_id;
4598
4599         create_attr.btf_fd = 0;
4600         create_attr.btf_key_type_id = 0;
4601         create_attr.btf_value_type_id = 0;
4602         if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
4603                 create_attr.btf_fd = btf__fd(obj->btf);
4604                 create_attr.btf_key_type_id = map->btf_key_type_id;
4605                 create_attr.btf_value_type_id = map->btf_value_type_id;
4606         }
4607
4608         if (bpf_map_type__is_map_in_map(def->type)) {
4609                 if (map->inner_map) {
4610                         err = bpf_object__create_map(obj, map->inner_map, true);
4611                         if (err) {
4612                                 pr_warn("map '%s': failed to create inner map: %d\n",
4613                                         map->name, err);
4614                                 return err;
4615                         }
4616                         map->inner_map_fd = bpf_map__fd(map->inner_map);
4617                 }
4618                 if (map->inner_map_fd >= 0)
4619                         create_attr.inner_map_fd = map->inner_map_fd;
4620         }
4621
4622         if (obj->gen_loader) {
4623                 bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
4624                 /* Pretend to have a valid FD to pass various fd >= 0 checks.
4625                  * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
4626                  */
4627                 map->fd = 0;
4628         } else {
4629                 map->fd = bpf_create_map_xattr(&create_attr);
4630         }
4631         if (map->fd < 0 && (create_attr.btf_key_type_id ||
4632                             create_attr.btf_value_type_id)) {
4633                 char *cp, errmsg[STRERR_BUFSIZE];
4634
4635                 err = -errno;
4636                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4637                 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4638                         map->name, cp, err);
4639                 create_attr.btf_fd = 0;
4640                 create_attr.btf_key_type_id = 0;
4641                 create_attr.btf_value_type_id = 0;
4642                 map->btf_key_type_id = 0;
4643                 map->btf_value_type_id = 0;
4644                 map->fd = bpf_create_map_xattr(&create_attr);
4645         }
4646
4647         err = map->fd < 0 ? -errno : 0;
4648
4649         if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4650                 if (obj->gen_loader)
4651                         map->inner_map->fd = -1;
4652                 bpf_map__destroy(map->inner_map);
4653                 zfree(&map->inner_map);
4654         }
4655
4656         return err;
4657 }
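
/* Editor's note: after the outer map is created, the parsed inner map
 * definition has served its only purpose (acting as the template that
 * provided inner_map_fd), which is why it is destroyed and freed above
 * instead of being kept around for the lifetime of the bpf_object.
 */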
4658
4659 static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
4660 {
4661         const struct bpf_map *targ_map;
4662         unsigned int i;
4663         int fd, err = 0;
4664
4665         for (i = 0; i < map->init_slots_sz; i++) {
4666                 if (!map->init_slots[i])
4667                         continue;
4668
4669                 targ_map = map->init_slots[i];
4670                 fd = bpf_map__fd(targ_map);
4671                 if (obj->gen_loader) {
4672                         pr_warn("// TODO map_update_elem: idx %td key %d value==map_idx %td\n",
4673                                 map - obj->maps, i, targ_map - obj->maps);
4674                         return -ENOTSUP;
4675                 } else {
4676                         err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4677                 }
4678                 if (err) {
4679                         err = -errno;
4680                         pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4681                                 map->name, i, targ_map->name,
4682                                 fd, err);
4683                         return err;
4684                 }
4685                 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4686                          map->name, i, targ_map->name, fd);
4687         }
4688
4689         zfree(&map->init_slots);
4690         map->init_slots_sz = 0;
4691
4692         return 0;
4693 }
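
/* Editor's sketch (assumed BPF-side user code, not part of this file): the
 * slots filled in above originate from declarative map-in-map definitions
 * along these lines, where inner_map_def, inner_a and inner_b are
 * hypothetical names:
 *
 *      struct {
 *              __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *              __uint(max_entries, 2);
 *              __array(values, struct inner_map_def);
 *      } outer SEC(".maps") = {
 *              .values = { [0] = &inner_a, [1] = &inner_b },
 *      };
 */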
4694
4695 static int
4696 bpf_object__create_maps(struct bpf_object *obj)
4697 {
4698         struct bpf_map *map;
4699         char *cp, errmsg[STRERR_BUFSIZE];
4700         unsigned int i, j;
4701         int err;
4702         bool retried;
4703
4704         for (i = 0; i < obj->nr_maps; i++) {
4705                 map = &obj->maps[i];
4706
4707                 retried = false;
4708 retry:
4709                 if (map->pin_path) {
4710                         err = bpf_object__reuse_map(map);
4711                         if (err) {
4712                                 pr_warn("map '%s': error reusing pinned map\n",
4713                                         map->name);
4714                                 goto err_out;
4715                         }
4716                         if (retried && map->fd < 0) {
4717                                 pr_warn("map '%s': cannot find pinned map\n",
4718                                         map->name);
4719                                 err = -ENOENT;
4720                                 goto err_out;
4721                         }
4722                 }
4723
4724                 if (map->fd >= 0) {
4725                         pr_debug("map '%s': skipping creation (preset fd=%d)\n",
4726                                  map->name, map->fd);
4727                 } else {
4728                         err = bpf_object__create_map(obj, map, false);
4729                         if (err)
4730                                 goto err_out;
4731
4732                         pr_debug("map '%s': created successfully, fd=%d\n",
4733                                  map->name, map->fd);
4734
4735                         if (bpf_map__is_internal(map)) {
4736                                 err = bpf_object__populate_internal_map(obj, map);
4737                                 if (err < 0) {
4738                                         zclose(map->fd);
4739                                         goto err_out;
4740                                 }
4741                         }
4742
4743                         if (map->init_slots_sz) {
4744                                 err = init_map_slots(obj, map);
4745                                 if (err < 0) {
4746                                         zclose(map->fd);
4747                                         goto err_out;
4748                                 }
4749                         }
4750                 }
4751
4752                 if (map->pin_path && !map->pinned) {
4753                         err = bpf_map__pin(map, NULL);
4754                         if (err) {
4755                                 zclose(map->fd);
4756                                 if (!retried && err == -EEXIST) {
4757                                         retried = true;
4758                                         goto retry;
4759                                 }
4760                                 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
4761                                         map->name, map->pin_path, err);
4762                                 goto err_out;
4763                         }
4764                 }
4765         }
4766
4767         return 0;
4768
4769 err_out:
4770         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4771         pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
4772         pr_perm_msg(err);
4773         for (j = 0; j < i; j++)
4774                 zclose(obj->maps[j].fd);
4775         return err;
4776 }
4777
4778 static bool bpf_core_is_flavor_sep(const char *s)
4779 {
4780         /* check X___Y name pattern, where X and Y are not underscores */
4781         return s[0] != '_' &&                                 /* X */
4782                s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
4783                s[4] != '_';                                   /* Y */
4784 }
4785
4786 /* Given 'some_struct_name___with_flavor', return the length of the name prefix
4787  * before the last triple underscore. The struct name part after the last triple
4788  * underscore is ignored during BPF CO-RE relocation matching.
4789  */
4790 size_t bpf_core_essential_name_len(const char *name)
4791 {
4792         size_t n = strlen(name);
4793         int i;
4794
4795         for (i = n - 5; i >= 0; i--) {
4796                 if (bpf_core_is_flavor_sep(name + i))
4797                         return i + 1;
4798         }
4799         return n;
4800 }
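
/* Editor's worked example (illustrative): for a flavored local type the
 * flavor suffix is dropped from matching, so
 *
 *      bpf_core_essential_name_len("task_struct___my_flavor") == 11
 *
 * i.e. only the "task_struct" prefix is compared against target names.
 */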
4801
4802 static void bpf_core_free_cands(struct bpf_core_cand_list *cands)
4803 {
4804         free(cands->cands);
4805         free(cands);
4806 }
4807
4808 static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
4809                               size_t local_essent_len,
4810                               const struct btf *targ_btf,
4811                               const char *targ_btf_name,
4812                               int targ_start_id,
4813                               struct bpf_core_cand_list *cands)
4814 {
4815         struct bpf_core_cand *new_cands, *cand;
4816         const struct btf_type *t;
4817         const char *targ_name;
4818         size_t targ_essent_len;
4819         int n, i;
4820
4821         n = btf__get_nr_types(targ_btf);
4822         for (i = targ_start_id; i <= n; i++) {
4823                 t = btf__type_by_id(targ_btf, i);
4824                 if (btf_kind(t) != btf_kind(local_cand->t))
4825                         continue;
4826
4827                 targ_name = btf__name_by_offset(targ_btf, t->name_off);
4828                 if (str_is_empty(targ_name))
4829                         continue;
4830
4831                 targ_essent_len = bpf_core_essential_name_len(targ_name);
4832                 if (targ_essent_len != local_essent_len)
4833                         continue;
4834
4835                 if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
4836                         continue;
4837
4838                 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
4839                          local_cand->id, btf_kind_str(local_cand->t),
4840                          local_cand->name, i, btf_kind_str(t), targ_name,
4841                          targ_btf_name);
4842                 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
4843                                               sizeof(*cands->cands));
4844                 if (!new_cands)
4845                         return -ENOMEM;
4846
4847                 cand = &new_cands[cands->len];
4848                 cand->btf = targ_btf;
4849                 cand->t = t;
4850                 cand->name = targ_name;
4851                 cand->id = i;
4852
4853                 cands->cands = new_cands;
4854                 cands->len++;
4855         }
4856         return 0;
4857 }
4858
4859 static int load_module_btfs(struct bpf_object *obj)
4860 {
4861         struct bpf_btf_info info;
4862         struct module_btf *mod_btf;
4863         struct btf *btf;
4864         char name[64];
4865         __u32 id = 0, len;
4866         int err, fd;
4867
4868         if (obj->btf_modules_loaded)
4869                 return 0;
4870
4871         if (obj->gen_loader)
4872                 return 0;
4873
4874         /* don't do this again, even if we find no module BTFs */
4875         obj->btf_modules_loaded = true;
4876
4877         /* kernel too old to support module BTFs */
4878         if (!kernel_supports(obj, FEAT_MODULE_BTF))
4879                 return 0;
4880
4881         while (true) {
4882                 err = bpf_btf_get_next_id(id, &id);
4883                 if (err && errno == ENOENT)
4884                         return 0;
4885                 if (err) {
4886                         err = -errno;
4887                         pr_warn("failed to iterate BTF objects: %d\n", err);
4888                         return err;
4889                 }
4890
4891                 fd = bpf_btf_get_fd_by_id(id);
4892                 if (fd < 0) {
4893                         if (errno == ENOENT)
4894                                 continue; /* expected race: BTF was unloaded */
4895                         err = -errno;
4896                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
4897                         return err;
4898                 }
4899
4900                 len = sizeof(info);
4901                 memset(&info, 0, sizeof(info));
4902                 info.name = ptr_to_u64(name);
4903                 info.name_len = sizeof(name);
4904
4905                 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4906                 if (err) {
4907                         err = -errno;
4908                         pr_warn("failed to get BTF object #%d info: %d\n", id, err);
4909                         goto err_out;
4910                 }
4911
4912                 /* ignore non-module BTFs */
4913                 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
4914                         close(fd);
4915                         continue;
4916                 }
4917
4918                 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
4919                 err = libbpf_get_error(btf);
4920                 if (err) {
4921                         pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
4922                                 name, id, err);
4923                         goto err_out;
4924                 }
4925
4926                 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
4927                                         sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
4928                 if (err)
4929                         goto err_out;
4930
4931                 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
4932
4933                 mod_btf->btf = btf;
4934                 mod_btf->id = id;
4935                 mod_btf->fd = fd;
4936                 mod_btf->name = strdup(name);
4937                 if (!mod_btf->name) {
4938                         err = -ENOMEM;
4939                         goto err_out;
4940                 }
4941                 continue;
4942
4943 err_out:
4944                 close(fd);
4945                 return err;
4946         }
4947
4948         return 0;
4949 }
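
/* Editor's note (illustrative): each module's BTF object and its FD are kept
 * open in obj->btf_modules so that later CO-RE candidate searches can
 * reference them without re-fetching from the kernel.
 */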
4950
4951 static struct bpf_core_cand_list *
4952 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
4953 {
4954         struct bpf_core_cand local_cand = {};
4955         struct bpf_core_cand_list *cands;
4956         const struct btf *main_btf;
4957         size_t local_essent_len;
4958         int err, i;
4959
4960         local_cand.btf = local_btf;
4961         local_cand.t = btf__type_by_id(local_btf, local_type_id);
4962         if (!local_cand.t)
4963                 return ERR_PTR(-EINVAL);
4964
4965         local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
4966         if (str_is_empty(local_cand.name))
4967                 return ERR_PTR(-EINVAL);
4968         local_essent_len = bpf_core_essential_name_len(local_cand.name);
4969
4970         cands = calloc(1, sizeof(*cands));
4971         if (!cands)
4972                 return ERR_PTR(-ENOMEM);
4973
4974         /* Attempt to find target candidates in vmlinux BTF first */
4975         main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
4976         err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
4977         if (err)
4978                 goto err_out;
4979
4980         /* if vmlinux BTF has any candidate, don't go for module BTFs */
4981         if (cands->len)
4982                 return cands;
4983
4984         /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
4985         if (obj->btf_vmlinux_override)
4986                 return cands;
4987
4988         /* now look through module BTFs, trying to still find candidates */
4989         err = load_module_btfs(obj);
4990         if (err)
4991                 goto err_out;
4992
4993         for (i = 0; i < obj->btf_module_cnt; i++) {
4994                 err = bpf_core_add_cands(&local_cand, local_essent_len,
4995                                          obj->btf_modules[i].btf,
4996                                          obj->btf_modules[i].name,
4997                                          btf__get_nr_types(obj->btf_vmlinux) + 1,
4998                                          cands);
4999                 if (err)
5000                         goto err_out;
5001         }
5002
5003         return cands;
5004 err_out:
5005         bpf_core_free_cands(cands);
5006         return ERR_PTR(err);
5007 }
5008
5009 /* Check local and target types for compatibility. This check is used for
5010  * type-based CO-RE relocations, and follows slightly different rules from
5011  * field-based relocations. This function assumes that root types were already
5012  * checked for name match. Beyond that initial root-level name check, names
5013  * are completely ignored. Compatibility rules are as follows:
5014  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5015  *     kind should match for local and target types (i.e., STRUCT is not
5016  *     compatible with UNION);
5017  *   - for ENUMs, the size is ignored;
5018  *   - for INT, size and signedness are ignored;
5019  *   - for ARRAY, dimensionality is ignored, element types are checked for
5020  *     compatibility recursively;
5021  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
5022  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5023  *   - FUNC_PROTOs are compatible if they have compatible signature: same
5024  *     number of input args and compatible return and argument types.
5025  * These rules are not set in stone and probably will be adjusted as we get
5026  * more experience with using BPF CO-RE relocations.
5027  */
5028 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5029                               const struct btf *targ_btf, __u32 targ_id)
5030 {
5031         const struct btf_type *local_type, *targ_type;
5032         int depth = 32; /* max recursion depth */
5033
5034         /* caller made sure that names match (ignoring flavor suffix) */
5035         local_type = btf__type_by_id(local_btf, local_id);
5036         targ_type = btf__type_by_id(targ_btf, targ_id);
5037         if (btf_kind(local_type) != btf_kind(targ_type))
5038                 return 0;
5039
5040 recur:
5041         depth--;
5042         if (depth < 0)
5043                 return -EINVAL;
5044
5045         local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5046         targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5047         if (!local_type || !targ_type)
5048                 return -EINVAL;
5049
5050         if (btf_kind(local_type) != btf_kind(targ_type))
5051                 return 0;
5052
5053         switch (btf_kind(local_type)) {
5054         case BTF_KIND_UNKN:
5055         case BTF_KIND_STRUCT:
5056         case BTF_KIND_UNION:
5057         case BTF_KIND_ENUM:
5058         case BTF_KIND_FWD:
5059                 return 1;
5060         case BTF_KIND_INT:
5061                 /* just reject deprecated bitfield-like integers; all other
5062                  * integers are by default compatible with each other
5063                  */
5064                 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5065         case BTF_KIND_PTR:
5066                 local_id = local_type->type;
5067                 targ_id = targ_type->type;
5068                 goto recur;
5069         case BTF_KIND_ARRAY:
5070                 local_id = btf_array(local_type)->type;
5071                 targ_id = btf_array(targ_type)->type;
5072                 goto recur;
5073         case BTF_KIND_FUNC_PROTO: {
5074                 struct btf_param *local_p = btf_params(local_type);
5075                 struct btf_param *targ_p = btf_params(targ_type);
5076                 __u16 local_vlen = btf_vlen(local_type);
5077                 __u16 targ_vlen = btf_vlen(targ_type);
5078                 int i, err;
5079
5080                 if (local_vlen != targ_vlen)
5081                         return 0;
5082
5083                 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5084                         skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5085                         skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5086                         err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5087                         if (err <= 0)
5088                                 return err;
5089                 }
5090
5091                 /* tail recurse for return type check */
5092                 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5093                 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5094                 goto recur;
5095         }
5096         default:
5097                 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5098                         btf_kind_str(local_type), local_id, targ_id);
5099                 return 0;
5100         }
5101 }
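
/* Editor's examples (illustrative): under the rules above, a local 'int' is
 * compatible with a target 'long' (both are non-bitfield INTs, and size and
 * signedness are ignored), and 'int *' with 'long *' (PTRs recurse into
 * compatible INTs), whereas 'struct s' can never match 'union s' because the
 * BTF kinds differ.
 */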
5102
5103 static size_t bpf_core_hash_fn(const void *key, void *ctx)
5104 {
5105         return (size_t)key;
5106 }
5107
5108 static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5109 {
5110         return k1 == k2;
5111 }
5112
5113 static void *u32_as_hash_key(__u32 x)
5114 {
5115         return (void *)(uintptr_t)x;
5116 }
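
/* Editor's note (illustrative): the candidate cache is keyed by local BTF
 * type ID, so the u32 ID is stuffed directly into the pointer-sized key and
 * the identity hash/equality functions above are sufficient.
 */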
5117
5118 static int bpf_core_apply_relo(struct bpf_program *prog,
5119                                const struct bpf_core_relo *relo,
5120                                int relo_idx,
5121                                const struct btf *local_btf,
5122                                struct hashmap *cand_cache)
5123 {
5124         const void *type_key = u32_as_hash_key(relo->type_id);
5125         struct bpf_core_cand_list *cands = NULL;
5126         const char *prog_name = prog->name;
5127         const struct btf_type *local_type;
5128         const char *local_name;
5129         __u32 local_id = relo->type_id;
5130         struct bpf_insn *insn;
5131         int insn_idx, err;
5132
5133         if (relo->insn_off % BPF_INSN_SZ)
5134                 return -EINVAL;
5135         insn_idx = relo->insn_off / BPF_INSN_SZ;
5136         /* adjust insn_idx from section frame of reference to the local
5137          * program's frame of reference; (sub-)program code is not yet
5138          * relocated, so it's enough to just subtract in-section offset
5139          */
5140         insn_idx = insn_idx - prog->sec_insn_off;
5141         if (insn_idx >= prog->insns_cnt)
5142                 return -EINVAL;
5143         insn = &prog->insns[insn_idx];
5144
5145         local_type = btf__type_by_id(local_btf, local_id);
5146         if (!local_type)
5147                 return -EINVAL;
5148
5149         local_name = btf__name_by_offset(local_btf, local_type->name_off);
5150         if (!local_name)
5151                 return -EINVAL;
5152
5153         if (prog->obj->gen_loader) {
5154                 pr_warn("// TODO core_relo: prog %td insn[%d] %s kind %d\n",
5155                         prog - prog->obj->programs, relo->insn_off / 8,
5156                         local_name, relo->kind);
5157                 return -ENOTSUP;
5158         }
5159
5160         if (relo->kind != BPF_TYPE_ID_LOCAL &&
5161             !hashmap__find(cand_cache, type_key, (void **)&cands)) {
5162                 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5163                 if (IS_ERR(cands)) {
5164                         pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5165                                 prog_name, relo_idx, local_id, btf_kind_str(local_type),
5166                                 local_name, PTR_ERR(cands));
5167                         return PTR_ERR(cands);
5168                 }
5169                 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5170                 if (err) {
5171                         bpf_core_free_cands(cands);
5172                         return err;
5173                 }
5174         }
5175
5176         return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo, relo_idx, local_btf, cands);
5177 }
5178
5179 static int
5180 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5181 {
5182         const struct btf_ext_info_sec *sec;
5183         const struct bpf_core_relo *rec;
5184         const struct btf_ext_info *seg;
5185         struct hashmap_entry *entry;
5186         struct hashmap *cand_cache = NULL;
5187         struct bpf_program *prog;
5188         const char *sec_name;
5189         int i, err = 0, insn_idx, sec_idx;
5190
5191         if (obj->btf_ext->core_relo_info.len == 0)
5192                 return 0;
5193
5194         if (targ_btf_path) {
5195                 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5196                 err = libbpf_get_error(obj->btf_vmlinux_override);
5197                 if (err) {
5198                         pr_warn("failed to parse target BTF: %d\n", err);
5199                         return err;
5200                 }
5201         }
5202
5203         cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5204         if (IS_ERR(cand_cache)) {
5205                 err = PTR_ERR(cand_cache);
5206                 goto out;
5207         }
5208
5209         seg = &obj->btf_ext->core_relo_info;
5210         for_each_btf_ext_sec(seg, sec) {
5211                 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5212                 if (str_is_empty(sec_name)) {
5213                         err = -EINVAL;
5214                         goto out;
5215                 }
5216                 /* bpf_object's ELF is gone by now so it's not easy to find
5217                  * section index by section name, but we can find *any*
5218                  * bpf_program within the desired section and use its
5219                  * prog->sec_idx to do a proper search by section index and
5220                  * instruction offset
5221                  */
5222                 prog = NULL;
5223                 for (i = 0; i < obj->nr_programs; i++) {
5224                         prog = &obj->programs[i];
5225                         if (strcmp(prog->sec_name, sec_name) == 0)
5226                                 break;
5227                 }
5228                 if (!prog) {
5229                         pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
5230                         return -ENOENT;
5231                 }
5232                 sec_idx = prog->sec_idx;
5233
5234                 pr_debug("sec '%s': found %d CO-RE relocations\n",
5235                          sec_name, sec->num_info);
5236
5237                 for_each_btf_ext_rec(seg, sec, i, rec) {
5238                         insn_idx = rec->insn_off / BPF_INSN_SZ;
5239                         prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5240                         if (!prog) {
5241                                 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
5242                                         sec_name, insn_idx, i);
5243                                 err = -EINVAL;
5244                                 goto out;
5245                         }
5246                         /* no need to apply CO-RE relocation if the program is
5247                          * not going to be loaded
5248                          */
5249                         if (!prog->load)
5250                                 continue;
5251
5252                         err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
5253                         if (err) {
5254                                 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5255                                         prog->name, i, err);
5256                                 goto out;
5257                         }
5258                 }
5259         }
5260
5261 out:
5262         /* obj->btf_vmlinux and module BTFs are freed after object load */
5263         btf__free(obj->btf_vmlinux_override);
5264         obj->btf_vmlinux_override = NULL;
5265
5266         if (!IS_ERR_OR_NULL(cand_cache)) {
5267                 hashmap__for_each_entry(cand_cache, entry, i) {
5268                         bpf_core_free_cands(entry->value);
5269                 }
5270                 hashmap__free(cand_cache);
5271         }
5272         return err;
5273 }
5274
5275 /* Relocate data references within program code:
5276  *  - map references;
5277  *  - global variable references;
5278  *  - extern references.
5279  */
5280 static int
5281 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5282 {
5283         int i;
5284
5285         for (i = 0; i < prog->nr_reloc; i++) {
5286                 struct reloc_desc *relo = &prog->reloc_desc[i];
5287                 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5288                 struct extern_desc *ext;
5289
5290                 switch (relo->type) {
5291                 case RELO_LD64:
5292                         if (obj->gen_loader) {
5293                                 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5294                                 insn[0].imm = relo->map_idx;
5295                         } else {
5296                                 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5297                                 insn[0].imm = obj->maps[relo->map_idx].fd;
5298                         }
5299                         break;
5300                 case RELO_DATA:
5301                         insn[1].imm = insn[0].imm + relo->sym_off;
5302                         if (obj->gen_loader) {
5303                                 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5304                                 insn[0].imm = relo->map_idx;
5305                         } else {
5306                                 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5307                                 insn[0].imm = obj->maps[relo->map_idx].fd;
5308                         }
5309                         break;
5310                 case RELO_EXTERN_VAR:
5311                         ext = &obj->externs[relo->sym_off];
5312                         if (ext->type == EXT_KCFG) {
5313                                 if (obj->gen_loader) {
5314                                         insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5315                                         insn[0].imm = obj->kconfig_map_idx;
5316                                 } else {
5317                                         insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5318                                         insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5319                                 }
5320                                 insn[1].imm = ext->kcfg.data_off;
5321                         } else /* EXT_KSYM */ {
5322                                 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
5323                                         insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5324                                         insn[0].imm = ext->ksym.kernel_btf_id;
5325                                         insn[1].imm = ext->ksym.kernel_btf_obj_fd;
5326                                 } else { /* typeless ksyms or unresolved typed ksyms */
5327                                         insn[0].imm = (__u32)ext->ksym.addr;
5328                                         insn[1].imm = ext->ksym.addr >> 32;
5329                                 }
5330                         }
5331                         break;
5332                 case RELO_EXTERN_FUNC:
5333                         ext = &obj->externs[relo->sym_off];
5334                         insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
5335                         insn[0].imm = ext->ksym.kernel_btf_id;
5336                         break;
5337                 case RELO_SUBPROG_ADDR:
5338                         if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
5339                                 pr_warn("prog '%s': relo #%d: bad insn\n",
5340                                         prog->name, i);
5341                                 return -EINVAL;
5342                         }
5343                         /* handled already */
5344                         break;
5345                 case RELO_CALL:
5346                         /* handled already */
5347                         break;
5348                 default:
5349                         pr_warn("prog '%s': relo #%d: bad relo type %d\n",
5350                                 prog->name, i, relo->type);
5351                         return -EINVAL;
5352                 }
5353         }
5354
5355         return 0;
5356 }
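
/* Editor's note (illustrative): the insn[1] accesses above are safe because
 * the instructions being patched are ldimm64 instructions, which occupy two
 * consecutive struct bpf_insn slots; for BPF_PSEUDO_MAP_VALUE, insn[0].imm
 * carries the map FD and insn[1].imm the offset within the map value.
 */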
5357
5358 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
5359                                     const struct bpf_program *prog,
5360                                     const struct btf_ext_info *ext_info,
5361                                     void **prog_info, __u32 *prog_rec_cnt,
5362                                     __u32 *prog_rec_sz)
5363 {
5364         void *copy_start = NULL, *copy_end = NULL;
5365         void *rec, *rec_end, *new_prog_info;
5366         const struct btf_ext_info_sec *sec;
5367         size_t old_sz, new_sz;
5368         const char *sec_name;
5369         int i, off_adj;
5370
5371         for_each_btf_ext_sec(ext_info, sec) {
5372                 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5373                 if (!sec_name)
5374                         return -EINVAL;
5375                 if (strcmp(sec_name, prog->sec_name) != 0)
5376                         continue;
5377
5378                 for_each_btf_ext_rec(ext_info, sec, i, rec) {
5379                         __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
5380
5381                         if (insn_off < prog->sec_insn_off)
5382                                 continue;
5383                         if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
5384                                 break;
5385
5386                         if (!copy_start)
5387                                 copy_start = rec;
5388                         copy_end = rec + ext_info->rec_size;
5389                 }
5390
5391                 if (!copy_start)
5392                         return -ENOENT;
5393
5394                 /* append func/line info of a given (sub-)program to the main
5395                  * program func/line info
5396                  */
5397                 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
5398                 new_sz = old_sz + (copy_end - copy_start);
5399                 new_prog_info = realloc(*prog_info, new_sz);
5400                 if (!new_prog_info)
5401                         return -ENOMEM;
5402                 *prog_info = new_prog_info;
5403                 *prog_rec_cnt = new_sz / ext_info->rec_size;
5404                 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
5405
5406                 /* Kernel instruction offsets are in units of 8-byte
5407                  * instructions, while .BTF.ext instruction offsets generated
5408                  * by Clang are in units of bytes. So convert Clang offsets
5409                  * into kernel offsets and adjust offset according to program
5410                  * relocated position.
5411                  */
5412                 off_adj = prog->sub_insn_off - prog->sec_insn_off;
5413                 rec = new_prog_info + old_sz;
5414                 rec_end = new_prog_info + new_sz;
5415                 for (; rec < rec_end; rec += ext_info->rec_size) {
5416                         __u32 *insn_off = rec;
5417
5418                         *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
5419                 }
5420                 *prog_rec_sz = ext_info->rec_size;
5421                 return 0;
5422         }
5423
5424         return -ENOENT;
5425 }
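
/* Editor's worked example (illustrative): a Clang-emitted byte offset of 24
 * becomes instruction index 3 (24 / BPF_INSN_SZ); if the subprog landed at
 * main-program instruction 100 (sub_insn_off) after starting at section
 * instruction 0 (sec_insn_off), off_adj is 100 and the stored offset is 103.
 */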
5426
5427 static int
5428 reloc_prog_func_and_line_info(const struct bpf_object *obj,
5429                               struct bpf_program *main_prog,
5430                               const struct bpf_program *prog)
5431 {
5432         int err;
5433
5434         /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
5435          * support func/line info
5436          */
5437         if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
5438                 return 0;
5439
5440         /* only attempt func info relocation if main program's func_info
5441          * relocation was successful
5442          */
5443         if (main_prog != prog && !main_prog->func_info)
5444                 goto line_info;
5445
5446         err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
5447                                        &main_prog->func_info,
5448                                        &main_prog->func_info_cnt,
5449                                        &main_prog->func_info_rec_size);
5450         if (err) {
5451                 if (err != -ENOENT) {
5452                         pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
5453                                 prog->name, err);
5454                         return err;
5455                 }
5456                 if (main_prog->func_info) {
5457                         /*
5458                          * Some info has already been found, but the last
5459                          * btf_ext reloc failed, so we must error out.
5460                          */
5461                         pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
5462                         return err;
5463                 }
5464                 /* Failed to load the very first info. Ignore the rest. */
5465                 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
5466                         prog->name);
5467         }
5468
5469 line_info:
5470         /* don't relocate line info if main program's relocation failed */
5471         if (main_prog != prog && !main_prog->line_info)
5472                 return 0;
5473
5474         err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
5475                                        &main_prog->line_info,
5476                                        &main_prog->line_info_cnt,
5477                                        &main_prog->line_info_rec_size);
5478         if (err) {
5479                 if (err != -ENOENT) {
5480                         pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
5481                                 prog->name, err);
5482                         return err;
5483                 }
5484                 if (main_prog->line_info) {
5485                         /*
5486                          * Some info has already been found, but the last
5487                          * btf_ext reloc failed, so we must error out.
5488                          */
5489                         pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
5490                         return err;
5491                 }
5492                 /* Failed to load the very first info. Ignore the rest. */
5493                 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
5494                         prog->name);
5495         }
5496         return 0;
5497 }
5498
5499 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
5500 {
5501         size_t insn_idx = *(const size_t *)key;
5502         const struct reloc_desc *relo = elem;
5503
5504         if (insn_idx == relo->insn_idx)
5505                 return 0;
5506         return insn_idx < relo->insn_idx ? -1 : 1;
5507 }
5508
5509 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
5510 {
5511         return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
5512                        sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
5513 }
5514
5515 static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
5516 {
5517         int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
5518         struct reloc_desc *relos;
5519         int i;
5520
5521         if (main_prog == subprog)
5522                 return 0;
5523         relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
5524         if (!relos)
5525                 return -ENOMEM;
5526         memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
5527                sizeof(*relos) * subprog->nr_reloc);
5528
5529         for (i = main_prog->nr_reloc; i < new_cnt; i++)
5530                 relos[i].insn_idx += subprog->sub_insn_off;
5531         /* After insn_idx adjustment the 'relos' array is still sorted
5532          * by insn_idx and doesn't break bsearch.
5533          */
5534         main_prog->reloc_desc = relos;
5535         main_prog->nr_reloc = new_cnt;
5536         return 0;
5537 }
5538
5539 static int
5540 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
5541                        struct bpf_program *prog)
5542 {
5543         size_t sub_insn_idx, insn_idx, new_cnt;
5544         struct bpf_program *subprog;
5545         struct bpf_insn *insns, *insn;
5546         struct reloc_desc *relo;
5547         int err;
5548
5549         err = reloc_prog_func_and_line_info(obj, main_prog, prog);
5550         if (err)
5551                 return err;
5552
5553         for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
5554                 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
5555                 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
5556                         continue;
5557
5558                 relo = find_prog_insn_relo(prog, insn_idx);
5559                 if (relo && relo->type == RELO_EXTERN_FUNC)
5560                         /* kfunc relocations will be handled later
5561                          * in bpf_object__relocate_data()
5562                          */
5563                         continue;
5564                 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
5565                         pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
5566                                 prog->name, insn_idx, relo->type);
5567                         return -LIBBPF_ERRNO__RELOC;
5568                 }
5569                 if (relo) {
			/* sub-program instruction index is a combination of
			 * the offset of the symbol pointed to by the
			 * relocation and the call instruction's imm field;
			 * for global functions, the call always has imm = -1,
			 * but for static functions the relocation is against
			 * STT_SECTION and insn->imm points to the start of
			 * the static function
			 *
			 * for a subprog addr relocation, relo->sym_off +
			 * insn->imm is the byte offset in the corresponding
			 * section.
			 */
5580                         if (relo->type == RELO_CALL)
5581                                 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
5582                         else
5583                                 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
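			/* e.g. (hypothetical numbers): a RELO_CALL against a
			 * symbol at sym_off = 80, i.e. the 10th insn given
			 * 8-byte insns, with insn->imm = -1 yields
			 * sub_insn_idx = 80 / 8 + (-1) + 1 = 10
			 */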
5584                 } else if (insn_is_pseudo_func(insn)) {
			/*
			 * a RELO_SUBPROG_ADDR relo is always emitted even if
			 * both functions are in the same section, so we
			 * should never get here
			 */
5589                         pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
5590                                 prog->name, insn_idx);
5591                         return -LIBBPF_ERRNO__RELOC;
5592                 } else {
5593                         /* if subprogram call is to a static function within
5594                          * the same ELF section, there won't be any relocation
5595                          * emitted, but it also means there is no additional
5596                          * offset necessary, insns->imm is relative to
5597                          * instruction's original position within the section
5598                          */
5599                         sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
5600                 }
5601
		/* we enforce that sub-programs must be in the .text section */
5603                 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
5604                 if (!subprog) {
5605                         pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
5606                                 prog->name);
5607                         return -LIBBPF_ERRNO__RELOC;
5608                 }
5609
		/* if it's the first call instruction calling into this
		 * subprogram (meaning this subprog hasn't been processed
		 * yet) within the context of current main program:
		 *   - append it at the end of main program's instruction block;
		 *   - process it recursively, while current program is put on hold;
		 *   - if that subprogram calls some other not yet processed
		 *     subprogram, the same thing will happen recursively until
		 *     there are no more unprocessed subprograms left to append
		 *     and relocate.
		 */
5620                 if (subprog->sub_insn_off == 0) {
5621                         subprog->sub_insn_off = main_prog->insns_cnt;
5622
5623                         new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
5624                         insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
5625                         if (!insns) {
5626                                 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
5627                                 return -ENOMEM;
5628                         }
5629                         main_prog->insns = insns;
5630                         main_prog->insns_cnt = new_cnt;
5631
5632                         memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
5633                                subprog->insns_cnt * sizeof(*insns));
5634
5635                         pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
5636                                  main_prog->name, subprog->insns_cnt, subprog->name);
5637
5638                         /* The subprog insns are now appended. Append its relos too. */
5639                         err = append_subprog_relos(main_prog, subprog);
5640                         if (err)
5641                                 return err;
5642                         err = bpf_object__reloc_code(obj, main_prog, subprog);
5643                         if (err)
5644                                 return err;
5645                 }
5646
		/* main_prog->insns memory could have been re-allocated, so
		 * recalculate the insn pointer
		 */
5650                 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
		/* calculate correct instruction position within current main
		 * prog; each main prog can have a different set of
		 * subprograms appended (potentially in a different order as
		 * well), so the position of any subprog can differ between
		 * main programs
		 */
5656                 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
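		/* a worked example with hypothetical numbers: if the subprog
		 * was appended at sub_insn_off = 10 and this call sits at
		 * overall index 3 within the main prog, imm = 10 - 3 - 1 = 6,
		 * since BPF call offsets are relative to the instruction
		 * following the call
		 */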
5657
5658                 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
5659                          prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
5660         }
5661
5662         return 0;
5663 }
5664
5665 /*
5666  * Relocate sub-program calls.
5667  *
 * The algorithm operates as follows. Each entry-point BPF program (referred
 * to as a main prog) is processed separately. Each subprog (a non-entry
 * function that can be called from either entry progs or other subprogs)
 * gets its sub_insn_off reset to zero. This serves as an indicator that the
 * subprogram hasn't yet been appended and relocated within the current main
 * prog. Once it's relocated, sub_insn_off will point at the position within
 * the current main prog where the given subprog was appended. This is later
 * used to relocate all the call instructions jumping into this subprog.
5676  *
 * We start with the main program and process all call instructions. If the
 * call is into a subprog that hasn't been processed yet (i.e.,
 * subprog->sub_insn_off is zero), the subprog's instructions are appended at
 * the end of the main program's instruction array. Then the main program is
 * "put on hold" while we recursively process the newly appended subprogram.
 * If that subprogram calls into yet another subprogram that hasn't been
 * appended, that one is appended to the *main* prog's instructions as well
 * (a subprog's own instructions are always left untouched, as they need to
 * stay in an unmodified state for subsequent main progs, and are only ever
 * loaded as part of a main prog), and the process continues recursively.
 * Once all the subprogs called from a main prog or any of its subprogs are
 * appended (and relocated), all their positions within the finalized
 * instruction array are known, so it's easy to rewrite call instructions
 * with correct relative offsets pointing at the desired target subprog.
5691  *
 * It's important to realize that some subprogs might not be called from a
 * given main prog or any of its called/used subprogs. Those will keep their
 * subprog->sub_insn_off as zero at all times, won't be appended to the
 * current main prog, and won't be relocated within its context. They might
 * still be used from other main progs later.
5697  *
5698  * Visually this process can be shown as below. Suppose we have two main
5699  * programs mainA and mainB and BPF object contains three subprogs: subA,
5700  * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
5701  * subC both call subB:
5702  *
5703  *        +--------+ +-------+
5704  *        |        v v       |
5705  *     +--+---+ +--+-+-+ +---+--+
5706  *     | subA | | subB | | subC |
5707  *     +--+---+ +------+ +---+--+
5708  *        ^                  ^
5709  *        |                  |
5710  *    +---+-------+   +------+----+
5711  *    |   mainA   |   |   mainB   |
5712  *    +-----------+   +-----------+
5713  *
 * We'll start relocating mainA, find subA, append it, and start processing
 * subA recursively:
5716  *
5717  *    +-----------+------+
5718  *    |   mainA   | subA |
5719  *    +-----------+------+
5720  *
5721  * At this point we notice that subB is used from subA, so we append it and
5722  * relocate (there are no further subcalls from subB):
5723  *
5724  *    +-----------+------+------+
5725  *    |   mainA   | subA | subB |
5726  *    +-----------+------+------+
5727  *
 * At this point, we relocate subA calls, then go one level up and finish by
 * relocating mainA calls. mainA is done.
5730  *
 * For mainB the process is similar but results in a different order. We
 * start with mainB and skip subA and subB, as mainB never calls them (at
 * least not directly), but we see that subC is needed, so we append it and
 * start processing it:
5734  *
5735  *    +-----------+------+
5736  *    |   mainB   | subC |
5737  *    +-----------+------+
 *
 * Now we see that subC needs subB, so we append and relocate it as well:
5739  *
5740  *    +-----------+------+------+
5741  *    |   mainB   | subC | subB |
5742  *    +-----------+------+------+
5743  *
5744  * At this point we unwind recursion, relocate calls in subC, then in mainB.
5745  */
5746 static int
5747 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
5748 {
5749         struct bpf_program *subprog;
5750         int i, err;
5751
5752         /* mark all subprogs as not relocated (yet) within the context of
5753          * current main program
5754          */
5755         for (i = 0; i < obj->nr_programs; i++) {
5756                 subprog = &obj->programs[i];
5757                 if (!prog_is_subprog(obj, subprog))
5758                         continue;
5759
5760                 subprog->sub_insn_off = 0;
5761         }
5762
5763         err = bpf_object__reloc_code(obj, prog, prog);
5764         if (err)
5765                 return err;
5766
5768         return 0;
5769 }
5770
5771 static void
5772 bpf_object__free_relocs(struct bpf_object *obj)
5773 {
5774         struct bpf_program *prog;
5775         int i;
5776
5777         /* free up relocation descriptors */
5778         for (i = 0; i < obj->nr_programs; i++) {
5779                 prog = &obj->programs[i];
5780                 zfree(&prog->reloc_desc);
5781                 prog->nr_reloc = 0;
5782         }
5783 }
5784
5785 static int
5786 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
5787 {
5788         struct bpf_program *prog;
5789         size_t i, j;
5790         int err;
5791
5792         if (obj->btf_ext) {
5793                 err = bpf_object__relocate_core(obj, targ_btf_path);
5794                 if (err) {
5795                         pr_warn("failed to perform CO-RE relocations: %d\n",
5796                                 err);
5797                         return err;
5798                 }
5799         }
5800
	/* Before relocating calls, pre-process relocations and mark the
	 * few ld_imm64 instructions that point to subprogs.
	 * Otherwise bpf_object__reloc_code() would later have to consider
	 * all ld_imm64 insns as relocation candidates. That would
	 * slow down relocation, since the number of find_prog_insn_relo()
	 * calls would increase and most of them would fail to find a relo.
	 */
5808         for (i = 0; i < obj->nr_programs; i++) {
5809                 prog = &obj->programs[i];
5810                 for (j = 0; j < prog->nr_reloc; j++) {
5811                         struct reloc_desc *relo = &prog->reloc_desc[j];
5812                         struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5813
5814                         /* mark the insn, so it's recognized by insn_is_pseudo_func() */
5815                         if (relo->type == RELO_SUBPROG_ADDR)
5816                                 insn[0].src_reg = BPF_PSEUDO_FUNC;
5817                 }
5818         }
5819
5820         /* relocate subprogram calls and append used subprograms to main
5821          * programs; each copy of subprogram code needs to be relocated
5822          * differently for each main program, because its code location might
5823          * have changed.
5824          * Append subprog relos to main programs to allow data relos to be
5825          * processed after text is completely relocated.
5826          */
5827         for (i = 0; i < obj->nr_programs; i++) {
5828                 prog = &obj->programs[i];
5829                 /* sub-program's sub-calls are relocated within the context of
5830                  * its main program only
5831                  */
5832                 if (prog_is_subprog(obj, prog))
5833                         continue;
5834
5835                 err = bpf_object__relocate_calls(obj, prog);
5836                 if (err) {
5837                         pr_warn("prog '%s': failed to relocate calls: %d\n",
5838                                 prog->name, err);
5839                         return err;
5840                 }
5841         }
5842         /* Process data relos for main programs */
5843         for (i = 0; i < obj->nr_programs; i++) {
5844                 prog = &obj->programs[i];
5845                 if (prog_is_subprog(obj, prog))
5846                         continue;
5847                 err = bpf_object__relocate_data(obj, prog);
5848                 if (err) {
5849                         pr_warn("prog '%s': failed to relocate data references: %d\n",
5850                                 prog->name, err);
5851                         return err;
5852                 }
5853         }
5854         if (!obj->gen_loader)
5855                 bpf_object__free_relocs(obj);
5856         return 0;
5857 }
5858
5859 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
5860                                             GElf_Shdr *shdr, Elf_Data *data);
5861
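/* Collect relocations for BTF-defined map-in-map slots. A minimal sketch of
 * the BPF-side declaration that produces these relocations (names are
 * hypothetical, following the standard BTF map definition convention):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 2);
 *		__array(values, struct inner_map_def);
 *	} outer_map SEC(".maps") = {
 *		.values = { &inner_map1, &inner_map2 },
 *	};
 *
 * Each initialized slot in .values emits an ELF relocation against the
 * inner map's symbol; the loop below maps every such relocation to a slot
 * index in map->init_slots.
 */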
5862 static int bpf_object__collect_map_relos(struct bpf_object *obj,
5863                                          GElf_Shdr *shdr, Elf_Data *data)
5864 {
5865         const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
5866         int i, j, nrels, new_sz;
5867         const struct btf_var_secinfo *vi = NULL;
5868         const struct btf_type *sec, *var, *def;
5869         struct bpf_map *map = NULL, *targ_map;
5870         const struct btf_member *member;
5871         const char *name, *mname;
5872         Elf_Data *symbols;
5873         unsigned int moff;
5874         GElf_Sym sym;
5875         GElf_Rel rel;
5876         void *tmp;
5877
5878         if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
5879                 return -EINVAL;
5880         sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
5881         if (!sec)
5882                 return -EINVAL;
5883
5884         symbols = obj->efile.symbols;
5885         nrels = shdr->sh_size / shdr->sh_entsize;
5886         for (i = 0; i < nrels; i++) {
5887                 if (!gelf_getrel(data, i, &rel)) {
5888                         pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
5889                         return -LIBBPF_ERRNO__FORMAT;
5890                 }
5891                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
5892                         pr_warn(".maps relo #%d: symbol %zx not found\n",
5893                                 i, (size_t)GELF_R_SYM(rel.r_info));
5894                         return -LIBBPF_ERRNO__FORMAT;
5895                 }
5896                 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
5897                 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
5898                         pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
5899                                 i, name);
5900                         return -LIBBPF_ERRNO__RELOC;
5901                 }
5902
5903                 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
5904                          i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
5905                          (size_t)rel.r_offset, sym.st_name, name);
5906
5907                 for (j = 0; j < obj->nr_maps; j++) {
5908                         map = &obj->maps[j];
5909                         if (map->sec_idx != obj->efile.btf_maps_shndx)
5910                                 continue;
5911
5912                         vi = btf_var_secinfos(sec) + map->btf_var_idx;
5913                         if (vi->offset <= rel.r_offset &&
5914                             rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
5915                                 break;
5916                 }
5917                 if (j == obj->nr_maps) {
5918                         pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
5919                                 i, name, (size_t)rel.r_offset);
5920                         return -EINVAL;
5921                 }
5922
5923                 if (!bpf_map_type__is_map_in_map(map->def.type))
5924                         return -EINVAL;
5925                 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
5926                     map->def.key_size != sizeof(int)) {
5927                         pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
5928                                 i, map->name, sizeof(int));
5929                         return -EINVAL;
5930                 }
5931
5932                 targ_map = bpf_object__find_map_by_name(obj, name);
5933                 if (!targ_map)
5934                         return -ESRCH;
5935
5936                 var = btf__type_by_id(obj->btf, vi->type);
5937                 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
5938                 if (btf_vlen(def) == 0)
5939                         return -EINVAL;
5940                 member = btf_members(def) + btf_vlen(def) - 1;
5941                 mname = btf__name_by_offset(obj->btf, member->name_off);
5942                 if (strcmp(mname, "values"))
5943                         return -EINVAL;
5944
5945                 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
5946                 if (rel.r_offset - vi->offset < moff)
5947                         return -EINVAL;
5948
5949                 moff = rel.r_offset - vi->offset - moff;
		/* here we use the BPF pointer size, which is always 64 bit,
		 * as we are parsing an ELF file that was built for the BPF
		 * target
		 */
5953                 if (moff % bpf_ptr_sz)
5954                         return -EINVAL;
5955                 moff /= bpf_ptr_sz;
5956                 if (moff >= map->init_slots_sz) {
5957                         new_sz = moff + 1;
5958                         tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
5959                         if (!tmp)
5960                                 return -ENOMEM;
5961                         map->init_slots = tmp;
5962                         memset(map->init_slots + map->init_slots_sz, 0,
5963                                (new_sz - map->init_slots_sz) * host_ptr_sz);
5964                         map->init_slots_sz = new_sz;
5965                 }
5966                 map->init_slots[moff] = targ_map;
5967
5968                 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
5969                          i, map->name, moff, name);
5970         }
5971
5972         return 0;
5973 }
5974
5975 static int cmp_relocs(const void *_a, const void *_b)
5976 {
5977         const struct reloc_desc *a = _a;
5978         const struct reloc_desc *b = _b;
5979
5980         if (a->insn_idx != b->insn_idx)
5981                 return a->insn_idx < b->insn_idx ? -1 : 1;
5982
5983         /* no two relocations should have the same insn_idx, but ... */
5984         if (a->type != b->type)
5985                 return a->type < b->type ? -1 : 1;
5986
5987         return 0;
5988 }
5989
5990 static int bpf_object__collect_relos(struct bpf_object *obj)
5991 {
5992         int i, err;
5993
5994         for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
5995                 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
5996                 Elf_Data *data = obj->efile.reloc_sects[i].data;
5997                 int idx = shdr->sh_info;
5998
5999                 if (shdr->sh_type != SHT_REL) {
6000                         pr_warn("internal error at %d\n", __LINE__);
6001                         return -LIBBPF_ERRNO__INTERNAL;
6002                 }
6003
6004                 if (idx == obj->efile.st_ops_shndx)
6005                         err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6006                 else if (idx == obj->efile.btf_maps_shndx)
6007                         err = bpf_object__collect_map_relos(obj, shdr, data);
6008                 else
6009                         err = bpf_object__collect_prog_relos(obj, shdr, data);
6010                 if (err)
6011                         return err;
6012         }
6013
6014         for (i = 0; i < obj->nr_programs; i++) {
6015                 struct bpf_program *p = &obj->programs[i];
6016
6017                 if (!p->nr_reloc)
6018                         continue;
6019
6020                 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6021         }
6022         return 0;
6023 }
6024
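/* A BPF helper call is encoded as BPF_JMP | BPF_CALL with src_reg and
 * dst_reg zeroed and the helper's id in imm (BPF_SRC(code) == BPF_K holds
 * trivially, since BPF_K is 0). Subprog calls (src_reg == BPF_PSEUDO_CALL)
 * and kfunc calls (src_reg == BPF_PSEUDO_KFUNC_CALL) have a non-zero
 * src_reg, so they are not matched here.
 */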
6025 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6026 {
	if (BPF_CLASS(insn->code) == BPF_JMP &&
	    BPF_OP(insn->code) == BPF_CALL &&
	    BPF_SRC(insn->code) == BPF_K &&
	    insn->src_reg == 0 &&
	    insn->dst_reg == 0) {
		*func_id = insn->imm;
		return true;
	}
6035         return false;
6036 }
6037
6038 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
6039 {
6040         struct bpf_insn *insn = prog->insns;
6041         enum bpf_func_id func_id;
6042         int i;
6043
6044         if (obj->gen_loader)
6045                 return 0;
6046
6047         for (i = 0; i < prog->insns_cnt; i++, insn++) {
6048                 if (!insn_is_helper_call(insn, &func_id))
6049                         continue;
6050
		/* on kernels that don't yet support the
		 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
		 * to bpf_probe_read(), which works fine on old kernels
		 */
6055                 switch (func_id) {
6056                 case BPF_FUNC_probe_read_kernel:
6057                 case BPF_FUNC_probe_read_user:
6058                         if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6059                                 insn->imm = BPF_FUNC_probe_read;
6060                         break;
6061                 case BPF_FUNC_probe_read_kernel_str:
6062                 case BPF_FUNC_probe_read_user_str:
6063                         if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6064                                 insn->imm = BPF_FUNC_probe_read_str;
6065                         break;
6066                 default:
6067                         break;
6068                 }
6069         }
6070         return 0;
6071 }
6072
6073 static int
6074 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
6075              char *license, __u32 kern_version, int *pfd)
6076 {
6077         struct bpf_prog_load_params load_attr = {};
6078         char *cp, errmsg[STRERR_BUFSIZE];
6079         size_t log_buf_size = 0;
6080         char *log_buf = NULL;
6081         int btf_fd, ret;
6082
6083         if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6084                 /*
6085                  * The program type must be set.  Most likely we couldn't find a proper
6086                  * section definition at load time, and thus we didn't infer the type.
6087                  */
6088                 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6089                         prog->name, prog->sec_name);
6090                 return -EINVAL;
6091         }
6092
6093         if (!insns || !insns_cnt)
6094                 return -EINVAL;
6095
6096         load_attr.prog_type = prog->type;
6097         /* old kernels might not support specifying expected_attach_type */
6098         if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
6099             prog->sec_def->is_exp_attach_type_optional)
6100                 load_attr.expected_attach_type = 0;
6101         else
6102                 load_attr.expected_attach_type = prog->expected_attach_type;
6103         if (kernel_supports(prog->obj, FEAT_PROG_NAME))
6104                 load_attr.name = prog->name;
6105         load_attr.insns = insns;
6106         load_attr.insn_cnt = insns_cnt;
6107         load_attr.license = license;
	load_attr.attach_btf_id = prog->attach_btf_id;
	if (prog->attach_prog_fd)
		load_attr.attach_prog_fd = prog->attach_prog_fd;
	else
		load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6114         load_attr.kern_version = kern_version;
6115         load_attr.prog_ifindex = prog->prog_ifindex;
6116
6117         /* specify func_info/line_info only if kernel supports them */
6118         btf_fd = bpf_object__btf_fd(prog->obj);
6119         if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
6120                 load_attr.prog_btf_fd = btf_fd;
6121                 load_attr.func_info = prog->func_info;
6122                 load_attr.func_info_rec_size = prog->func_info_rec_size;
6123                 load_attr.func_info_cnt = prog->func_info_cnt;
6124                 load_attr.line_info = prog->line_info;
6125                 load_attr.line_info_rec_size = prog->line_info_rec_size;
6126                 load_attr.line_info_cnt = prog->line_info_cnt;
6127         }
6128         load_attr.log_level = prog->log_level;
6129         load_attr.prog_flags = prog->prog_flags;
6130
6131         if (prog->obj->gen_loader) {
6132                 bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
6133                                    prog - prog->obj->programs);
6134                 *pfd = -1;
6135                 return 0;
6136         }
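	/* the first load attempt is made without a log buffer; on failure we
	 * allocate one (doubling it on every subsequent ENOSPC, see below)
	 * and retry to capture the verifier log
	 */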
6137 retry_load:
6138         if (log_buf_size) {
6139                 log_buf = malloc(log_buf_size);
6140                 if (!log_buf)
6141                         return -ENOMEM;
6142
6143                 *log_buf = 0;
6144         }
6145
6146         load_attr.log_buf = log_buf;
6147         load_attr.log_buf_sz = log_buf_size;
6148         ret = libbpf__bpf_prog_load(&load_attr);
6149
6150         if (ret >= 0) {
6151                 if (log_buf && load_attr.log_level)
6152                         pr_debug("verifier log:\n%s", log_buf);
6153
6154                 if (prog->obj->rodata_map_idx >= 0 &&
6155                     kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
6156                         struct bpf_map *rodata_map =
6157                                 &prog->obj->maps[prog->obj->rodata_map_idx];
6158
6159                         if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
6160                                 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6161                                 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
6162                                         prog->name, cp);
6163                                 /* Don't fail hard if can't bind rodata. */
6164                         }
6165                 }
6166
6167                 *pfd = ret;
6168                 ret = 0;
6169                 goto out;
6170         }
6171
6172         if (!log_buf || errno == ENOSPC) {
6173                 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
6174                                    log_buf_size << 1);
6175
6176                 free(log_buf);
6177                 goto retry_load;
6178         }
6179         ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
6180         cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6181         pr_warn("load bpf program failed: %s\n", cp);
6182         pr_perm_msg(ret);
6183
6184         if (log_buf && log_buf[0] != '\0') {
6185                 ret = -LIBBPF_ERRNO__VERIFY;
		pr_warn("-- BEGIN DUMP LOG --\n");
		pr_warn("\n%s\n", log_buf);
		pr_warn("-- END DUMP LOG --\n");
6189         } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
6190                 pr_warn("Program too large (%zu insns), at most %d insns\n",
6191                         load_attr.insn_cnt, BPF_MAXINSNS);
6192                 ret = -LIBBPF_ERRNO__PROG2BIG;
6193         } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
6194                 /* Wrong program type? */
6195                 int fd;
6196
6197                 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
6198                 load_attr.expected_attach_type = 0;
6199                 load_attr.log_buf = NULL;
6200                 load_attr.log_buf_sz = 0;
6201                 fd = libbpf__bpf_prog_load(&load_attr);
6202                 if (fd >= 0) {
6203                         close(fd);
6204                         ret = -LIBBPF_ERRNO__PROGTYPE;
6205                         goto out;
6206                 }
6207         }
6208
6209 out:
6210         free(log_buf);
6211         return ret;
6212 }
6213
6214 static int bpf_program__record_externs(struct bpf_program *prog)
6215 {
6216         struct bpf_object *obj = prog->obj;
6217         int i;
6218
6219         for (i = 0; i < prog->nr_reloc; i++) {
6220                 struct reloc_desc *relo = &prog->reloc_desc[i];
6221                 struct extern_desc *ext = &obj->externs[relo->sym_off];
6222
6223                 switch (relo->type) {
6224                 case RELO_EXTERN_VAR:
6225                         if (ext->type != EXT_KSYM)
6226                                 continue;
6227                         if (!ext->ksym.type_id) {
6228                                 pr_warn("typeless ksym %s is not supported yet\n",
6229                                         ext->name);
6230                                 return -ENOTSUP;
6231                         }
6232                         bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
6233                                                relo->insn_idx);
6234                         break;
6235                 case RELO_EXTERN_FUNC:
6236                         bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
6237                                                relo->insn_idx);
6238                         break;
6239                 default:
6240                         continue;
6241                 }
6242         }
6243         return 0;
6244 }
6245
6246 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
6247
6248 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
6249 {
6250         int err = 0, fd, i;
6251
6252         if (prog->obj->loaded) {
6253                 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
6254                 return libbpf_err(-EINVAL);
6255         }
6256
6257         if ((prog->type == BPF_PROG_TYPE_TRACING ||
6258              prog->type == BPF_PROG_TYPE_LSM ||
6259              prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
6260                 int btf_obj_fd = 0, btf_type_id = 0;
6261
6262                 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
6263                 if (err)
6264                         return libbpf_err(err);
6265
6266                 prog->attach_btf_obj_fd = btf_obj_fd;
6267                 prog->attach_btf_id = btf_type_id;
6268         }
6269
6270         if (prog->instances.nr < 0 || !prog->instances.fds) {
6271                 if (prog->preprocessor) {
6272                         pr_warn("Internal error: can't load program '%s'\n",
6273                                 prog->name);
6274                         return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
6275                 }
6276
6277                 prog->instances.fds = malloc(sizeof(int));
6278                 if (!prog->instances.fds) {
6279                         pr_warn("Not enough memory for BPF fds\n");
6280                         return libbpf_err(-ENOMEM);
6281                 }
6282                 prog->instances.nr = 1;
6283                 prog->instances.fds[0] = -1;
6284         }
6285
6286         if (!prog->preprocessor) {
6287                 if (prog->instances.nr != 1) {
6288                         pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
6289                                 prog->name, prog->instances.nr);
6290                 }
6291                 if (prog->obj->gen_loader)
6292                         bpf_program__record_externs(prog);
6293                 err = load_program(prog, prog->insns, prog->insns_cnt,
6294                                    license, kern_ver, &fd);
6295                 if (!err)
6296                         prog->instances.fds[0] = fd;
6297                 goto out;
6298         }
6299
6300         for (i = 0; i < prog->instances.nr; i++) {
6301                 struct bpf_prog_prep_result result;
6302                 bpf_program_prep_t preprocessor = prog->preprocessor;
6303
6304                 memset(&result, 0, sizeof(result));
6305                 err = preprocessor(prog, i, prog->insns,
6306                                    prog->insns_cnt, &result);
6307                 if (err) {
6308                         pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
6309                                 i, prog->name);
6310                         goto out;
6311                 }
6312
6313                 if (!result.new_insn_ptr || !result.new_insn_cnt) {
6314                         pr_debug("Skip loading the %dth instance of program '%s'\n",
6315                                  i, prog->name);
6316                         prog->instances.fds[i] = -1;
6317                         if (result.pfd)
6318                                 *result.pfd = -1;
6319                         continue;
6320                 }
6321
6322                 err = load_program(prog, result.new_insn_ptr,
6323                                    result.new_insn_cnt, license, kern_ver, &fd);
6324                 if (err) {
6325                         pr_warn("Loading the %dth instance of program '%s' failed\n",
6326                                 i, prog->name);
6327                         goto out;
6328                 }
6329
6330                 if (result.pfd)
6331                         *result.pfd = fd;
6332                 prog->instances.fds[i] = fd;
6333         }
6334 out:
6335         if (err)
6336                 pr_warn("failed to load program '%s'\n", prog->name);
6337         zfree(&prog->insns);
6338         prog->insns_cnt = 0;
6339         return libbpf_err(err);
6340 }
6341
6342 static int
6343 bpf_object__load_progs(struct bpf_object *obj, int log_level)
6344 {
6345         struct bpf_program *prog;
6346         size_t i;
6347         int err;
6348
6349         for (i = 0; i < obj->nr_programs; i++) {
6350                 prog = &obj->programs[i];
6351                 err = bpf_object__sanitize_prog(obj, prog);
6352                 if (err)
6353                         return err;
6354         }
6355
6356         for (i = 0; i < obj->nr_programs; i++) {
6357                 prog = &obj->programs[i];
6358                 if (prog_is_subprog(obj, prog))
6359                         continue;
6360                 if (!prog->load) {
6361                         pr_debug("prog '%s': skipped loading\n", prog->name);
6362                         continue;
6363                 }
6364                 prog->log_level |= log_level;
6365                 err = bpf_program__load(prog, obj->license, obj->kern_version);
6366                 if (err)
6367                         return err;
6368         }
6369         if (obj->gen_loader)
6370                 bpf_object__free_relocs(obj);
6371         return 0;
6372 }
6373
6374 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
6375
6376 static struct bpf_object *
6377 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
6378                    const struct bpf_object_open_opts *opts)
6379 {
6380         const char *obj_name, *kconfig, *btf_tmp_path;
6381         struct bpf_program *prog;
6382         struct bpf_object *obj;
6383         char tmp_name[64];
6384         int err;
6385
6386         if (elf_version(EV_CURRENT) == EV_NONE) {
6387                 pr_warn("failed to init libelf for %s\n",
6388                         path ? : "(mem buf)");
6389                 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
6390         }
6391
6392         if (!OPTS_VALID(opts, bpf_object_open_opts))
6393                 return ERR_PTR(-EINVAL);
6394
6395         obj_name = OPTS_GET(opts, object_name, NULL);
6396         if (obj_buf) {
6397                 if (!obj_name) {
6398                         snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
6399                                  (unsigned long)obj_buf,
6400                                  (unsigned long)obj_buf_sz);
6401                         obj_name = tmp_name;
6402                 }
6403                 path = obj_name;
6404                 pr_debug("loading object '%s' from buffer\n", obj_name);
6405         }
6406
6407         obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
6408         if (IS_ERR(obj))
6409                 return obj;
6410
6411         btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
6412         if (btf_tmp_path) {
6413                 if (strlen(btf_tmp_path) >= PATH_MAX) {
6414                         err = -ENAMETOOLONG;
6415                         goto out;
6416                 }
6417                 obj->btf_custom_path = strdup(btf_tmp_path);
6418                 if (!obj->btf_custom_path) {
6419                         err = -ENOMEM;
6420                         goto out;
6421                 }
6422         }
6423
6424         kconfig = OPTS_GET(opts, kconfig, NULL);
6425         if (kconfig) {
6426                 obj->kconfig = strdup(kconfig);
6427                 if (!obj->kconfig) {
6428                         err = -ENOMEM;
6429                         goto out;
6430                 }
6431         }
6432
6433         err = bpf_object__elf_init(obj);
6434         err = err ? : bpf_object__check_endianness(obj);
6435         err = err ? : bpf_object__elf_collect(obj);
6436         err = err ? : bpf_object__collect_externs(obj);
6437         err = err ? : bpf_object__finalize_btf(obj);
6438         err = err ? : bpf_object__init_maps(obj, opts);
6439         err = err ? : bpf_object__collect_relos(obj);
6440         if (err)
6441                 goto out;
6442         bpf_object__elf_finish(obj);
6443
6444         bpf_object__for_each_program(prog, obj) {
6445                 prog->sec_def = find_sec_def(prog->sec_name);
6446                 if (!prog->sec_def) {
6447                         /* couldn't guess, but user might manually specify */
6448                         pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
6449                                 prog->name, prog->sec_name);
6450                         continue;
6451                 }
6452
6453                 if (prog->sec_def->is_sleepable)
6454                         prog->prog_flags |= BPF_F_SLEEPABLE;
6455                 bpf_program__set_type(prog, prog->sec_def->prog_type);
6456                 bpf_program__set_expected_attach_type(prog,
6457                                 prog->sec_def->expected_attach_type);
6458
6459                 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
6460                     prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
6461                         prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
6462         }
6463
6464         return obj;
6465 out:
6466         bpf_object__close(obj);
6467         return ERR_PTR(err);
6468 }
6469
6470 static struct bpf_object *
6471 __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
6472 {
6473         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
6474                 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
6475         );
6476
6477         /* param validation */
6478         if (!attr->file)
6479                 return NULL;
6480
6481         pr_debug("loading %s\n", attr->file);
6482         return __bpf_object__open(attr->file, NULL, 0, &opts);
6483 }
6484
6485 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
6486 {
6487         return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
6488 }
6489
6490 struct bpf_object *bpf_object__open(const char *path)
6491 {
6492         struct bpf_object_open_attr attr = {
6493                 .file           = path,
6494                 .prog_type      = BPF_PROG_TYPE_UNSPEC,
6495         };
6496
6497         return libbpf_ptr(__bpf_object__open_xattr(&attr, 0));
6498 }
6499
6500 struct bpf_object *
6501 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
6502 {
6503         if (!path)
6504                 return libbpf_err_ptr(-EINVAL);
6505
6506         pr_debug("loading %s\n", path);
6507
6508         return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts));
6509 }
6510
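/* A minimal caller-side sketch for the open APIs above (the object file
 * name is hypothetical; depending on libbpf's strict-mode setting, errors
 * are reported either as a NULL return with errno set or as an encoded
 * error pointer):
 *
 *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *	);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (!obj)
 *		return -errno;
 */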
6511 struct bpf_object *
6512 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
6513                      const struct bpf_object_open_opts *opts)
6514 {
6515         if (!obj_buf || obj_buf_sz == 0)
6516                 return libbpf_err_ptr(-EINVAL);
6517
6518         return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts));
6519 }
6520
6521 struct bpf_object *
6522 bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
6523                         const char *name)
6524 {
6525         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
6526                 .object_name = name,
6527                 /* wrong default, but backwards-compatible */
6528                 .relaxed_maps = true,
6529         );
6530
6531         /* returning NULL is wrong, but backwards-compatible */
6532         if (!obj_buf || obj_buf_sz == 0)
6533                 return errno = EINVAL, NULL;
6534
6535         return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts));
6536 }
6537
6538 int bpf_object__unload(struct bpf_object *obj)
6539 {
6540         size_t i;
6541
6542         if (!obj)
6543                 return libbpf_err(-EINVAL);
6544
6545         for (i = 0; i < obj->nr_maps; i++) {
6546                 zclose(obj->maps[i].fd);
6547                 if (obj->maps[i].st_ops)
6548                         zfree(&obj->maps[i].st_ops->kern_vdata);
6549         }
6550
6551         for (i = 0; i < obj->nr_programs; i++)
6552                 bpf_program__unload(&obj->programs[i]);
6553
6554         return 0;
6555 }
6556
6557 static int bpf_object__sanitize_maps(struct bpf_object *obj)
6558 {
6559         struct bpf_map *m;
6560
6561         bpf_object__for_each_map(m, obj) {
6562                 if (!bpf_map__is_internal(m))
6563                         continue;
6564                 if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
6565                         pr_warn("kernel doesn't support global data\n");
6566                         return -ENOTSUP;
6567                 }
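		/* internal maps are created with BPF_F_MMAPABLE set by
		 * default, so flip it back off when the kernel can't mmap
		 * array maps
		 */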
6568                 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
6569                         m->def.map_flags ^= BPF_F_MMAPABLE;
6570         }
6571
6572         return 0;
6573 }
6574
6575 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
6576 {
6577         char sym_type, sym_name[500];
6578         unsigned long long sym_addr;
6579         const struct btf_type *t;
6580         struct extern_desc *ext;
6581         int ret, err = 0;
6582         FILE *f;
6583
6584         f = fopen("/proc/kallsyms", "r");
6585         if (!f) {
6586                 err = -errno;
6587                 pr_warn("failed to open /proc/kallsyms: %d\n", err);
6588                 return err;
6589         }
6590
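	/* each /proc/kallsyms line looks like "ffffffff81000000 T _stext";
	 * parse the address, type character and symbol name, and skip the
	 * rest of the line (e.g. a trailing "[module]" annotation)
	 */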
6591         while (true) {
6592                 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
6593                              &sym_addr, &sym_type, sym_name);
6594                 if (ret == EOF && feof(f))
6595                         break;
6596                 if (ret != 3) {
6597                         pr_warn("failed to read kallsyms entry: %d\n", ret);
6598                         err = -EINVAL;
6599                         goto out;
6600                 }
6601
6602                 ext = find_extern_by_name(obj, sym_name);
6603                 if (!ext || ext->type != EXT_KSYM)
6604                         continue;
6605
6606                 t = btf__type_by_id(obj->btf, ext->btf_id);
6607                 if (!btf_is_var(t))
6608                         continue;
6609
6610                 if (ext->is_set && ext->ksym.addr != sym_addr) {
6611                         pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
6612                                 sym_name, ext->ksym.addr, sym_addr);
6613                         err = -EINVAL;
6614                         goto out;
6615                 }
6616                 if (!ext->is_set) {
6617                         ext->is_set = true;
6618                         ext->ksym.addr = sym_addr;
6619                         pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
6620                 }
6621         }
6622
6623 out:
6624         fclose(f);
6625         return err;
6626 }
6627
6628 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
6629                             __u16 kind, struct btf **res_btf,
6630                             int *res_btf_fd)
6631 {
6632         int i, id, btf_fd, err;
6633         struct btf *btf;
6634
6635         btf = obj->btf_vmlinux;
6636         btf_fd = 0;
6637         id = btf__find_by_name_kind(btf, ksym_name, kind);
6638
6639         if (id == -ENOENT) {
6640                 err = load_module_btfs(obj);
6641                 if (err)
6642                         return err;
6643
6644                 for (i = 0; i < obj->btf_module_cnt; i++) {
6645                         btf = obj->btf_modules[i].btf;
6646                         /* we assume module BTF FD is always >0 */
6647                         btf_fd = obj->btf_modules[i].fd;
6648                         id = btf__find_by_name_kind(btf, ksym_name, kind);
6649                         if (id != -ENOENT)
6650                                 break;
6651                 }
6652         }
6653         if (id <= 0)
6654                 return -ESRCH;
6655
6656         *res_btf = btf;
6657         *res_btf_fd = btf_fd;
6658         return id;
6659 }
6660
6661 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
6662                                                struct extern_desc *ext)
6663 {
6664         const struct btf_type *targ_var, *targ_type;
6665         __u32 targ_type_id, local_type_id;
6666         const char *targ_var_name;
6667         int id, btf_fd = 0, err;
6668         struct btf *btf = NULL;
6669
6670         id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd);
6671         if (id == -ESRCH && ext->is_weak) {
6672                 return 0;
6673         } else if (id < 0) {
6674                 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
6675                         ext->name);
6676                 return id;
6677         }
6678
6679         /* find local type_id */
6680         local_type_id = ext->ksym.type_id;
6681
6682         /* find target type_id */
6683         targ_var = btf__type_by_id(btf, id);
6684         targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
6685         targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
6686
6687         err = bpf_core_types_are_compat(obj->btf, local_type_id,
6688                                         btf, targ_type_id);
6689         if (err <= 0) {
6690                 const struct btf_type *local_type;
6691                 const char *targ_name, *local_name;
6692
6693                 local_type = btf__type_by_id(obj->btf, local_type_id);
6694                 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
6695                 targ_name = btf__name_by_offset(btf, targ_type->name_off);
6696
6697                 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
6698                         ext->name, local_type_id,
6699                         btf_kind_str(local_type), local_name, targ_type_id,
6700                         btf_kind_str(targ_type), targ_name);
6701                 return -EINVAL;
6702         }
6703
6704         ext->is_set = true;
6705         ext->ksym.kernel_btf_obj_fd = btf_fd;
6706         ext->ksym.kernel_btf_id = id;
6707         pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
6708                  ext->name, id, btf_kind_str(targ_var), targ_var_name);
6709
6710         return 0;
6711 }
6712
6713 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
6714                                                 struct extern_desc *ext)
6715 {
6716         int local_func_proto_id, kfunc_proto_id, kfunc_id;
6717         const struct btf_type *kern_func;
6718         struct btf *kern_btf = NULL;
6719         int ret, kern_btf_fd = 0;
6720
6721         local_func_proto_id = ext->ksym.type_id;
6722
6723         kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC,
6724                                     &kern_btf, &kern_btf_fd);
6725         if (kfunc_id < 0) {
6726                 pr_warn("extern (func ksym) '%s': not found in kernel BTF\n",
6727                         ext->name);
6728                 return kfunc_id;
6729         }
6730
6731         if (kern_btf != obj->btf_vmlinux) {
6732                 pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
6733                         ext->name);
6734                 return -ENOTSUP;
6735         }
6736
6737         kern_func = btf__type_by_id(kern_btf, kfunc_id);
6738         kfunc_proto_id = kern_func->type;
6739
6740         ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
6741                                         kern_btf, kfunc_proto_id);
6742         if (ret <= 0) {
6743                 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
6744                         ext->name, local_func_proto_id, kfunc_proto_id);
6745                 return -EINVAL;
6746         }
6747
6748         ext->is_set = true;
6749         ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
6750         ext->ksym.kernel_btf_id = kfunc_id;
6751         pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
6752                  ext->name, kfunc_id);
6753
6754         return 0;
6755 }
6756
6757 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
6758 {
6759         const struct btf_type *t;
6760         struct extern_desc *ext;
6761         int i, err;
6762
6763         for (i = 0; i < obj->nr_extern; i++) {
6764                 ext = &obj->externs[i];
6765                 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
6766                         continue;
6767
6768                 if (obj->gen_loader) {
6769                         ext->is_set = true;
6770                         ext->ksym.kernel_btf_obj_fd = 0;
6771                         ext->ksym.kernel_btf_id = 0;
6772                         continue;
6773                 }
6774                 t = btf__type_by_id(obj->btf, ext->btf_id);
6775                 if (btf_is_var(t))
6776                         err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
6777                 else
6778                         err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
6779                 if (err)
6780                         return err;
6781         }
6782         return 0;
6783 }
6784
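/* Resolve externs declared on the BPF side. A short sketch of declarations
 * this function handles (using the __kconfig/__ksym attributes from
 * bpf_helpers.h; names are illustrative):
 *
 *	extern u32 LINUX_KERNEL_VERSION __kconfig;
 *	extern int CONFIG_HZ __kconfig;
 *	extern const struct rq runqueues __ksym;
 *
 * Kconfig externs are filled from the running kernel's config (or from the
 * extra kconfig string, if given), typeless ksyms from /proc/kallsyms, and
 * typed ksyms from vmlinux/module BTF.
 */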
6785 static int bpf_object__resolve_externs(struct bpf_object *obj,
6786                                        const char *extra_kconfig)
6787 {
6788         bool need_config = false, need_kallsyms = false;
6789         bool need_vmlinux_btf = false;
6790         struct extern_desc *ext;
6791         void *kcfg_data = NULL;
6792         int err, i;
6793
6794         if (obj->nr_extern == 0)
6795                 return 0;
6796
6797         if (obj->kconfig_map_idx >= 0)
6798                 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
6799
6800         for (i = 0; i < obj->nr_extern; i++) {
6801                 ext = &obj->externs[i];
6802
6803                 if (ext->type == EXT_KCFG &&
6804                     strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
6805                         void *ext_val = kcfg_data + ext->kcfg.data_off;
6806                         __u32 kver = get_kernel_version();
6807
6808                         if (!kver) {
6809                                 pr_warn("failed to get kernel version\n");
6810                                 return -EINVAL;
6811                         }
6812                         err = set_kcfg_value_num(ext, ext_val, kver);
6813                         if (err)
6814                                 return err;
6815                         pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
6816                 } else if (ext->type == EXT_KCFG &&
6817                            strncmp(ext->name, "CONFIG_", 7) == 0) {
6818                         need_config = true;
6819                 } else if (ext->type == EXT_KSYM) {
6820                         if (ext->ksym.type_id)
6821                                 need_vmlinux_btf = true;
6822                         else
6823                                 need_kallsyms = true;
6824                 } else {
6825                         pr_warn("unrecognized extern '%s'\n", ext->name);
6826                         return -EINVAL;
6827                 }
6828         }
6829         if (need_config && extra_kconfig) {
6830                 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
6831                 if (err)
6832                         return -EINVAL;
6833                 need_config = false;
6834                 for (i = 0; i < obj->nr_extern; i++) {
6835                         ext = &obj->externs[i];
6836                         if (ext->type == EXT_KCFG && !ext->is_set) {
6837                                 need_config = true;
6838                                 break;
6839                         }
6840                 }
6841         }
6842         if (need_config) {
6843                 err = bpf_object__read_kconfig_file(obj, kcfg_data);
6844                 if (err)
6845                         return -EINVAL;
6846         }
6847         if (need_kallsyms) {
6848                 err = bpf_object__read_kallsyms_file(obj);
6849                 if (err)
6850                         return -EINVAL;
6851         }
6852         if (need_vmlinux_btf) {
6853                 err = bpf_object__resolve_ksyms_btf_id(obj);
6854                 if (err)
6855                         return -EINVAL;
6856         }
6857         for (i = 0; i < obj->nr_extern; i++) {
6858                 ext = &obj->externs[i];
6859
6860                 if (!ext->is_set && !ext->is_weak) {
6861                         pr_warn("extern %s (strong) not resolved\n", ext->name);
6862                         return -ESRCH;
6863                 } else if (!ext->is_set) {
6864                         pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
6865                                  ext->name);
6866                 }
6867         }
6868
6869         return 0;
6870 }
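
/*
 * The kcfg externs handled above correspond to BPF-side declarations
 * using the __kconfig attribute from bpf_helpers.h; a minimal sketch:
 *
 *	extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 *	extern int CONFIG_HZ __kconfig __weak;
 *
 * Strong externs must resolve or loading fails with -ESRCH; weak ones are
 * left zeroed, matching the final loop above.
 */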
6871
6872 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
6873 {
6874         struct bpf_object *obj;
6875         int err, i;
6876
6877         if (!attr)
6878                 return libbpf_err(-EINVAL);
6879         obj = attr->obj;
6880         if (!obj)
6881                 return libbpf_err(-EINVAL);
6882
6883         if (obj->loaded) {
6884                 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
6885                 return libbpf_err(-EINVAL);
6886         }
6887
6888         if (obj->gen_loader)
6889                 bpf_gen__init(obj->gen_loader, attr->log_level);
6890
6891         err = bpf_object__probe_loading(obj);
6892         err = err ? : bpf_object__load_vmlinux_btf(obj, false);
6893         err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
6894         err = err ? : bpf_object__sanitize_and_load_btf(obj);
6895         err = err ? : bpf_object__sanitize_maps(obj);
6896         err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
6897         err = err ? : bpf_object__create_maps(obj);
6898         err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : attr->target_btf_path);
6899         err = err ? : bpf_object__load_progs(obj, attr->log_level);
6900
6901         if (obj->gen_loader) {
6902                 /* reset FDs */
6903                 if (obj->btf)
6904                         btf__set_fd(obj->btf, -1);
6905                 for (i = 0; i < obj->nr_maps; i++)
6906                         obj->maps[i].fd = -1;
6907                 if (!err)
6908                         err = bpf_gen__finish(obj->gen_loader);
6909         }
6910
6911         /* clean up module BTFs */
6912         for (i = 0; i < obj->btf_module_cnt; i++) {
6913                 close(obj->btf_modules[i].fd);
6914                 btf__free(obj->btf_modules[i].btf);
6915                 free(obj->btf_modules[i].name);
6916         }
6917         free(obj->btf_modules);
6918
6919         /* clean up vmlinux BTF */
6920         btf__free(obj->btf_vmlinux);
6921         obj->btf_vmlinux = NULL;
6922
6923         obj->loaded = true; /* doesn't matter whether load succeeded or not */
6924
6925         if (err)
6926                 goto out;
6927
6928         return 0;
6929 out:
6930         /* unpin any maps that were auto-pinned during load */
6931         for (i = 0; i < obj->nr_maps; i++)
6932                 if (obj->maps[i].pinned && !obj->maps[i].reused)
6933                         bpf_map__unpin(&obj->maps[i], NULL);
6934
6935         bpf_object__unload(obj);
6936         pr_warn("failed to load object '%s'\n", obj->path);
6937         return libbpf_err(err);
6938 }
6939
6940 int bpf_object__load(struct bpf_object *obj)
6941 {
6942         struct bpf_object_load_attr attr = {
6943                 .obj = obj,
6944         };
6945
6946         return bpf_object__load_xattr(&attr);
6947 }
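
/*
 * Typical caller flow around these entry points, a sketch with error
 * handling elided ("prog.bpf.o" is a placeholder object file):
 *
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -1;
 *	}
 */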
6948
6949 static int make_parent_dir(const char *path)
6950 {
6951         char *cp, errmsg[STRERR_BUFSIZE];
6952         char *dname, *dir;
6953         int err = 0;
6954
6955         dname = strdup(path);
6956         if (dname == NULL)
6957                 return -ENOMEM;
6958
6959         dir = dirname(dname);
6960         if (mkdir(dir, 0700) && errno != EEXIST)
6961                 err = -errno;
6962
6963         free(dname);
6964         if (err) {
6965                 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
6966                 pr_warn("failed to mkdir %s: %s\n", path, cp);
6967         }
6968         return err;
6969 }
6970
6971 static int check_path(const char *path)
6972 {
6973         char *cp, errmsg[STRERR_BUFSIZE];
6974         struct statfs st_fs;
6975         char *dname, *dir;
6976         int err = 0;
6977
6978         if (path == NULL)
6979                 return -EINVAL;
6980
6981         dname = strdup(path);
6982         if (dname == NULL)
6983                 return -ENOMEM;
6984
6985         dir = dirname(dname);
6986         if (statfs(dir, &st_fs)) {
6987                 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6988                 pr_warn("failed to statfs %s: %s\n", dir, cp);
6989                 err = -errno;
6990         }
6991         free(dname);
6992
6993         if (!err && st_fs.f_type != BPF_FS_MAGIC) {
6994                 pr_warn("specified path %s is not on BPF FS\n", path);
6995                 err = -EINVAL;
6996         }
6997
6998         return err;
6999 }
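
/*
 * check_path() rejects anything not on a mounted BPF filesystem, so a
 * -EINVAL from the pinning APIs below often just means bpffs isn't
 * mounted; assuming the conventional mount point, the fix is:
 *
 *	mount -t bpf bpffs /sys/fs/bpf
 */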
7000
7001 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
7002                               int instance)
7003 {
7004         char *cp, errmsg[STRERR_BUFSIZE];
7005         int err;
7006
7007         err = make_parent_dir(path);
7008         if (err)
7009                 return libbpf_err(err);
7010
7011         err = check_path(path);
7012         if (err)
7013                 return libbpf_err(err);
7014
7015         if (prog == NULL) {
7016                 pr_warn("invalid program pointer\n");
7017                 return libbpf_err(-EINVAL);
7018         }
7019
7020         if (instance < 0 || instance >= prog->instances.nr) {
7021                 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7022                         instance, prog->name, prog->instances.nr);
7023                 return libbpf_err(-EINVAL);
7024         }
7025
7026         if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7027                 err = -errno;
7028                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7029                 pr_warn("failed to pin program: %s\n", cp);
7030                 return libbpf_err(err);
7031         }
7032         pr_debug("pinned program '%s'\n", path);
7033
7034         return 0;
7035 }
7036
7037 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
7038                                 int instance)
7039 {
7040         int err;
7041
7042         err = check_path(path);
7043         if (err)
7044                 return libbpf_err(err);
7045
7046         if (prog == NULL) {
7047                 pr_warn("invalid program pointer\n");
7048                 return libbpf_err(-EINVAL);
7049         }
7050
7051         if (instance < 0 || instance >= prog->instances.nr) {
7052                 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7053                         instance, prog->name, prog->instances.nr);
7054                 return libbpf_err(-EINVAL);
7055         }
7056
7057         err = unlink(path);
7058         if (err != 0)
7059                 return libbpf_err(-errno);
7060
7061         pr_debug("unpinned program '%s'\n", path);
7062
7063         return 0;
7064 }
7065
7066 int bpf_program__pin(struct bpf_program *prog, const char *path)
7067 {
7068         int i, err;
7069
7070         err = make_parent_dir(path);
7071         if (err)
7072                 return libbpf_err(err);
7073
7074         err = check_path(path);
7075         if (err)
7076                 return libbpf_err(err);
7077
7078         if (prog == NULL) {
7079                 pr_warn("invalid program pointer\n");
7080                 return libbpf_err(-EINVAL);
7081         }
7082
7083         if (prog->instances.nr <= 0) {
7084                 pr_warn("no instances of prog %s to pin\n", prog->name);
7085                 return libbpf_err(-EINVAL);
7086         }
7087
7088         if (prog->instances.nr == 1) {
7089                 /* don't create subdirs when pinning single instance */
7090                 return bpf_program__pin_instance(prog, path, 0);
7091         }
7092
7093         for (i = 0; i < prog->instances.nr; i++) {
7094                 char buf[PATH_MAX];
7095                 int len;
7096
7097                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7098                 if (len < 0) {
7099                         err = -EINVAL;
7100                         goto err_unpin;
7101                 } else if (len >= PATH_MAX) {
7102                         err = -ENAMETOOLONG;
7103                         goto err_unpin;
7104                 }
7105
7106                 err = bpf_program__pin_instance(prog, buf, i);
7107                 if (err)
7108                         goto err_unpin;
7109         }
7110
7111         return 0;
7112
7113 err_unpin:
7114         for (i = i - 1; i >= 0; i--) {
7115                 char buf[PATH_MAX];
7116                 int len;
7117
7118                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7119                 if (len < 0)
7120                         continue;
7121                 else if (len >= PATH_MAX)
7122                         continue;
7123
7124                 bpf_program__unpin_instance(prog, buf, i);
7125         }
7126
7127         rmdir(path);
7128
7129         return libbpf_err(err);
7130 }
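
/*
 * Usage sketch (prog taken from a loaded object, pin path illustrative):
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *
 * A single-instance program is pinned directly at the given path; a
 * multi-instance one gets a <path>/<instance-index> subtree, as the loop
 * above implements.
 */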
7131
7132 int bpf_program__unpin(struct bpf_program *prog, const char *path)
7133 {
7134         int i, err;
7135
7136         err = check_path(path);
7137         if (err)
7138                 return libbpf_err(err);
7139
7140         if (prog == NULL) {
7141                 pr_warn("invalid program pointer\n");
7142                 return libbpf_err(-EINVAL);
7143         }
7144
7145         if (prog->instances.nr <= 0) {
7146                 pr_warn("no instances of prog %s to unpin\n", prog->name);
7147                 return libbpf_err(-EINVAL);
7148         }
7149
7150         if (prog->instances.nr == 1) {
7151                 /* no subdirs are created when pinning a single instance */
7152                 return bpf_program__unpin_instance(prog, path, 0);
7153         }
7154
7155         for (i = 0; i < prog->instances.nr; i++) {
7156                 char buf[PATH_MAX];
7157                 int len;
7158
7159                 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7160                 if (len < 0)
7161                         return libbpf_err(-EINVAL);
7162                 else if (len >= PATH_MAX)
7163                         return libbpf_err(-ENAMETOOLONG);
7164
7165                 err = bpf_program__unpin_instance(prog, buf, i);
7166                 if (err)
7167                         return err;
7168         }
7169
7170         err = rmdir(path);
7171         if (err)
7172                 return libbpf_err(-errno);
7173
7174         return 0;
7175 }
7176
7177 int bpf_map__pin(struct bpf_map *map, const char *path)
7178 {
7179         char *cp, errmsg[STRERR_BUFSIZE];
7180         int err;
7181
7182         if (map == NULL) {
7183                 pr_warn("invalid map pointer\n");
7184                 return libbpf_err(-EINVAL);
7185         }
7186
7187         if (map->pin_path) {
7188                 if (path && strcmp(path, map->pin_path)) {
7189                         pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7190                                 bpf_map__name(map), map->pin_path, path);
7191                         return libbpf_err(-EINVAL);
7192                 } else if (map->pinned) {
7193                         pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7194                                  bpf_map__name(map), map->pin_path);
7195                         return 0;
7196                 }
7197         } else {
7198                 if (!path) {
7199                         pr_warn("missing a path to pin map '%s' at\n",
7200                                 bpf_map__name(map));
7201                         return libbpf_err(-EINVAL);
7202                 } else if (map->pinned) {
7203                         pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7204                         return libbpf_err(-EEXIST);
7205                 }
7206
7207                 map->pin_path = strdup(path);
7208                 if (!map->pin_path) {
7209                         err = -errno;
7210                         goto out_err;
7211                 }
7212         }
7213
7214         err = make_parent_dir(map->pin_path);
7215         if (err)
7216                 return libbpf_err(err);
7217
7218         err = check_path(map->pin_path);
7219         if (err)
7220                 return libbpf_err(err);
7221
7222         if (bpf_obj_pin(map->fd, map->pin_path)) {
7223                 err = -errno;
7224                 goto out_err;
7225         }
7226
7227         map->pinned = true;
7228         pr_debug("pinned map '%s'\n", map->pin_path);
7229
7230         return 0;
7231
7232 out_err:
7233         cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7234         pr_warn("failed to pin map: %s\n", cp);
7235         return libbpf_err(err);
7236 }
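
/*
 * Usage sketch (map taken from a loaded object, pin path illustrative):
 *
 *	err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 *
 * If the map already carries a pin_path (e.g. from auto-pinning), passing
 * NULL reuses it, while a conflicting path is rejected with -EINVAL.
 */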
7237
7238 int bpf_map__unpin(struct bpf_map *map, const char *path)
7239 {
7240         int err;
7241
7242         if (map == NULL) {
7243                 pr_warn("invalid map pointer\n");
7244                 return libbpf_err(-EINVAL);
7245         }
7246
7247         if (map->pin_path) {
7248                 if (path && strcmp(path, map->pin_path)) {
7249                         pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7250                                 bpf_map__name(map), map->pin_path, path);
7251                         return libbpf_err(-EINVAL);
7252                 }
7253                 path = map->pin_path;
7254         } else if (!path) {
7255                 pr_warn("no path to unpin map '%s' from\n",
7256                         bpf_map__name(map));
7257                 return libbpf_err(-EINVAL);
7258         }
7259
7260         err = check_path(path);
7261         if (err)
7262                 return libbpf_err(err);
7263
7264         err = unlink(path);
7265         if (err != 0)
7266                 return libbpf_err(-errno);
7267
7268         map->pinned = false;
7269         pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7270
7271         return 0;
7272 }
7273
7274 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7275 {
7276         char *new = NULL;
7277
7278         if (path) {
7279                 new = strdup(path);
7280                 if (!new)
7281                         return libbpf_err(-errno);
7282         }
7283
7284         free(map->pin_path);
7285         map->pin_path = new;
7286         return 0;
7287 }
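
/*
 * Setting a pin path before bpf_object__load() opts the map into the same
 * pin-or-reuse logic as declarative auto-pinning; a sketch (path
 * illustrative):
 *
 *	bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	err = bpf_object__load(obj);
 */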
7288
7289 const char *bpf_map__get_pin_path(const struct bpf_map *map)
7290 {
7291         return map->pin_path;
7292 }
7293
7294 const char *bpf_map__pin_path(const struct bpf_map *map)
7295 {
7296         return map->pin_path;
7297 }
7298
7299 bool bpf_map__is_pinned(const struct bpf_map *map)
7300 {
7301         return map->pinned;
7302 }
7303
7304 static void sanitize_pin_path(char *s)
7305 {
7306         /* bpffs disallows periods in path names */
7307         while (*s) {
7308                 if (*s == '.')
7309                         *s = '_';
7310                 s++;
7311         }
7312 }
7313
7314 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7315 {
7316         struct bpf_map *map;
7317         int err;
7318
7319         if (!obj)
7320                 return libbpf_err(-ENOENT);
7321
7322         if (!obj->loaded) {
7323                 pr_warn("object not yet loaded; load it first\n");
7324                 return libbpf_err(-ENOENT);
7325         }
7326
7327         bpf_object__for_each_map(map, obj) {
7328                 char *pin_path = NULL;
7329                 char buf[PATH_MAX];
7330
7331                 if (path) {
7332                         int len;
7333
7334                         len = snprintf(buf, PATH_MAX, "%s/%s", path,
7335                                        bpf_map__name(map));
7336                         if (len < 0) {
7337                                 err = -EINVAL;
7338                                 goto err_unpin_maps;
7339                         } else if (len >= PATH_MAX) {
7340                                 err = -ENAMETOOLONG;
7341                                 goto err_unpin_maps;
7342                         }
7343                         sanitize_pin_path(buf);
7344                         pin_path = buf;
7345                 } else if (!map->pin_path) {
7346                         continue;
7347                 }
7348
7349                 err = bpf_map__pin(map, pin_path);
7350                 if (err)
7351                         goto err_unpin_maps;
7352         }
7353
7354         return 0;
7355
7356 err_unpin_maps:
7357         while ((map = bpf_map__prev(map, obj))) {
7358                 if (!map->pin_path)
7359                         continue;
7360
7361                 bpf_map__unpin(map, NULL);
7362         }
7363
7364         return libbpf_err(err);
7365 }
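
/*
 * Usage sketch (directory path illustrative): pin every map of an object
 * under one bpffs directory, with '.' in names rewritten to '_' by
 * sanitize_pin_path():
 *
 *	err = bpf_object__pin_maps(obj, "/sys/fs/bpf/my_obj");
 *
 * On failure, maps pinned so far are unpinned again by the
 * err_unpin_maps loop above.
 */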
7366
7367 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
7368 {
7369         struct bpf_map *map;
7370         int err;
7371
7372         if (!obj)
7373                 return libbpf_err(-ENOENT);
7374
7375         bpf_object__for_each_map(map, obj) {
7376                 char *pin_path = NULL;
7377                 char buf[PATH_MAX];
7378
7379                 if (path) {
7380                         int len;
7381
7382                         len = snprintf(buf, PATH_MAX, "%s/%s", path,
7383                                        bpf_map__name(map));
7384                         if (len < 0)
7385                                 return libbpf_err(-EINVAL);
7386                         else if (len >= PATH_MAX)
7387                                 return libbpf_err(-ENAMETOOLONG);
7388                         sanitize_pin_path(buf);
7389                         pin_path = buf;
7390                 } else if (!map->pin_path) {
7391                         continue;
7392                 }
7393
7394                 err = bpf_map__unpin(map, pin_path);
7395                 if (err)
7396                         return libbpf_err(err);
7397         }
7398
7399         return 0;
7400 }
7401
7402 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
7403 {
7404         struct bpf_program *prog;
7405         int err;
7406
7407         if (!obj)
7408                 return libbpf_err(-ENOENT);
7409
7410         if (!obj->loaded) {
7411                 pr_warn("object not yet loaded; load it first\n");
7412                 return libbpf_err(-ENOENT);
7413         }
7414
7415         bpf_object__for_each_program(prog, obj) {
7416                 char buf[PATH_MAX];
7417                 int len;
7418
7419                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7420                                prog->pin_name);
7421                 if (len < 0) {
7422                         err = -EINVAL;
7423                         goto err_unpin_programs;
7424                 } else if (len >= PATH_MAX) {
7425                         err = -ENAMETOOLONG;
7426                         goto err_unpin_programs;
7427                 }
7428
7429                 err = bpf_program__pin(prog, buf);
7430                 if (err)
7431                         goto err_unpin_programs;
7432         }
7433
7434         return 0;
7435
7436 err_unpin_programs:
7437         while ((prog = bpf_program__prev(prog, obj))) {
7438                 char buf[PATH_MAX];
7439                 int len;
7440
7441                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7442                                prog->pin_name);
7443                 if (len < 0)
7444                         continue;
7445                 else if (len >= PATH_MAX)
7446                         continue;
7447
7448                 bpf_program__unpin(prog, buf);
7449         }
7450
7451         return libbpf_err(err);
7452 }
7453
7454 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
7455 {
7456         struct bpf_program *prog;
7457         int err;
7458
7459         if (!obj)
7460                 return libbpf_err(-ENOENT);
7461
7462         bpf_object__for_each_program(prog, obj) {
7463                 char buf[PATH_MAX];
7464                 int len;
7465
7466                 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7467                                prog->pin_name);
7468                 if (len < 0)
7469                         return libbpf_err(-EINVAL);
7470                 else if (len >= PATH_MAX)
7471                         return libbpf_err(-ENAMETOOLONG);
7472
7473                 err = bpf_program__unpin(prog, buf);
7474                 if (err)
7475                         return libbpf_err(err);
7476         }
7477
7478         return 0;
7479 }
7480
7481 int bpf_object__pin(struct bpf_object *obj, const char *path)
7482 {
7483         int err;
7484
7485         err = bpf_object__pin_maps(obj, path);
7486         if (err)
7487                 return libbpf_err(err);
7488
7489         err = bpf_object__pin_programs(obj, path);
7490         if (err) {
7491                 bpf_object__unpin_maps(obj, path);
7492                 return libbpf_err(err);
7493         }
7494
7495         return 0;
7496 }
7497
7498 static void bpf_map__destroy(struct bpf_map *map)
7499 {
7500         if (map->clear_priv)
7501                 map->clear_priv(map, map->priv);
7502         map->priv = NULL;
7503         map->clear_priv = NULL;
7504
7505         if (map->inner_map) {
7506                 bpf_map__destroy(map->inner_map);
7507                 zfree(&map->inner_map);
7508         }
7509
7510         zfree(&map->init_slots);
7511         map->init_slots_sz = 0;
7512
7513         if (map->mmaped) {
7514                 munmap(map->mmaped, bpf_map_mmap_sz(map));
7515                 map->mmaped = NULL;
7516         }
7517
7518         if (map->st_ops) {
7519                 zfree(&map->st_ops->data);
7520                 zfree(&map->st_ops->progs);
7521                 zfree(&map->st_ops->kern_func_off);
7522                 zfree(&map->st_ops);
7523         }
7524
7525         zfree(&map->name);
7526         zfree(&map->pin_path);
7527
7528         if (map->fd >= 0)
7529                 zclose(map->fd);
7530 }
7531
7532 void bpf_object__close(struct bpf_object *obj)
7533 {
7534         size_t i;
7535
7536         if (IS_ERR_OR_NULL(obj))
7537                 return;
7538
7539         if (obj->clear_priv)
7540                 obj->clear_priv(obj, obj->priv);
7541
7542         bpf_gen__free(obj->gen_loader);
7543         bpf_object__elf_finish(obj);
7544         bpf_object__unload(obj);
7545         btf__free(obj->btf);
7546         btf_ext__free(obj->btf_ext);
7547
7548         for (i = 0; i < obj->nr_maps; i++)
7549                 bpf_map__destroy(&obj->maps[i]);
7550
7551         zfree(&obj->btf_custom_path);
7552         zfree(&obj->kconfig);
7553         zfree(&obj->externs);
7554         obj->nr_extern = 0;
7555
7556         zfree(&obj->maps);
7557         obj->nr_maps = 0;
7558
7559         if (obj->programs && obj->nr_programs) {
7560                 for (i = 0; i < obj->nr_programs; i++)
7561                         bpf_program__exit(&obj->programs[i]);
7562         }
7563         zfree(&obj->programs);
7564
7565         list_del(&obj->list);
7566         free(obj);
7567 }
7568
7569 struct bpf_object *
7570 bpf_object__next(struct bpf_object *prev)
7571 {
7572         struct bpf_object *next;
7573
7574         if (!prev)
7575                 next = list_first_entry(&bpf_objects_list,
7576                                         struct bpf_object,
7577                                         list);
7578         else
7579                 next = list_next_entry(prev, list);
7580
7581         /* An empty list is detected here, so no check is needed on entry. */
7582         if (&next->list == &bpf_objects_list)
7583                 return NULL;
7584
7585         return next;
7586 }
7587
7588 const char *bpf_object__name(const struct bpf_object *obj)
7589 {
7590         return obj ? obj->name : libbpf_err_ptr(-EINVAL);
7591 }
7592
7593 unsigned int bpf_object__kversion(const struct bpf_object *obj)
7594 {
7595         return obj ? obj->kern_version : 0;
7596 }
7597
7598 struct btf *bpf_object__btf(const struct bpf_object *obj)
7599 {
7600         return obj ? obj->btf : NULL;
7601 }
7602
7603 int bpf_object__btf_fd(const struct bpf_object *obj)
7604 {
7605         return obj->btf ? btf__fd(obj->btf) : -1;
7606 }
7607
7608 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
7609 {
7610         if (obj->loaded)
7611                 return libbpf_err(-EINVAL);
7612
7613         obj->kern_version = kern_version;
7614
7615         return 0;
7616 }
7617
7618 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
7619                          bpf_object_clear_priv_t clear_priv)
7620 {
7621         if (obj->priv && obj->clear_priv)
7622                 obj->clear_priv(obj, obj->priv);
7623
7624         obj->priv = priv;
7625         obj->clear_priv = clear_priv;
7626         return 0;
7627 }
7628
7629 void *bpf_object__priv(const struct bpf_object *obj)
7630 {
7631         return obj ? obj->priv : libbpf_err_ptr(-EINVAL);
7632 }
7633
7634 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
7635 {
7636         struct bpf_gen *gen;
7637
7638         if (!opts)
7639                 return -EFAULT;
7640         if (!OPTS_VALID(opts, gen_loader_opts))
7641                 return -EINVAL;
7642         gen = calloc(1, sizeof(*gen));
7643         if (!gen)
7644                 return -ENOMEM;
7645         gen->opts = opts;
7646         obj->gen_loader = gen;
7647         return 0;
7648 }
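
/*
 * A sketch of driving the loader-program generator; gen_loader_opts
 * fields are per libbpf.h (assumed here: data/insns/data_sz/insns_sz
 * describe the generated blob and loader program after a successful
 * load):
 *
 *	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen_opts);
 *
 *	err = bpf_object__gen_loader(obj, &gen_opts);
 *	err = err ?: bpf_object__load(obj);
 */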
7649
7650 static struct bpf_program *
7651 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
7652                     bool forward)
7653 {
7654         size_t nr_programs = obj->nr_programs;
7655         ssize_t idx;
7656
7657         if (!nr_programs)
7658                 return NULL;
7659
7660         if (!p)
7661                 /* Iterate from the beginning */
7662                 return forward ? &obj->programs[0] :
7663                         &obj->programs[nr_programs - 1];
7664
7665         if (p->obj != obj) {
7666                 pr_warn("error: program handle doesn't match object\n");
7667                 return errno = EINVAL, NULL;
7668         }
7669
7670         idx = (p - obj->programs) + (forward ? 1 : -1);
7671         if (idx >= obj->nr_programs || idx < 0)
7672                 return NULL;
7673         return &obj->programs[idx];
7674 }
7675
7676 struct bpf_program *
7677 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
7678 {
7679         struct bpf_program *prog = prev;
7680
7681         do {
7682                 prog = __bpf_program__iter(prog, obj, true);
7683         } while (prog && prog_is_subprog(obj, prog));
7684
7685         return prog;
7686 }
7687
7688 struct bpf_program *
7689 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
7690 {
7691         struct bpf_program *prog = next;
7692
7693         do {
7694                 prog = __bpf_program__iter(prog, obj, false);
7695         } while (prog && prog_is_subprog(obj, prog));
7696
7697         return prog;
7698 }
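
/*
 * Both iterators skip subprograms; callers normally go through the
 * wrapper macro, roughly:
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		printf("prog: %s\n", bpf_program__name(prog));
 */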
7699
7700 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
7701                           bpf_program_clear_priv_t clear_priv)
7702 {
7703         if (prog->priv && prog->clear_priv)
7704                 prog->clear_priv(prog, prog->priv);
7705
7706         prog->priv = priv;
7707         prog->clear_priv = clear_priv;
7708         return 0;
7709 }
7710
7711 void *bpf_program__priv(const struct bpf_program *prog)
7712 {
7713         return prog ? prog->priv : libbpf_err_ptr(-EINVAL);
7714 }
7715
7716 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
7717 {
7718         prog->prog_ifindex = ifindex;
7719 }
7720
7721 const char *bpf_program__name(const struct bpf_program *prog)
7722 {
7723         return prog->name;
7724 }
7725
7726 const char *bpf_program__section_name(const struct bpf_program *prog)
7727 {
7728         return prog->sec_name;
7729 }
7730
7731 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
7732 {
7733         const char *title;
7734
7735         title = prog->sec_name;
7736         if (needs_copy) {
7737                 title = strdup(title);
7738                 if (!title) {
7739                         pr_warn("failed to strdup program title\n");
7740                         return libbpf_err_ptr(-ENOMEM);
7741                 }
7742         }
7743
7744         return title;
7745 }
7746
7747 bool bpf_program__autoload(const struct bpf_program *prog)
7748 {
7749         return prog->load;
7750 }
7751
7752 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
7753 {
7754         if (prog->obj->loaded)
7755                 return libbpf_err(-EINVAL);
7756
7757         prog->load = autoload;
7758         return 0;
7759 }
7760
7761 int bpf_program__fd(const struct bpf_program *prog)
7762 {
7763         return bpf_program__nth_fd(prog, 0);
7764 }
7765
7766 size_t bpf_program__size(const struct bpf_program *prog)
7767 {
7768         return prog->insns_cnt * BPF_INSN_SZ;
7769 }
7770
7771 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
7772                           bpf_program_prep_t prep)
7773 {
7774         int *instances_fds;
7775
7776         if (nr_instances <= 0 || !prep)
7777                 return libbpf_err(-EINVAL);
7778
7779         if (prog->instances.nr > 0 || prog->instances.fds) {
7780                 pr_warn("Can't set pre-processor after loading\n");
7781                 return libbpf_err(-EINVAL);
7782         }
7783
7784         instances_fds = malloc(sizeof(int) * nr_instances);
7785         if (!instances_fds) {
7786                 pr_warn("failed to allocate memory for instance fds\n");
7787                 return libbpf_err(-ENOMEM);
7788         }
7789
7790         /* fill all fds with -1 */
7791         memset(instances_fds, -1, sizeof(int) * nr_instances);
7792
7793         prog->instances.nr = nr_instances;
7794         prog->instances.fds = instances_fds;
7795         prog->preprocessor = prep;
7796         return 0;
7797 }
7798
7799 int bpf_program__nth_fd(const struct bpf_program *prog, int n)
7800 {
7801         int fd;
7802
7803         if (!prog)
7804                 return libbpf_err(-EINVAL);
7805
7806         if (n >= prog->instances.nr || n < 0) {
7807                 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
7808                         n, prog->name, prog->instances.nr);
7809                 return libbpf_err(-EINVAL);
7810         }
7811
7812         fd = prog->instances.fds[n];
7813         if (fd < 0) {
7814                 pr_warn("%dth instance of program '%s' is invalid\n",
7815                         n, prog->name);
7816                 return libbpf_err(-ENOENT);
7817         }
7818
7819         return fd;
7820 }
7821
7822 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog)
7823 {
7824         return prog->type;
7825 }
7826
7827 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
7828 {
7829         prog->type = type;
7830 }
7831
7832 static bool bpf_program__is_type(const struct bpf_program *prog,
7833                                  enum bpf_prog_type type)
7834 {
7835         return prog ? (prog->type == type) : false;
7836 }
7837
7838 #define BPF_PROG_TYPE_FNS(NAME, TYPE)                           \
7839 int bpf_program__set_##NAME(struct bpf_program *prog)           \
7840 {                                                               \
7841         if (!prog)                                              \
7842                 return libbpf_err(-EINVAL);                     \
7843         bpf_program__set_type(prog, TYPE);                      \
7844         return 0;                                               \
7845 }                                                               \
7846                                                                 \
7847 bool bpf_program__is_##NAME(const struct bpf_program *prog)     \
7848 {                                                               \
7849         return bpf_program__is_type(prog, TYPE);                \
7850 }                                                               \
7851
7852 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
7853 BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
7854 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
7855 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
7856 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
7857 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
7858 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
7859 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
7860 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
7861 BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
7862 BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
7863 BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
7864 BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
7865
7866 enum bpf_attach_type
7867 bpf_program__get_expected_attach_type(const struct bpf_program *prog)
7868 {
7869         return prog->expected_attach_type;
7870 }
7871
7872 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
7873                                            enum bpf_attach_type type)
7874 {
7875         prog->expected_attach_type = type;
7876 }
7877
7878 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,           \
7879                           attachable, attach_btf)                           \
7880         {                                                                   \
7881                 .sec = string,                                              \
7882                 .len = sizeof(string) - 1,                                  \
7883                 .prog_type = ptype,                                         \
7884                 .expected_attach_type = eatype,                             \
7885                 .is_exp_attach_type_optional = eatype_optional,             \
7886                 .is_attachable = attachable,                                \
7887                 .is_attach_btf = attach_btf,                                \
7888         }
7889
7890 /* Programs that can NOT be attached. */
7891 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
7892
7893 /* Programs that can be attached. */
7894 #define BPF_APROG_SEC(string, ptype, atype) \
7895         BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
7896
7897 /* Programs that must specify expected attach type at load time. */
7898 #define BPF_EAPROG_SEC(string, ptype, eatype) \
7899         BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
7900
7901 /* Programs that use BTF to identify attach point */
7902 #define BPF_PROG_BTF(string, ptype, eatype) \
7903         BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
7904
7905 /* Programs that can be attached but attach type can't be identified by section
7906  * name. Kept for backward compatibility.
7907  */
7908 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
7909
7910 #define SEC_DEF(sec_pfx, ptype, ...) {                                      \
7911         .sec = sec_pfx,                                                     \
7912         .len = sizeof(sec_pfx) - 1,                                         \
7913         .prog_type = BPF_PROG_TYPE_##ptype,                                 \
7914         __VA_ARGS__                                                         \
7915 }
7916
7917 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
7918                                       struct bpf_program *prog);
7919 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
7920                                   struct bpf_program *prog);
7921 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
7922                                       struct bpf_program *prog);
7923 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
7924                                      struct bpf_program *prog);
7925 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
7926                                    struct bpf_program *prog);
7927 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
7928                                     struct bpf_program *prog);
7929
7930 static const struct bpf_sec_def section_defs[] = {
7931         BPF_PROG_SEC("socket",                  BPF_PROG_TYPE_SOCKET_FILTER),
7932         BPF_EAPROG_SEC("sk_reuseport/migrate",  BPF_PROG_TYPE_SK_REUSEPORT,
7933                                                 BPF_SK_REUSEPORT_SELECT_OR_MIGRATE),
7934         BPF_EAPROG_SEC("sk_reuseport",          BPF_PROG_TYPE_SK_REUSEPORT,
7935                                                 BPF_SK_REUSEPORT_SELECT),
7936         SEC_DEF("kprobe/", KPROBE,
7937                 .attach_fn = attach_kprobe),
7938         BPF_PROG_SEC("uprobe/",                 BPF_PROG_TYPE_KPROBE),
7939         SEC_DEF("kretprobe/", KPROBE,
7940                 .attach_fn = attach_kprobe),
7941         BPF_PROG_SEC("uretprobe/",              BPF_PROG_TYPE_KPROBE),
7942         BPF_PROG_SEC("classifier",              BPF_PROG_TYPE_SCHED_CLS),
7943         BPF_PROG_SEC("action",                  BPF_PROG_TYPE_SCHED_ACT),
7944         SEC_DEF("tracepoint/", TRACEPOINT,
7945                 .attach_fn = attach_tp),
7946         SEC_DEF("tp/", TRACEPOINT,
7947                 .attach_fn = attach_tp),
7948         SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
7949                 .attach_fn = attach_raw_tp),
7950         SEC_DEF("raw_tp/", RAW_TRACEPOINT,
7951                 .attach_fn = attach_raw_tp),
7952         SEC_DEF("tp_btf/", TRACING,
7953                 .expected_attach_type = BPF_TRACE_RAW_TP,
7954                 .is_attach_btf = true,
7955                 .attach_fn = attach_trace),
7956         SEC_DEF("fentry/", TRACING,
7957                 .expected_attach_type = BPF_TRACE_FENTRY,
7958                 .is_attach_btf = true,
7959                 .attach_fn = attach_trace),
7960         SEC_DEF("fmod_ret/", TRACING,
7961                 .expected_attach_type = BPF_MODIFY_RETURN,
7962                 .is_attach_btf = true,
7963                 .attach_fn = attach_trace),
7964         SEC_DEF("fexit/", TRACING,
7965                 .expected_attach_type = BPF_TRACE_FEXIT,
7966                 .is_attach_btf = true,
7967                 .attach_fn = attach_trace),
7968         SEC_DEF("fentry.s/", TRACING,
7969                 .expected_attach_type = BPF_TRACE_FENTRY,
7970                 .is_attach_btf = true,
7971                 .is_sleepable = true,
7972                 .attach_fn = attach_trace),
7973         SEC_DEF("fmod_ret.s/", TRACING,
7974                 .expected_attach_type = BPF_MODIFY_RETURN,
7975                 .is_attach_btf = true,
7976                 .is_sleepable = true,
7977                 .attach_fn = attach_trace),
7978         SEC_DEF("fexit.s/", TRACING,
7979                 .expected_attach_type = BPF_TRACE_FEXIT,
7980                 .is_attach_btf = true,
7981                 .is_sleepable = true,
7982                 .attach_fn = attach_trace),
7983         SEC_DEF("freplace/", EXT,
7984                 .is_attach_btf = true,
7985                 .attach_fn = attach_trace),
7986         SEC_DEF("lsm/", LSM,
7987                 .is_attach_btf = true,
7988                 .expected_attach_type = BPF_LSM_MAC,
7989                 .attach_fn = attach_lsm),
7990         SEC_DEF("lsm.s/", LSM,
7991                 .is_attach_btf = true,
7992                 .is_sleepable = true,
7993                 .expected_attach_type = BPF_LSM_MAC,
7994                 .attach_fn = attach_lsm),
7995         SEC_DEF("iter/", TRACING,
7996                 .expected_attach_type = BPF_TRACE_ITER,
7997                 .is_attach_btf = true,
7998                 .attach_fn = attach_iter),
7999         SEC_DEF("syscall", SYSCALL,
8000                 .is_sleepable = true),
8001         BPF_EAPROG_SEC("xdp_devmap/",           BPF_PROG_TYPE_XDP,
8002                                                 BPF_XDP_DEVMAP),
8003         BPF_EAPROG_SEC("xdp_cpumap/",           BPF_PROG_TYPE_XDP,
8004                                                 BPF_XDP_CPUMAP),
8005         BPF_APROG_SEC("xdp",                    BPF_PROG_TYPE_XDP,
8006                                                 BPF_XDP),
8007         BPF_PROG_SEC("perf_event",              BPF_PROG_TYPE_PERF_EVENT),
8008         BPF_PROG_SEC("lwt_in",                  BPF_PROG_TYPE_LWT_IN),
8009         BPF_PROG_SEC("lwt_out",                 BPF_PROG_TYPE_LWT_OUT),
8010         BPF_PROG_SEC("lwt_xmit",                BPF_PROG_TYPE_LWT_XMIT),
8011         BPF_PROG_SEC("lwt_seg6local",           BPF_PROG_TYPE_LWT_SEG6LOCAL),
8012         BPF_APROG_SEC("cgroup_skb/ingress",     BPF_PROG_TYPE_CGROUP_SKB,
8013                                                 BPF_CGROUP_INET_INGRESS),
8014         BPF_APROG_SEC("cgroup_skb/egress",      BPF_PROG_TYPE_CGROUP_SKB,
8015                                                 BPF_CGROUP_INET_EGRESS),
8016         BPF_APROG_COMPAT("cgroup/skb",          BPF_PROG_TYPE_CGROUP_SKB),
8017         BPF_EAPROG_SEC("cgroup/sock_create",    BPF_PROG_TYPE_CGROUP_SOCK,
8018                                                 BPF_CGROUP_INET_SOCK_CREATE),
8019         BPF_EAPROG_SEC("cgroup/sock_release",   BPF_PROG_TYPE_CGROUP_SOCK,
8020                                                 BPF_CGROUP_INET_SOCK_RELEASE),
8021         BPF_APROG_SEC("cgroup/sock",            BPF_PROG_TYPE_CGROUP_SOCK,
8022                                                 BPF_CGROUP_INET_SOCK_CREATE),
8023         BPF_EAPROG_SEC("cgroup/post_bind4",     BPF_PROG_TYPE_CGROUP_SOCK,
8024                                                 BPF_CGROUP_INET4_POST_BIND),
8025         BPF_EAPROG_SEC("cgroup/post_bind6",     BPF_PROG_TYPE_CGROUP_SOCK,
8026                                                 BPF_CGROUP_INET6_POST_BIND),
8027         BPF_APROG_SEC("cgroup/dev",             BPF_PROG_TYPE_CGROUP_DEVICE,
8028                                                 BPF_CGROUP_DEVICE),
8029         BPF_APROG_SEC("sockops",                BPF_PROG_TYPE_SOCK_OPS,
8030                                                 BPF_CGROUP_SOCK_OPS),
8031         BPF_APROG_SEC("sk_skb/stream_parser",   BPF_PROG_TYPE_SK_SKB,
8032                                                 BPF_SK_SKB_STREAM_PARSER),
8033         BPF_APROG_SEC("sk_skb/stream_verdict",  BPF_PROG_TYPE_SK_SKB,
8034                                                 BPF_SK_SKB_STREAM_VERDICT),
8035         BPF_APROG_COMPAT("sk_skb",              BPF_PROG_TYPE_SK_SKB),
8036         BPF_APROG_SEC("sk_msg",                 BPF_PROG_TYPE_SK_MSG,
8037                                                 BPF_SK_MSG_VERDICT),
8038         BPF_APROG_SEC("lirc_mode2",             BPF_PROG_TYPE_LIRC_MODE2,
8039                                                 BPF_LIRC_MODE2),
8040         BPF_APROG_SEC("flow_dissector",         BPF_PROG_TYPE_FLOW_DISSECTOR,
8041                                                 BPF_FLOW_DISSECTOR),
8042         BPF_EAPROG_SEC("cgroup/bind4",          BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8043                                                 BPF_CGROUP_INET4_BIND),
8044         BPF_EAPROG_SEC("cgroup/bind6",          BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8045                                                 BPF_CGROUP_INET6_BIND),
8046         BPF_EAPROG_SEC("cgroup/connect4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8047                                                 BPF_CGROUP_INET4_CONNECT),
8048         BPF_EAPROG_SEC("cgroup/connect6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8049                                                 BPF_CGROUP_INET6_CONNECT),
8050         BPF_EAPROG_SEC("cgroup/sendmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8051                                                 BPF_CGROUP_UDP4_SENDMSG),
8052         BPF_EAPROG_SEC("cgroup/sendmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8053                                                 BPF_CGROUP_UDP6_SENDMSG),
8054         BPF_EAPROG_SEC("cgroup/recvmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8055                                                 BPF_CGROUP_UDP4_RECVMSG),
8056         BPF_EAPROG_SEC("cgroup/recvmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8057                                                 BPF_CGROUP_UDP6_RECVMSG),
8058         BPF_EAPROG_SEC("cgroup/getpeername4",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8059                                                 BPF_CGROUP_INET4_GETPEERNAME),
8060         BPF_EAPROG_SEC("cgroup/getpeername6",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8061                                                 BPF_CGROUP_INET6_GETPEERNAME),
8062         BPF_EAPROG_SEC("cgroup/getsockname4",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8063                                                 BPF_CGROUP_INET4_GETSOCKNAME),
8064         BPF_EAPROG_SEC("cgroup/getsockname6",   BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8065                                                 BPF_CGROUP_INET6_GETSOCKNAME),
8066         BPF_EAPROG_SEC("cgroup/sysctl",         BPF_PROG_TYPE_CGROUP_SYSCTL,
8067                                                 BPF_CGROUP_SYSCTL),
8068         BPF_EAPROG_SEC("cgroup/getsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
8069                                                 BPF_CGROUP_GETSOCKOPT),
8070         BPF_EAPROG_SEC("cgroup/setsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
8071                                                 BPF_CGROUP_SETSOCKOPT),
8072         BPF_PROG_SEC("struct_ops",              BPF_PROG_TYPE_STRUCT_OPS),
8073         BPF_EAPROG_SEC("sk_lookup/",            BPF_PROG_TYPE_SK_LOOKUP,
8074                                                 BPF_SK_LOOKUP),
8075 };
8076
8077 #undef BPF_PROG_SEC_IMPL
8078 #undef BPF_PROG_SEC
8079 #undef BPF_APROG_SEC
8080 #undef BPF_EAPROG_SEC
8081 #undef BPF_APROG_COMPAT
8082 #undef SEC_DEF
8083
8084 #define MAX_TYPE_NAME_SIZE 32
8085
8086 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8087 {
8088         int i, n = ARRAY_SIZE(section_defs);
8089
8090         for (i = 0; i < n; i++) {
8091                 if (strncmp(sec_name,
8092                             section_defs[i].sec, section_defs[i].len))
8093                         continue;
8094                 return &section_defs[i];
8095         }
8096         return NULL;
8097 }
8098
8099 static char *libbpf_get_type_names(bool attach_type)
8100 {
8101         int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
8102         char *buf;
8103
8104         buf = malloc(len);
8105         if (!buf)
8106                 return NULL;
8107
8108         buf[0] = '\0';
8109         /* Build a string listing all available names */
8110         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8111                 if (attach_type && !section_defs[i].is_attachable)
8112                         continue;
8113
8114                 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
8115                         free(buf);
8116                         return NULL;
8117                 }
8118                 strcat(buf, " ");
8119                 strcat(buf, section_defs[i].sec);
8120         }
8121
8122         return buf;
8123 }
8124
8125 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8126                              enum bpf_attach_type *expected_attach_type)
8127 {
8128         const struct bpf_sec_def *sec_def;
8129         char *type_names;
8130
8131         if (!name)
8132                 return libbpf_err(-EINVAL);
8133
8134         sec_def = find_sec_def(name);
8135         if (sec_def) {
8136                 *prog_type = sec_def->prog_type;
8137                 *expected_attach_type = sec_def->expected_attach_type;
8138                 return 0;
8139         }
8140
8141         pr_debug("failed to guess program type from ELF section '%s'\n", name);
8142         type_names = libbpf_get_type_names(false);
8143         if (type_names != NULL) {
8144                 pr_debug("supported section(type) names are:%s\n", type_names);
8145                 free(type_names);
8146         }
8147
8148         return libbpf_err(-ESRCH);
8149 }
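
/*
 * Usage sketch: map an ELF section name to load-time types; per
 * section_defs[] above, "tp_btf/sched_switch" yields
 * BPF_PROG_TYPE_TRACING with BPF_TRACE_RAW_TP:
 *
 *	enum bpf_prog_type type;
 *	enum bpf_attach_type attach;
 *
 *	err = libbpf_prog_type_by_name("tp_btf/sched_switch", &type, &attach);
 */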
8150
8151 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8152                                                      size_t offset)
8153 {
8154         struct bpf_map *map;
8155         size_t i;
8156
8157         for (i = 0; i < obj->nr_maps; i++) {
8158                 map = &obj->maps[i];
8159                 if (!bpf_map__is_struct_ops(map))
8160                         continue;
8161                 if (map->sec_offset <= offset &&
8162                     offset - map->sec_offset < map->def.value_size)
8163                         return map;
8164         }
8165
8166         return NULL;
8167 }
8168
8169 /* Collect relocations from the ELF and populate st_ops->progs[] */
8170 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8171                                             GElf_Shdr *shdr, Elf_Data *data)
8172 {
8173         const struct btf_member *member;
8174         struct bpf_struct_ops *st_ops;
8175         struct bpf_program *prog;
8176         unsigned int shdr_idx;
8177         const struct btf *btf;
8178         struct bpf_map *map;
8179         Elf_Data *symbols;
8180         unsigned int moff, insn_idx;
8181         const char *name;
8182         __u32 member_idx;
8183         GElf_Sym sym;
8184         GElf_Rel rel;
8185         int i, nrels;
8186
8187         symbols = obj->efile.symbols;
8188         btf = obj->btf;
8189         nrels = shdr->sh_size / shdr->sh_entsize;
8190         for (i = 0; i < nrels; i++) {
8191                 if (!gelf_getrel(data, i, &rel)) {
8192                         pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
8193                         return -LIBBPF_ERRNO__FORMAT;
8194                 }
8195
8196                 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
8197                         pr_warn("struct_ops reloc: symbol %zx not found\n",
8198                                 (size_t)GELF_R_SYM(rel.r_info));
8199                         return -LIBBPF_ERRNO__FORMAT;
8200                 }
8201
8202                 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
8203                 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
8204                 if (!map) {
8205                         pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
8206                                 (size_t)rel.r_offset);
8207                         return -EINVAL;
8208                 }
8209
8210                 moff = rel.r_offset - map->sec_offset;
8211                 shdr_idx = sym.st_shndx;
8212                 st_ops = map->st_ops;
8213                 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
8214                          map->name,
8215                          (long long)(rel.r_info >> 32),
8216                          (long long)sym.st_value,
8217                          shdr_idx, (size_t)rel.r_offset,
8218                          map->sec_offset, sym.st_name, name);
8219
8220                 if (shdr_idx >= SHN_LORESERVE) {
8221                         pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
8222                                 map->name, (size_t)rel.r_offset, shdr_idx);
8223                         return -LIBBPF_ERRNO__RELOC;
8224                 }
8225                 if (sym.st_value % BPF_INSN_SZ) {
8226                         pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
8227                                 map->name, (unsigned long long)sym.st_value);
8228                         return -LIBBPF_ERRNO__FORMAT;
8229                 }
8230                 insn_idx = sym.st_value / BPF_INSN_SZ;
8231
8232                 member = find_member_by_offset(st_ops->type, moff * 8);
8233                 if (!member) {
8234                         pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
8235                                 map->name, moff);
8236                         return -EINVAL;
8237                 }
8238                 member_idx = member - btf_members(st_ops->type);
8239                 name = btf__name_by_offset(btf, member->name_off);
8240
8241                 if (!resolve_func_ptr(btf, member->type, NULL)) {
8242                         pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
8243                                 map->name, name);
8244                         return -EINVAL;
8245                 }
8246
8247                 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
8248                 if (!prog) {
8249                         pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
8250                                 map->name, shdr_idx, name);
8251                         return -EINVAL;
8252                 }
8253
8254                 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
8255                         const struct bpf_sec_def *sec_def;
8256
8257                         sec_def = find_sec_def(prog->sec_name);
8258                         if (sec_def &&
8259                             sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
8260                                 /* set type only so the pr_warn below can report it */
8261                                 prog->type = sec_def->prog_type;
8262                                 goto invalid_prog;
8263                         }
8264
8265                         prog->type = BPF_PROG_TYPE_STRUCT_OPS;
8266                         prog->attach_btf_id = st_ops->type_id;
8267                         prog->expected_attach_type = member_idx;
8268                 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
8269                            prog->attach_btf_id != st_ops->type_id ||
8270                            prog->expected_attach_type != member_idx) {
8271                         goto invalid_prog;
8272                 }
8273                 st_ops->progs[member_idx] = prog;
8274         }
8275
8276         return 0;
8277
8278 invalid_prog:
8279         pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
8280                 map->name, prog->name, prog->sec_name, prog->type,
8281                 prog->attach_btf_id, prog->expected_attach_type, name);
8282         return -EINVAL;
8283 }
8284
8285 #define BTF_TRACE_PREFIX "btf_trace_"
8286 #define BTF_LSM_PREFIX "bpf_lsm_"
8287 #define BTF_ITER_PREFIX "bpf_iter_"
8288 #define BTF_MAX_NAME_SIZE 128
8289
8290 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
8291                                 const char **prefix, int *kind)
8292 {
8293         switch (attach_type) {
8294         case BPF_TRACE_RAW_TP:
8295                 *prefix = BTF_TRACE_PREFIX;
8296                 *kind = BTF_KIND_TYPEDEF;
8297                 break;
8298         case BPF_LSM_MAC:
8299                 *prefix = BTF_LSM_PREFIX;
8300                 *kind = BTF_KIND_FUNC;
8301                 break;
8302         case BPF_TRACE_ITER:
8303                 *prefix = BTF_ITER_PREFIX;
8304                 *kind = BTF_KIND_FUNC;
8305                 break;
8306         default:
8307                 *prefix = "";
8308                 *kind = BTF_KIND_FUNC;
8309         }
8310 }
8311
8312 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8313                                    const char *name, __u32 kind)
8314 {
8315         char btf_type_name[BTF_MAX_NAME_SIZE];
8316         int ret;
8317
8318         ret = snprintf(btf_type_name, sizeof(btf_type_name),
8319                        "%s%s", prefix, name);
8320         /* snprintf returns the number of characters that would have been
8321          * written (excluding the terminating null), so a return value
8322          * >= BTF_MAX_NAME_SIZE indicates truncation.
8323          */
8324         if (ret < 0 || ret >= sizeof(btf_type_name))
8325                 return -ENAMETOOLONG;
8326         return btf__find_by_name_kind(btf, btf_type_name, kind);
8327 }
8328
8329 static inline int find_attach_btf_id(struct btf *btf, const char *name,
8330                                      enum bpf_attach_type attach_type)
8331 {
8332         const char *prefix;
8333         int kind;
8334
8335         btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
8336         return find_btf_by_prefix_kind(btf, prefix, name, kind);
8337 }
8338
8339 int libbpf_find_vmlinux_btf_id(const char *name,
8340                                enum bpf_attach_type attach_type)
8341 {
8342         struct btf *btf;
8343         int err;
8344
8345         btf = btf__load_vmlinux_btf();
8346         err = libbpf_get_error(btf);
8347         if (err) {
8348                 pr_warn("vmlinux BTF is not found\n");
8349                 return libbpf_err(err);
8350         }
8351
8352         err = find_attach_btf_id(btf, name, attach_type);
8353         if (err <= 0)
8354                 pr_warn("%s is not found in vmlinux BTF\n", name);
8355
8356         btf__free(btf);
8357         return libbpf_err(err);
8358 }
8359
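/* Editor's note: a minimal usage sketch (not part of the original source),
 * showing how a caller might resolve the vmlinux BTF ID of an fentry attach
 * target; the kernel function name "tcp_v4_connect" is just an example:
 *
 *	int id = libbpf_find_vmlinux_btf_id("tcp_v4_connect", BPF_TRACE_FENTRY);
 *
 *	if (id < 0)
 *		fprintf(stderr, "BTF ID lookup failed: %d\n", id);
 */
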
8360 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
8361 {
8362         struct bpf_prog_info_linear *info_linear;
8363         struct bpf_prog_info *info;
8364         struct btf *btf;
8365         int err;
8366
8367         info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
8368         err = libbpf_get_error(info_linear);
8369         if (err) {
8370                 pr_warn("failed to get prog_info_linear for FD %d\n",
8371                         attach_prog_fd);
8372                 return err;
8373         }
8374
8375         err = -EINVAL;
8376         info = &info_linear->info;
8377         if (!info->btf_id) {
8378                 pr_warn("The target program doesn't have BTF\n");
8379                 goto out;
8380         }
8381         btf = btf__load_from_kernel_by_id(info->btf_id);
8382         if (libbpf_get_error(btf)) {
8383                 pr_warn("Failed to get BTF of the program\n");
8384                 goto out;
8385         }
8386         err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8387         btf__free(btf);
8388         if (err <= 0) {
8389                 pr_warn("%s is not found in prog's BTF\n", name);
8390                 goto out;
8391         }
8392 out:
8393         free(info_linear);
8394         return err;
8395 }
8396
8397 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
8398                               enum bpf_attach_type attach_type,
8399                               int *btf_obj_fd, int *btf_type_id)
8400 {
8401         int ret, i;
8402
8403         ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
8404         if (ret > 0) {
8405                 *btf_obj_fd = 0; /* vmlinux BTF */
8406                 *btf_type_id = ret;
8407                 return 0;
8408         }
8409         if (ret != -ENOENT)
8410                 return ret;
8411
8412         ret = load_module_btfs(obj);
8413         if (ret)
8414                 return ret;
8415
8416         for (i = 0; i < obj->btf_module_cnt; i++) {
8417                 const struct module_btf *mod = &obj->btf_modules[i];
8418
8419                 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
8420                 if (ret > 0) {
8421                         *btf_obj_fd = mod->fd;
8422                         *btf_type_id = ret;
8423                         return 0;
8424                 }
8425                 if (ret == -ENOENT)
8426                         continue;
8427
8428                 return ret;
8429         }
8430
8431         return -ESRCH;
8432 }
8433
8434 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
8435 {
8436         enum bpf_attach_type attach_type = prog->expected_attach_type;
8437         __u32 attach_prog_fd = prog->attach_prog_fd;
8438         const char *name = prog->sec_name, *attach_name;
8439         const struct bpf_sec_def *sec = NULL;
8440         int i, err = 0;
8441
8442         if (!name)
8443                 return -EINVAL;
8444
8445         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8446                 if (!section_defs[i].is_attach_btf)
8447                         continue;
8448                 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
8449                         continue;
8450
8451                 sec = &section_defs[i];
8452                 break;
8453         }
8454
8455         if (!sec) {
8456                 pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
8457                 return -ESRCH;
8458         }
8459         attach_name = name + sec->len;
8460
8461         /* BPF program's BTF ID */
8462         if (attach_prog_fd) {
8463                 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
8464                 if (err < 0) {
8465                         pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
8466                                  attach_prog_fd, attach_name, err);
8467                         return err;
8468                 }
8469                 *btf_obj_fd = 0;
8470                 *btf_type_id = err;
8471                 return 0;
8472         }
8473
8474         /* kernel/module BTF ID */
8475         if (prog->obj->gen_loader) {
8476                 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
8477                 *btf_obj_fd = 0;
8478                 *btf_type_id = 1;
8479         } else {
8480                 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
8481         }
8482         if (err) {
8483                 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
8484                 return err;
8485         }
8486         return 0;
8487 }
8488
8489 int libbpf_attach_type_by_name(const char *name,
8490                                enum bpf_attach_type *attach_type)
8491 {
8492         char *type_names;
8493         int i;
8494
8495         if (!name)
8496                 return libbpf_err(-EINVAL);
8497
8498         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8499                 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
8500                         continue;
8501                 if (!section_defs[i].is_attachable)
8502                         return libbpf_err(-EINVAL);
8503                 *attach_type = section_defs[i].expected_attach_type;
8504                 return 0;
8505         }
8506         pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
8507         type_names = libbpf_get_type_names(true);
8508         if (type_names != NULL) {
8509                 pr_debug("attachable section(type) names are:%s\n", type_names);
8510                 free(type_names);
8511         }
8512
8513         return libbpf_err(-EINVAL);
8514 }
8515
8516 int bpf_map__fd(const struct bpf_map *map)
8517 {
8518         return map ? map->fd : libbpf_err(-EINVAL);
8519 }
8520
8521 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
8522 {
8523         return map ? &map->def : libbpf_err_ptr(-EINVAL);
8524 }
8525
8526 const char *bpf_map__name(const struct bpf_map *map)
8527 {
8528         return map ? map->name : NULL;
8529 }
8530
8531 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
8532 {
8533         return map->def.type;
8534 }
8535
8536 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
8537 {
8538         if (map->fd >= 0)
8539                 return libbpf_err(-EBUSY);
8540         map->def.type = type;
8541         return 0;
8542 }
8543
8544 __u32 bpf_map__map_flags(const struct bpf_map *map)
8545 {
8546         return map->def.map_flags;
8547 }
8548
8549 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
8550 {
8551         if (map->fd >= 0)
8552                 return libbpf_err(-EBUSY);
8553         map->def.map_flags = flags;
8554         return 0;
8555 }
8556
8557 __u32 bpf_map__numa_node(const struct bpf_map *map)
8558 {
8559         return map->numa_node;
8560 }
8561
8562 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
8563 {
8564         if (map->fd >= 0)
8565                 return libbpf_err(-EBUSY);
8566         map->numa_node = numa_node;
8567         return 0;
8568 }
8569
8570 __u32 bpf_map__key_size(const struct bpf_map *map)
8571 {
8572         return map->def.key_size;
8573 }
8574
8575 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
8576 {
8577         if (map->fd >= 0)
8578                 return libbpf_err(-EBUSY);
8579         map->def.key_size = size;
8580         return 0;
8581 }
8582
8583 __u32 bpf_map__value_size(const struct bpf_map *map)
8584 {
8585         return map->def.value_size;
8586 }
8587
8588 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
8589 {
8590         if (map->fd >= 0)
8591                 return libbpf_err(-EBUSY);
8592         map->def.value_size = size;
8593         return 0;
8594 }
8595
8596 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
8597 {
8598         return map ? map->btf_key_type_id : 0;
8599 }
8600
8601 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
8602 {
8603         return map ? map->btf_value_type_id : 0;
8604 }
8605
8606 int bpf_map__set_priv(struct bpf_map *map, void *priv,
8607                      bpf_map_clear_priv_t clear_priv)
8608 {
8609         if (!map)
8610                 return libbpf_err(-EINVAL);
8611
8612         if (map->priv) {
8613                 if (map->clear_priv)
8614                         map->clear_priv(map, map->priv);
8615         }
8616
8617         map->priv = priv;
8618         map->clear_priv = clear_priv;
8619         return 0;
8620 }
8621
8622 void *bpf_map__priv(const struct bpf_map *map)
8623 {
8624         return map ? map->priv : libbpf_err_ptr(-EINVAL);
8625 }
8626
8627 int bpf_map__set_initial_value(struct bpf_map *map,
8628                                const void *data, size_t size)
8629 {
8630         if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
8631             size != map->def.value_size || map->fd >= 0)
8632                 return libbpf_err(-EINVAL);
8633
8634         memcpy(map->mmaped, data, size);
8635         return 0;
8636 }
8637
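/* Editor's note: a minimal usage sketch (not part of the original source).
 * Between open and load, a global-data map's contents can be overridden
 * wholesale; the map name "prog.rodata" and the "skel_rodata" struct are
 * assumptions here:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "prog.rodata");
 *
 *	if (map)
 *		bpf_map__set_initial_value(map, &skel_rodata,
 *					   sizeof(skel_rodata));
 */
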
8638 const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
8639 {
8640         if (!map->mmaped)
8641                 return NULL;
8642         *psize = map->def.value_size;
8643         return map->mmaped;
8644 }
8645
8646 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
8647 {
8648         return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
8649 }
8650
8651 bool bpf_map__is_internal(const struct bpf_map *map)
8652 {
8653         return map->libbpf_type != LIBBPF_MAP_UNSPEC;
8654 }
8655
8656 __u32 bpf_map__ifindex(const struct bpf_map *map)
8657 {
8658         return map->map_ifindex;
8659 }
8660
8661 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
8662 {
8663         if (map->fd >= 0)
8664                 return libbpf_err(-EBUSY);
8665         map->map_ifindex = ifindex;
8666         return 0;
8667 }
8668
8669 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
8670 {
8671         if (!bpf_map_type__is_map_in_map(map->def.type)) {
8672                 pr_warn("error: unsupported map type\n");
8673                 return libbpf_err(-EINVAL);
8674         }
8675         if (map->inner_map_fd != -1) {
8676                 pr_warn("error: inner_map_fd already specified\n");
8677                 return libbpf_err(-EINVAL);
8678         }
8679         if (map->inner_map) {
8680                 bpf_map__destroy(map->inner_map);
8681                 zfree(&map->inner_map);
8682         }
8683         map->inner_map_fd = fd;
8684         return 0;
8685 }
8686
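/* Editor's note: a minimal usage sketch (not part of the original source),
 * assuming "outer" is an ARRAY_OF_MAPS/HASH_OF_MAPS map from the object.
 * A prototype inner map is created directly and handed to libbpf before
 * load, replacing any BTF-declared inner map definition:
 *
 *	int inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY,
 *				      sizeof(int), sizeof(long), 8, 0);
 *
 *	if (inner_fd >= 0)
 *		bpf_map__set_inner_map_fd(outer, inner_fd);
 */
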
8687 static struct bpf_map *
8688 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
8689 {
8690         ssize_t idx;
8691         struct bpf_map *s, *e;
8692
8693         if (!obj || !obj->maps)
8694                 return errno = EINVAL, NULL;
8695
8696         s = obj->maps;
8697         e = obj->maps + obj->nr_maps;
8698
8699         if ((m < s) || (m >= e)) {
8700                 pr_warn("error in %s: map handle doesn't belong to object\n",
8701                          __func__);
8702                 return errno = EINVAL, NULL;
8703         }
8704
8705         idx = (m - obj->maps) + i;
8706         if (idx >= obj->nr_maps || idx < 0)
8707                 return NULL;
8708         return &obj->maps[idx];
8709 }
8710
8711 struct bpf_map *
8712 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
8713 {
8714         if (prev == NULL)
8715                 return obj->maps;
8716
8717         return __bpf_map__iter(prev, obj, 1);
8718 }
8719
8720 struct bpf_map *
8721 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
8722 {
8723         if (next == NULL) {
8724                 if (!obj->nr_maps)
8725                         return NULL;
8726                 return obj->maps + obj->nr_maps - 1;
8727         }
8728
8729         return __bpf_map__iter(next, obj, -1);
8730 }
8731
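/* Editor's note: a minimal usage sketch (not part of the original source).
 * bpf_map__next()/bpf_map__prev() back the bpf_object__for_each_map()
 * macro from libbpf.h, which is the usual way to walk an object's maps:
 *
 *	struct bpf_map *map;
 *
 *	bpf_object__for_each_map(map, obj)
 *		printf("map: %s\n", bpf_map__name(map));
 */
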
8732 struct bpf_map *
8733 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
8734 {
8735         struct bpf_map *pos;
8736
8737         bpf_object__for_each_map(pos, obj) {
8738                 if (pos->name && !strcmp(pos->name, name))
8739                         return pos;
8740         }
8741         return errno = ENOENT, NULL;
8742 }
8743
8744 int
8745 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
8746 {
8747         return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
8748 }
8749
8750 struct bpf_map *
8751 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
8752 {
8753         return libbpf_err_ptr(-ENOTSUP);
8754 }
8755
8756 long libbpf_get_error(const void *ptr)
8757 {
8758         if (!IS_ERR_OR_NULL(ptr))
8759                 return 0;
8760
8761         if (IS_ERR(ptr))
8762                 errno = -PTR_ERR(ptr);
8763
8764         /* If ptr == NULL, then errno should be already set by the failing
8765          * API, because libbpf never returns NULL on success and it now always
8766          * sets errno on error. So no extra errno handling for ptr == NULL
8767          * case.
8768          */
8769         return -errno;
8770 }
8771
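/* Editor's note: a minimal usage sketch (not part of the original source),
 * showing the usual error-checking pattern for pointer-returning libbpf
 * APIs; the object file name is an assumption:
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *	long err = libbpf_get_error(obj);
 *
 *	if (err) {
 *		fprintf(stderr, "failed to open object: %ld\n", err);
 *		return err;
 *	}
 */
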
8772 int bpf_prog_load(const char *file, enum bpf_prog_type type,
8773                   struct bpf_object **pobj, int *prog_fd)
8774 {
8775         struct bpf_prog_load_attr attr;
8776
8777         memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
8778         attr.file = file;
8779         attr.prog_type = type;
8780         attr.expected_attach_type = 0;
8781
8782         return bpf_prog_load_xattr(&attr, pobj, prog_fd);
8783 }
8784
8785 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
8786                         struct bpf_object **pobj, int *prog_fd)
8787 {
8788         struct bpf_object_open_attr open_attr = {};
8789         struct bpf_program *prog, *first_prog = NULL;
8790         struct bpf_object *obj;
8791         struct bpf_map *map;
8792         int err;
8793
8794         if (!attr)
8795                 return libbpf_err(-EINVAL);
8796         if (!attr->file)
8797                 return libbpf_err(-EINVAL);
8798
8799         open_attr.file = attr->file;
8800         open_attr.prog_type = attr->prog_type;
8801
8802         obj = bpf_object__open_xattr(&open_attr);
8803         err = libbpf_get_error(obj);
8804         if (err)
8805                 return libbpf_err(-ENOENT);
8806
8807         bpf_object__for_each_program(prog, obj) {
8808                 enum bpf_attach_type attach_type = attr->expected_attach_type;
8809                 /*
8810                  * to preserve backwards compatibility, bpf_prog_load treats
8811                  * attr->prog_type, if specified, as an override to whatever
8812                  * bpf_object__open guessed
8813                  */
8814                 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
8815                         bpf_program__set_type(prog, attr->prog_type);
8816                         bpf_program__set_expected_attach_type(prog,
8817                                                               attach_type);
8818                 }
8819                 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
8820                         /*
8821                          * we haven't guessed from section name and user
8822                          * didn't provide a fallback type, too bad...
8823                          */
8824                         bpf_object__close(obj);
8825                         return libbpf_err(-EINVAL);
8826                 }
8827
8828                 prog->prog_ifindex = attr->ifindex;
8829                 prog->log_level = attr->log_level;
8830                 prog->prog_flags |= attr->prog_flags;
8831                 if (!first_prog)
8832                         first_prog = prog;
8833         }
8834
8835         bpf_object__for_each_map(map, obj) {
8836                 if (!bpf_map__is_offload_neutral(map))
8837                         map->map_ifindex = attr->ifindex;
8838         }
8839
8840         if (!first_prog) {
8841                 pr_warn("object file doesn't contain bpf program\n");
8842                 bpf_object__close(obj);
8843                 return libbpf_err(-ENOENT);
8844         }
8845
8846         err = bpf_object__load(obj);
8847         if (err) {
8848                 bpf_object__close(obj);
8849                 return libbpf_err(err);
8850         }
8851
8852         *pobj = obj;
8853         *prog_fd = bpf_program__fd(first_prog);
8854         return 0;
8855 }
8856
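/* Editor's note: a minimal usage sketch (not part of the original source),
 * assuming an object file "prog.bpf.o" containing one XDP program:
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (!bpf_prog_load("prog.bpf.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		printf("loaded, first prog FD: %d\n", prog_fd);
 */
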
8857 struct bpf_link {
8858         int (*detach)(struct bpf_link *link);
8859         void (*dealloc)(struct bpf_link *link);
8860         char *pin_path;         /* NULL, if not pinned */
8861         int fd;                 /* hook FD, -1 if not applicable */
8862         bool disconnected;
8863 };
8864
8865 /* Replace link's underlying BPF program with the new one */
8866 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
8867 {
8868         int ret;
8869
8870         ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
8871         return libbpf_err_errno(ret);
8872 }
8873
8874 /* Release "ownership" of the underlying BPF resource (typically, a BPF
8875  * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
8876  * disconnected link, when destroyed through a bpf_link__destroy() call,
8877  * won't attempt to detach/unregister that BPF resource. This is useful in
8878  * situations where, say, an attached BPF program has to outlive the
8879  * userspace program that attached it. Depending on the type of BPF program,
8880  * though, additional steps (like pinning the BPF program in BPF FS) might
8881  * be necessary to ensure that the exit of the userspace program doesn't
8882  * trigger automatic detachment and cleanup inside the kernel.
8883  */
8884 void bpf_link__disconnect(struct bpf_link *link)
8885 {
8886         link->disconnected = true;
8887 }
8888
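/* Editor's note: a minimal sketch (not part of the original source) of the
 * pin-then-disconnect pattern described above; the pin path is an
 * assumption:
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	if (!libbpf_get_error(link) &&
 *	    !bpf_link__pin(link, "/sys/fs/bpf/mylink")) {
 *		bpf_link__disconnect(link);
 *		bpf_link__destroy(link); // link stays attached in the kernel
 *	}
 */
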
8889 int bpf_link__destroy(struct bpf_link *link)
8890 {
8891         int err = 0;
8892
8893         if (IS_ERR_OR_NULL(link))
8894                 return 0;
8895
8896         if (!link->disconnected && link->detach)
8897                 err = link->detach(link);
8898         if (link->pin_path)
8899                 free(link->pin_path);
8900         if (link->dealloc)
8901                 link->dealloc(link);
8902         else
8903                 free(link);
8904
8905         return libbpf_err(err);
8906 }
8907
8908 int bpf_link__fd(const struct bpf_link *link)
8909 {
8910         return link->fd;
8911 }
8912
8913 const char *bpf_link__pin_path(const struct bpf_link *link)
8914 {
8915         return link->pin_path;
8916 }
8917
8918 static int bpf_link__detach_fd(struct bpf_link *link)
8919 {
8920         return libbpf_err_errno(close(link->fd));
8921 }
8922
8923 struct bpf_link *bpf_link__open(const char *path)
8924 {
8925         struct bpf_link *link;
8926         int fd;
8927
8928         fd = bpf_obj_get(path);
8929         if (fd < 0) {
8930                 fd = -errno;
8931                 pr_warn("failed to open link at %s: %d\n", path, fd);
8932                 return libbpf_err_ptr(fd);
8933         }
8934
8935         link = calloc(1, sizeof(*link));
8936         if (!link) {
8937                 close(fd);
8938                 return libbpf_err_ptr(-ENOMEM);
8939         }
8940         link->detach = &bpf_link__detach_fd;
8941         link->fd = fd;
8942
8943         link->pin_path = strdup(path);
8944         if (!link->pin_path) {
8945                 bpf_link__destroy(link);
8946                 return libbpf_err_ptr(-ENOMEM);
8947         }
8948
8949         return link;
8950 }
8951
8952 int bpf_link__detach(struct bpf_link *link)
8953 {
8954         return bpf_link_detach(link->fd) ? -errno : 0;
8955 }
8956
8957 int bpf_link__pin(struct bpf_link *link, const char *path)
8958 {
8959         int err;
8960
8961         if (link->pin_path)
8962                 return libbpf_err(-EBUSY);
8963         err = make_parent_dir(path);
8964         if (err)
8965                 return libbpf_err(err);
8966         err = check_path(path);
8967         if (err)
8968                 return libbpf_err(err);
8969
8970         link->pin_path = strdup(path);
8971         if (!link->pin_path)
8972                 return libbpf_err(-ENOMEM);
8973
8974         if (bpf_obj_pin(link->fd, link->pin_path)) {
8975                 err = -errno;
8976                 zfree(&link->pin_path);
8977                 return libbpf_err(err);
8978         }
8979
8980         pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
8981         return 0;
8982 }
8983
8984 int bpf_link__unpin(struct bpf_link *link)
8985 {
8986         int err;
8987
8988         if (!link->pin_path)
8989                 return libbpf_err(-EINVAL);
8990
8991         err = unlink(link->pin_path);
8992         if (err != 0)
8993                 return libbpf_err(-errno);
8994
8995         pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
8996         zfree(&link->pin_path);
8997         return 0;
8998 }
8999
9000 struct bpf_link_perf {
9001         struct bpf_link link;
9002         int perf_event_fd;
9003 };
9004
9005 static int bpf_link_perf_detach(struct bpf_link *link)
9006 {
9007         struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
9008         int err = 0;
9009
9010         if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
9011                 err = -errno;
9012
9013         if (perf_link->perf_event_fd != link->fd)
9014                 close(perf_link->perf_event_fd);
9015         close(link->fd);
9016
9017         return libbpf_err(err);
9018 }
9019
9020 static void bpf_link_perf_dealloc(struct bpf_link *link)
9021 {
9022         struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
9023
9024         free(perf_link);
9025 }
9026
9027 struct bpf_link *bpf_program__attach_perf_event_opts(struct bpf_program *prog, int pfd,
9028                                                      const struct bpf_perf_event_opts *opts)
9029 {
9030         char errmsg[STRERR_BUFSIZE];
9031         struct bpf_link_perf *link;
9032         int prog_fd, link_fd = -1, err;
9033
9034         if (!OPTS_VALID(opts, bpf_perf_event_opts))
9035                 return libbpf_err_ptr(-EINVAL);
9036
9037         if (pfd < 0) {
9038                 pr_warn("prog '%s': invalid perf event FD %d\n",
9039                         prog->name, pfd);
9040                 return libbpf_err_ptr(-EINVAL);
9041         }
9042         prog_fd = bpf_program__fd(prog);
9043         if (prog_fd < 0) {
9044                 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9045                         prog->name);
9046                 return libbpf_err_ptr(-EINVAL);
9047         }
9048
9049         link = calloc(1, sizeof(*link));
9050         if (!link)
9051                 return libbpf_err_ptr(-ENOMEM);
9052         link->link.detach = &bpf_link_perf_detach;
9053         link->link.dealloc = &bpf_link_perf_dealloc;
9054         link->perf_event_fd = pfd;
9055
9056         if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
9057                 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
9058                         .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
9059
9060                 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
9061                 if (link_fd < 0) {
9062                         err = -errno;
9063                         pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
9064                                 prog->name, pfd,
9065                                 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9066                         goto err_out;
9067                 }
9068                 link->link.fd = link_fd;
9069         } else {
9070                 if (OPTS_GET(opts, bpf_cookie, 0)) {
9071                         pr_warn("prog '%s': user context value is not supported\n", prog->name);
9072                         err = -EOPNOTSUPP;
9073                         goto err_out;
9074                 }
9075
9076                 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9077                         err = -errno;
9078                         pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
9079                                 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9080                         if (err == -EPROTO)
9081                                 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
9082                                         prog->name, pfd);
9083                         goto err_out;
9084                 }
9085                 link->link.fd = pfd;
9086         }
9087         if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9088                 err = -errno;
9089                 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
9090                         prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9091                 goto err_out;
9092         }
9093
9094         return &link->link;
9095 err_out:
9096         if (link_fd >= 0)
9097                 close(link_fd);
9098         free(link);
9099         return libbpf_err_ptr(err);
9100 }
9101
9102 struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
9103 {
9104         return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
9105 }
9106
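/* Editor's note: a minimal usage sketch (not part of the original source),
 * attaching a loaded program to a software CPU-clock sampling event on
 * CPU 0; the 99 Hz sampling frequency is an arbitrary example:
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *		.size = sizeof(attr),
 *		.sample_freq = 99,
 *		.freq = 1,
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1 /\* pid *\/,
 *			  0 /\* cpu *\/, -1 /\* group_fd *\/,
 *			  PERF_FLAG_FD_CLOEXEC);
 *	struct bpf_link *link = bpf_program__attach_perf_event(prog, pfd);
 */
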
9107 /*
9108  * This function is expected to parse an integer in the range of [0, 2^31-1]
9109  * from the given file using scanf format string fmt. If the actual parsed
9110  * value is negative, the result might be indistinguishable from an error.
9111  */
9112 static int parse_uint_from_file(const char *file, const char *fmt)
9113 {
9114         char buf[STRERR_BUFSIZE];
9115         int err, ret;
9116         FILE *f;
9117
9118         f = fopen(file, "r");
9119         if (!f) {
9120                 err = -errno;
9121                 pr_debug("failed to open '%s': %s\n", file,
9122                          libbpf_strerror_r(err, buf, sizeof(buf)));
9123                 return err;
9124         }
9125         err = fscanf(f, fmt, &ret);
9126         if (err != 1) {
9127                 err = err == EOF ? -EIO : -errno;
9128                 pr_debug("failed to parse '%s': %s\n", file,
9129                         libbpf_strerror_r(err, buf, sizeof(buf)));
9130                 fclose(f);
9131                 return err;
9132         }
9133         fclose(f);
9134         return ret;
9135 }
9136
9137 static int determine_kprobe_perf_type(void)
9138 {
9139         const char *file = "/sys/bus/event_source/devices/kprobe/type";
9140
9141         return parse_uint_from_file(file, "%d\n");
9142 }
9143
9144 static int determine_uprobe_perf_type(void)
9145 {
9146         const char *file = "/sys/bus/event_source/devices/uprobe/type";
9147
9148         return parse_uint_from_file(file, "%d\n");
9149 }
9150
9151 static int determine_kprobe_retprobe_bit(void)
9152 {
9153         const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
9154
9155         return parse_uint_from_file(file, "config:%d\n");
9156 }
9157
9158 static int determine_uprobe_retprobe_bit(void)
9159 {
9160         const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
9161
9162         return parse_uint_from_file(file, "config:%d\n");
9163 }
9164
9165 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
9166 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
9167
9168 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9169                                  uint64_t offset, int pid, size_t ref_ctr_off)
9170 {
9171         struct perf_event_attr attr = {};
9172         char errmsg[STRERR_BUFSIZE];
9173         int type, pfd, err;
9174
9175         if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
9176                 return -EINVAL;
9177
9178         type = uprobe ? determine_uprobe_perf_type()
9179                       : determine_kprobe_perf_type();
9180         if (type < 0) {
9181                 pr_warn("failed to determine %s perf type: %s\n",
9182                         uprobe ? "uprobe" : "kprobe",
9183                         libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9184                 return type;
9185         }
9186         if (retprobe) {
9187                 int bit = uprobe ? determine_uprobe_retprobe_bit()
9188                                  : determine_kprobe_retprobe_bit();
9189
9190                 if (bit < 0) {
9191                         pr_warn("failed to determine %s retprobe bit: %s\n",
9192                                 uprobe ? "uprobe" : "kprobe",
9193                                 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
9194                         return bit;
9195                 }
9196                 attr.config |= 1 << bit;
9197         }
9198         attr.size = sizeof(attr);
9199         attr.type = type;
9200         attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
9201         attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
9202         attr.config2 = offset;           /* kprobe_addr or probe_offset */
9203
9204         /* pid filter is meaningful only for uprobes */
9205         pfd = syscall(__NR_perf_event_open, &attr,
9206                       pid < 0 ? -1 : pid /* pid */,
9207                       pid == -1 ? 0 : -1 /* cpu */,
9208                       -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9209         if (pfd < 0) {
9210                 err = -errno;
9211                 pr_warn("%s perf_event_open() failed: %s\n",
9212                         uprobe ? "uprobe" : "kprobe",
9213                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9214                 return err;
9215         }
9216         return pfd;
9217 }
9218
9219 struct bpf_link *
9220 bpf_program__attach_kprobe_opts(struct bpf_program *prog,
9221                                 const char *func_name,
9222                                 const struct bpf_kprobe_opts *opts)
9223 {
9224         DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
9225         char errmsg[STRERR_BUFSIZE];
9226         struct bpf_link *link;
9227         unsigned long offset;
9228         bool retprobe;
9229         int pfd, err;
9230
9231         if (!OPTS_VALID(opts, bpf_kprobe_opts))
9232                 return libbpf_err_ptr(-EINVAL);
9233
9234         retprobe = OPTS_GET(opts, retprobe, false);
9235         offset = OPTS_GET(opts, offset, 0);
9236         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
9237
9238         pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
9239                                     offset, -1 /* pid */, 0 /* ref_ctr_off */);
9240         if (pfd < 0) {
9241                 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
9242                         prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9243                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9244                 return libbpf_err_ptr(pfd);
9245         }
9246         link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
9247         err = libbpf_get_error(link);
9248         if (err) {
9249                 close(pfd);
9250                 pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
9251                         prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9252                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9253                 return libbpf_err_ptr(err);
9254         }
9255         return link;
9256 }
9257
9258 struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
9259                                             bool retprobe,
9260                                             const char *func_name)
9261 {
9262         DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
9263                 .retprobe = retprobe,
9264         );
9265
9266         return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
9267 }
9268
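/* Editor's note: a minimal usage sketch (not part of the original source);
 * the kernel function name is just an example:
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_kprobe(prog, false /\* retprobe *\/,
 *					   "do_sys_openat2");
 */
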
9269 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
9270                                       struct bpf_program *prog)
9271 {
9272         DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
9273         unsigned long offset = 0;
9274         struct bpf_link *link;
9275         const char *func_name;
9276         char *func;
9277         int n, err;
9278
9279         func_name = prog->sec_name + sec->len;
9280         opts.retprobe = strcmp(sec->sec, "kretprobe/") == 0;
9281
9282         n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
9283         if (n < 1) {
9284                 err = -EINVAL;
9285                 pr_warn("kprobe name is invalid: %s\n", func_name);
9286                 return libbpf_err_ptr(err);
9287         }
9288         if (opts.retprobe && offset != 0) {
9289                 free(func);
9290                 err = -EINVAL;
9291                 pr_warn("kretprobes do not support offset specification\n");
9292                 return libbpf_err_ptr(err);
9293         }
9294
9295         opts.offset = offset;
9296         link = bpf_program__attach_kprobe_opts(prog, func, &opts);
9297         free(func);
9298         return link;
9299 }
9300
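/* Editor's note (not part of the original source): the sscanf() in
 * attach_kprobe() above accepts section names like "kprobe/do_unlinkat"
 * (no offset, so offset stays 0) and "kprobe/do_unlinkat+0x10" (function
 * plus offset), while "kretprobe/do_unlinkat" with a nonzero offset is
 * rejected.
 */
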
9301 LIBBPF_API struct bpf_link *
9302 bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid,
9303                                 const char *binary_path, size_t func_offset,
9304                                 const struct bpf_uprobe_opts *opts)
9305 {
9306         DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
9307         char errmsg[STRERR_BUFSIZE];
9308         struct bpf_link *link;
9309         size_t ref_ctr_off;
9310         int pfd, err;
9311         bool retprobe;
9312
9313         if (!OPTS_VALID(opts, bpf_uprobe_opts))
9314                 return libbpf_err_ptr(-EINVAL);
9315
9316         retprobe = OPTS_GET(opts, retprobe, false);
9317         ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
9318         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
9319
9320         pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
9321                                     func_offset, pid, ref_ctr_off);
9322         if (pfd < 0) {
9323                 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
9324                         prog->name, retprobe ? "uretprobe" : "uprobe",
9325                         binary_path, func_offset,
9326                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9327                 return libbpf_err_ptr(pfd);
9328         }
9329         link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
9330         err = libbpf_get_error(link);
9331         if (err) {
9332                 close(pfd);
9333                 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
9334                         prog->name, retprobe ? "uretprobe" : "uprobe",
9335                         binary_path, func_offset,
9336                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9337                 return libbpf_err_ptr(err);
9338         }
9339         return link;
9340 }
9341
9342 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
9343                                             bool retprobe, pid_t pid,
9344                                             const char *binary_path,
9345                                             size_t func_offset)
9346 {
9347         DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
9348
9349         return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
9350 }
9351
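/* Editor's note: a minimal usage sketch (not part of the original source);
 * the binary path and function offset are assumptions (the offset would
 * normally come from the target binary's symbol table):
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_uprobe(prog, false /\* retprobe *\/,
 *					   -1 /\* any pid *\/, "/bin/bash",
 *					   0x1234 /\* func offset *\/);
 */
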
9352 static int determine_tracepoint_id(const char *tp_category,
9353                                    const char *tp_name)
9354 {
9355         char file[PATH_MAX];
9356         int ret;
9357
9358         ret = snprintf(file, sizeof(file),
9359                        "/sys/kernel/debug/tracing/events/%s/%s/id",
9360                        tp_category, tp_name);
9361         if (ret < 0)
9362                 return -errno;
9363         if (ret >= sizeof(file)) {
9364                 pr_debug("tracepoint %s/%s path is too long\n",
9365                          tp_category, tp_name);
9366                 return -E2BIG;
9367         }
9368         return parse_uint_from_file(file, "%d\n");
9369 }
9370
9371 static int perf_event_open_tracepoint(const char *tp_category,
9372                                       const char *tp_name)
9373 {
9374         struct perf_event_attr attr = {};
9375         char errmsg[STRERR_BUFSIZE];
9376         int tp_id, pfd, err;
9377
9378         tp_id = determine_tracepoint_id(tp_category, tp_name);
9379         if (tp_id < 0) {
9380                 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
9381                         tp_category, tp_name,
9382                         libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
9383                 return tp_id;
9384         }
9385
9386         attr.type = PERF_TYPE_TRACEPOINT;
9387         attr.size = sizeof(attr);
9388         attr.config = tp_id;
9389
9390         pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
9391                       -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9392         if (pfd < 0) {
9393                 err = -errno;
9394                 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
9395                         tp_category, tp_name,
9396                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9397                 return err;
9398         }
9399         return pfd;
9400 }
9401
9402 struct bpf_link *bpf_program__attach_tracepoint_opts(struct bpf_program *prog,
9403                                                      const char *tp_category,
9404                                                      const char *tp_name,
9405                                                      const struct bpf_tracepoint_opts *opts)
9406 {
9407         DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
9408         char errmsg[STRERR_BUFSIZE];
9409         struct bpf_link *link;
9410         int pfd, err;
9411
9412         if (!OPTS_VALID(opts, bpf_tracepoint_opts))
9413                 return libbpf_err_ptr(-EINVAL);
9414
9415         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
9416
9417         pfd = perf_event_open_tracepoint(tp_category, tp_name);
9418         if (pfd < 0) {
9419                 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
9420                         prog->name, tp_category, tp_name,
9421                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9422                 return libbpf_err_ptr(pfd);
9423         }
9424         link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
9425         err = libbpf_get_error(link);
9426         if (err) {
9427                 close(pfd);
9428                 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
9429                         prog->name, tp_category, tp_name,
9430                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9431                 return libbpf_err_ptr(err);
9432         }
9433         return link;
9434 }
9435
9436 struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
9437                                                 const char *tp_category,
9438                                                 const char *tp_name)
9439 {
9440         return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
9441 }
9442
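/* Editor's note: a minimal usage sketch (not part of the original source),
 * attaching to the syscalls:sys_enter_openat tracepoint:
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_tracepoint(prog, "syscalls",
 *					       "sys_enter_openat");
 */
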
9443 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
9444                                   struct bpf_program *prog)
9445 {
9446         char *sec_name, *tp_cat, *tp_name;
9447         struct bpf_link *link;
9448
9449         sec_name = strdup(prog->sec_name);
9450         if (!sec_name)
9451                 return libbpf_err_ptr(-ENOMEM);
9452
9453         /* extract "tp/<category>/<name>" */
9454         tp_cat = sec_name + sec->len;
9455         tp_name = strchr(tp_cat, '/');
9456         if (!tp_name) {
9457                 free(sec_name);
9458                 return libbpf_err_ptr(-EINVAL);
9459         }
9460         *tp_name = '\0';
9461         tp_name++;
9462
9463         link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
9464         free(sec_name);
9465         return link;
9466 }
9467
9468 struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
9469                                                     const char *tp_name)
9470 {
9471         char errmsg[STRERR_BUFSIZE];
9472         struct bpf_link *link;
9473         int prog_fd, pfd;
9474
9475         prog_fd = bpf_program__fd(prog);
9476         if (prog_fd < 0) {
9477                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9478                 return libbpf_err_ptr(-EINVAL);
9479         }
9480
9481         link = calloc(1, sizeof(*link));
9482         if (!link)
9483                 return libbpf_err_ptr(-ENOMEM);
9484         link->detach = &bpf_link__detach_fd;
9485
9486         pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
9487         if (pfd < 0) {
9488                 pfd = -errno;
9489                 free(link);
9490                 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
9491                         prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9492                 return libbpf_err_ptr(pfd);
9493         }
9494         link->fd = pfd;
9495         return link;
9496 }
9497
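/* Editor's note: a minimal usage sketch (not part of the original source),
 * attaching to the sched_switch raw tracepoint:
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 */
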
9498 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
9499                                       struct bpf_program *prog)
9500 {
9501         const char *tp_name = prog->sec_name + sec->len;
9502
9503         return bpf_program__attach_raw_tracepoint(prog, tp_name);
9504 }
9505
9506 /* Common logic for all BPF program types that attach to a btf_id */
9507 static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
9508 {
9509         char errmsg[STRERR_BUFSIZE];
9510         struct bpf_link *link;
9511         int prog_fd, pfd;
9512
9513         prog_fd = bpf_program__fd(prog);
9514         if (prog_fd < 0) {
9515                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9516                 return libbpf_err_ptr(-EINVAL);
9517         }
9518
9519         link = calloc(1, sizeof(*link));
9520         if (!link)
9521                 return libbpf_err_ptr(-ENOMEM);
9522         link->detach = &bpf_link__detach_fd;
9523
9524         pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
9525         if (pfd < 0) {
9526                 pfd = -errno;
9527                 free(link);
9528                 pr_warn("prog '%s': failed to attach: %s\n",
9529                         prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9530                 return libbpf_err_ptr(pfd);
9531         }
9532         link->fd = pfd;
9533         return (struct bpf_link *)link;
9534 }
9535
9536 struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
9537 {
9538         return bpf_program__attach_btf_id(prog);
9539 }
9540
9541 struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
9542 {
9543         return bpf_program__attach_btf_id(prog);
9544 }
9545
9546 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
9547                                      struct bpf_program *prog)
9548 {
9549         return bpf_program__attach_trace(prog);
9550 }
9551
9552 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
9553                                    struct bpf_program *prog)
9554 {
9555         return bpf_program__attach_lsm(prog);
9556 }
9557
9558 static struct bpf_link *
9559 bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
9560                        const char *target_name)
9561 {
9562         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
9563                             .target_btf_id = btf_id);
9564         enum bpf_attach_type attach_type;
9565         char errmsg[STRERR_BUFSIZE];
9566         struct bpf_link *link;
9567         int prog_fd, link_fd;
9568
9569         prog_fd = bpf_program__fd(prog);
9570         if (prog_fd < 0) {
9571                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9572                 return libbpf_err_ptr(-EINVAL);
9573         }
9574
9575         link = calloc(1, sizeof(*link));
9576         if (!link)
9577                 return libbpf_err_ptr(-ENOMEM);
9578         link->detach = &bpf_link__detach_fd;
9579
9580         attach_type = bpf_program__get_expected_attach_type(prog);
9581         link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
9582         if (link_fd < 0) {
9583                 link_fd = -errno;
9584                 free(link);
9585                 pr_warn("prog '%s': failed to attach to %s: %s\n",
9586                         prog->name, target_name,
9587                         libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
9588                 return libbpf_err_ptr(link_fd);
9589         }
9590         link->fd = link_fd;
9591         return link;
9592 }
9593
9594 struct bpf_link *
9595 bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
9596 {
9597         return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
9598 }
9599
9600 struct bpf_link *
9601 bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
9602 {
9603         return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
9604 }
9605
9606 struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
9607 {
9608         /* target_fd/target_ifindex use the same field in LINK_CREATE */
9609         return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
9610 }
9611
9612 struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
9613                                               int target_fd,
9614                                               const char *attach_func_name)
9615 {
9616         int btf_id;
9617
9618         if (!!target_fd != !!attach_func_name) {
9619                 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
9620                         prog->name);
9621                 return libbpf_err_ptr(-EINVAL);
9622         }
9623
9624         if (prog->type != BPF_PROG_TYPE_EXT) {
9625                 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
9626                         prog->name);
9627                 return libbpf_err_ptr(-EINVAL);
9628         }
9629
9630         if (target_fd) {
9631                 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
9632                 if (btf_id < 0)
9633                         return libbpf_err_ptr(btf_id);
9634
9635                 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
9636         } else {
9637                 /* no target, so use raw_tracepoint_open for compatibility
9638                  * with old kernels
9639                  */
9640                 return bpf_program__attach_trace(prog);
9641         }
9642 }
9643
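/* Editor's note: a minimal usage sketch (not part of the original source);
 * "target_prog_fd" and the target function name are hypothetical:
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_freplace(ext_prog, target_prog_fd,
 *					     "xdp_dispatcher_fn");
 */
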
9644 struct bpf_link *
9645 bpf_program__attach_iter(struct bpf_program *prog,
9646                          const struct bpf_iter_attach_opts *opts)
9647 {
9648         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
9649         char errmsg[STRERR_BUFSIZE];
9650         struct bpf_link *link;
9651         int prog_fd, link_fd;
9652         __u32 target_fd = 0;
9653
9654         if (!OPTS_VALID(opts, bpf_iter_attach_opts))
9655                 return libbpf_err_ptr(-EINVAL);
9656
9657         link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
9658         link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
9659
9660         prog_fd = bpf_program__fd(prog);
9661         if (prog_fd < 0) {
9662                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9663                 return libbpf_err_ptr(-EINVAL);
9664         }
9665
9666         link = calloc(1, sizeof(*link));
9667         if (!link)
9668                 return libbpf_err_ptr(-ENOMEM);
9669         link->detach = &bpf_link__detach_fd;
9670
9671         link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
9672                                   &link_create_opts);
9673         if (link_fd < 0) {
9674                 link_fd = -errno;
9675                 free(link);
9676                 pr_warn("prog '%s': failed to attach to iterator: %s\n",
9677                         prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
9678                 return libbpf_err_ptr(link_fd);
9679         }
9680         link->fd = link_fd;
9681         return link;
9682 }
9683
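/* Editor's note: a minimal usage sketch (not part of the original source);
 * after attaching, an iterator instance is created from the link and read
 * like a regular file:
 *
 *	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
 *	int iter_fd = bpf_iter_create(bpf_link__fd(link));
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 */
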
9684 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
9685                                     struct bpf_program *prog)
9686 {
9687         return bpf_program__attach_iter(prog, NULL);
9688 }
9689
9690 struct bpf_link *bpf_program__attach(struct bpf_program *prog)
9691 {
9692         const struct bpf_sec_def *sec_def;
9693
9694         sec_def = find_sec_def(prog->sec_name);
9695         if (!sec_def || !sec_def->attach_fn)
9696                 return libbpf_err_ptr(-ESRCH);
9697
9698         return sec_def->attach_fn(sec_def, prog);
9699 }
9700
9701 static int bpf_link__detach_struct_ops(struct bpf_link *link)
9702 {
9703         __u32 zero = 0;
9704
9705         if (bpf_map_delete_elem(link->fd, &zero))
9706                 return -errno;
9707
9708         return 0;
9709 }
9710
9711 struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
9712 {
9713         struct bpf_struct_ops *st_ops;
9714         struct bpf_link *link;
9715         __u32 i, zero = 0;
9716         int err;
9717
9718         if (!bpf_map__is_struct_ops(map) || map->fd == -1)
9719                 return libbpf_err_ptr(-EINVAL);
9720
9721         link = calloc(1, sizeof(*link));
9722         if (!link)
9723                 return libbpf_err_ptr(-ENOMEM);
9724
9725         st_ops = map->st_ops;
9726         for (i = 0; i < btf_vlen(st_ops->type); i++) {
9727                 struct bpf_program *prog = st_ops->progs[i];
9728                 void *kern_data;
9729                 int prog_fd;
9730
9731                 if (!prog)
9732                         continue;
9733
9734                 prog_fd = bpf_program__fd(prog);
9735                 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
9736                 *(unsigned long *)kern_data = prog_fd;
9737         }
9738
9739         err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
9740         if (err) {
9741                 err = -errno;
9742                 free(link);
9743                 return libbpf_err_ptr(err);
9744         }
9745
9746         link->detach = bpf_link__detach_struct_ops;
9747         link->fd = map->fd;
9748
9749         return link;
9750 }
9751
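/* Editor's note: a minimal usage sketch (not part of the original source);
 * "dctcp" is an example name for a struct_ops map (e.g., a
 * tcp_congestion_ops implementation) in an already-loaded object:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "dctcp");
 *	struct bpf_link *link = bpf_map__attach_struct_ops(map);
 */
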
9752 enum bpf_perf_event_ret
9753 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
9754                            void **copy_mem, size_t *copy_size,
9755                            bpf_perf_event_print_t fn, void *private_data)
9756 {
9757         struct perf_event_mmap_page *header = mmap_mem;
9758         __u64 data_head = ring_buffer_read_head(header);
9759         __u64 data_tail = header->data_tail;
9760         void *base = ((__u8 *)header) + page_size;
9761         int ret = LIBBPF_PERF_EVENT_CONT;
9762         struct perf_event_header *ehdr;
9763         size_t ehdr_size;
9764
9765         while (data_head != data_tail) {
9766                 ehdr = base + (data_tail & (mmap_size - 1));
9767                 ehdr_size = ehdr->size;
9768
9769                 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
9770                         void *copy_start = ehdr;
9771                         size_t len_first = base + mmap_size - copy_start;
9772                         size_t len_secnd = ehdr_size - len_first;
9773
9774                         if (*copy_size < ehdr_size) {
9775                                 free(*copy_mem);
9776                                 *copy_mem = malloc(ehdr_size);
9777                                 if (!*copy_mem) {
9778                                         *copy_size = 0;
9779                                         ret = LIBBPF_PERF_EVENT_ERROR;
9780                                         break;
9781                                 }
9782                                 *copy_size = ehdr_size;
9783                         }
9784
9785                         memcpy(*copy_mem, copy_start, len_first);
9786                         memcpy(*copy_mem + len_first, base, len_secnd);
9787                         ehdr = *copy_mem;
9788                 }
9789
9790                 ret = fn(ehdr, private_data);
9791                 data_tail += ehdr_size;
9792                 if (ret != LIBBPF_PERF_EVENT_CONT)
9793                         break;
9794         }
9795
9796         ring_buffer_write_tail(header, data_tail);
9797         return libbpf_err(ret);
9798 }
9799
9800 struct perf_buffer;
9801
9802 struct perf_buffer_params {
9803         struct perf_event_attr *attr;
9804         /* if event_cb is specified, it takes precedence */
9805         perf_buffer_event_fn event_cb;
9806         /* sample_cb and lost_cb are higher-level common-case callbacks */
9807         perf_buffer_sample_fn sample_cb;
9808         perf_buffer_lost_fn lost_cb;
9809         void *ctx;
9810         int cpu_cnt;
9811         int *cpus;
9812         int *map_keys;
9813 };
9814
9815 struct perf_cpu_buf {
9816         struct perf_buffer *pb;
9817         void *base; /* mmap()'ed memory */
9818         void *buf; /* for reconstructing segmented data */
9819         size_t buf_size;
9820         int fd;
9821         int cpu;
9822         int map_key;
9823 };
9824
9825 struct perf_buffer {
9826         perf_buffer_event_fn event_cb;
9827         perf_buffer_sample_fn sample_cb;
9828         perf_buffer_lost_fn lost_cb;
9829         void *ctx; /* passed into callbacks */
9830
9831         size_t page_size;
9832         size_t mmap_size;
9833         struct perf_cpu_buf **cpu_bufs;
9834         struct epoll_event *events;
9835         int cpu_cnt; /* number of allocated CPU buffers */
9836         int epoll_fd; /* epoll FD used to poll all per-CPU buffers */
9837         int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
9838 };
9839
9840 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
9841                                       struct perf_cpu_buf *cpu_buf)
9842 {
9843         if (!cpu_buf)
9844                 return;
9845         if (cpu_buf->base &&
9846             munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
9847                 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
9848         if (cpu_buf->fd >= 0) {
9849                 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
9850                 close(cpu_buf->fd);
9851         }
9852         free(cpu_buf->buf);
9853         free(cpu_buf);
9854 }
9855
9856 void perf_buffer__free(struct perf_buffer *pb)
9857 {
9858         int i;
9859
9860         if (IS_ERR_OR_NULL(pb))
9861                 return;
9862         if (pb->cpu_bufs) {
9863                 for (i = 0; i < pb->cpu_cnt; i++) {
9864                         struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
9865
9866                         if (!cpu_buf)
9867                                 continue;
9868
9869                         bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
9870                         perf_buffer__free_cpu_buf(pb, cpu_buf);
9871                 }
9872                 free(pb->cpu_bufs);
9873         }
9874         if (pb->epoll_fd >= 0)
9875                 close(pb->epoll_fd);
9876         free(pb->events);
9877         free(pb);
9878 }
9879
9880 static struct perf_cpu_buf *
9881 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
9882                           int cpu, int map_key)
9883 {
9884         struct perf_cpu_buf *cpu_buf;
9885         char msg[STRERR_BUFSIZE];
9886         int err;
9887
9888         cpu_buf = calloc(1, sizeof(*cpu_buf));
9889         if (!cpu_buf)
9890                 return ERR_PTR(-ENOMEM);
9891
9892         cpu_buf->pb = pb;
9893         cpu_buf->cpu = cpu;
9894         cpu_buf->map_key = map_key;
9895
9896         cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
9897                               -1, PERF_FLAG_FD_CLOEXEC);
9898         if (cpu_buf->fd < 0) {
9899                 err = -errno;
9900                 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
9901                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
9902                 goto error;
9903         }
9904
9905         cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
9906                              PROT_READ | PROT_WRITE, MAP_SHARED,
9907                              cpu_buf->fd, 0);
9908         if (cpu_buf->base == MAP_FAILED) {
9909                 cpu_buf->base = NULL;
9910                 err = -errno;
9911                 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
9912                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
9913                 goto error;
9914         }
9915
9916         if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9917                 err = -errno;
9918                 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
9919                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
9920                 goto error;
9921         }
9922
9923         return cpu_buf;
9924
9925 error:
9926         perf_buffer__free_cpu_buf(pb, cpu_buf);
9927         return (struct perf_cpu_buf *)ERR_PTR(err);
9928 }
9929
9930 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
9931                                               struct perf_buffer_params *p);
9932
9933 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
9934                                      const struct perf_buffer_opts *opts)
9935 {
9936         struct perf_buffer_params p = {};
9937         struct perf_event_attr attr = { 0, };
9938
9939         attr.config = PERF_COUNT_SW_BPF_OUTPUT;
9940         attr.type = PERF_TYPE_SOFTWARE;
9941         attr.sample_type = PERF_SAMPLE_RAW;
9942         attr.sample_period = 1;
9943         attr.wakeup_events = 1;
9944
9945         p.attr = &attr;
9946         p.sample_cb = opts ? opts->sample_cb : NULL;
9947         p.lost_cb = opts ? opts->lost_cb : NULL;
9948         p.ctx = opts ? opts->ctx : NULL;
9949
9950         return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
9951 }
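
/* Usage sketch (illustrative only; "handle_sample", "handle_lost" and
 * "map_fd" are hypothetical, and map_fd must refer to a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map):
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// raw bytes submitted via bpf_perf_event_output()
 *	}
 *
 *	static void handle_lost(void *ctx, int cpu, __u64 cnt)
 *	{
 *		fprintf(stderr, "lost %llu samples on CPU #%d\n", cnt, cpu);
 *	}
 *
 *	struct perf_buffer_opts pb_opts = {
 *		.sample_cb = handle_sample,
 *		.lost_cb = handle_lost,
 *	};
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(map_fd, 8, &pb_opts); // 8 pages per CPU
 *	if (libbpf_get_error(pb))
 *		return -1;
 */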
9952
9953 struct perf_buffer *
9954 perf_buffer__new_raw(int map_fd, size_t page_cnt,
9955                      const struct perf_buffer_raw_opts *opts)
9956 {
9957         struct perf_buffer_params p = {};
9958
9959         p.attr = opts->attr;
9960         p.event_cb = opts->event_cb;
9961         p.ctx = opts->ctx;
9962         p.cpu_cnt = opts->cpu_cnt;
9963         p.cpus = opts->cpus;
9964         p.map_keys = opts->map_keys;
9965
9966         return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
9967 }
9968
9969 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
9970                                               struct perf_buffer_params *p)
9971 {
9972         const char *online_cpus_file = "/sys/devices/system/cpu/online";
9973         struct bpf_map_info map;
9974         char msg[STRERR_BUFSIZE];
9975         struct perf_buffer *pb;
9976         bool *online = NULL;
9977         __u32 map_info_len;
9978         int err, i, j, n;
9979
9980         if (page_cnt & (page_cnt - 1)) {
9981                 pr_warn("page count should be power of two, but is %zu\n",
9982                         page_cnt);
9983                 return ERR_PTR(-EINVAL);
9984         }
9985
9986         /* best-effort sanity checks */
9987         memset(&map, 0, sizeof(map));
9988         map_info_len = sizeof(map);
9989         err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
9990         if (err) {
9991                 err = -errno;
9992                 /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
9993                  * -EBADFD, -EFAULT, or -E2BIG on a real error
9994                  */
9995                 if (err != -EINVAL) {
9996                         pr_warn("failed to get map info for map FD %d: %s\n",
9997                                 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
9998                         return ERR_PTR(err);
9999                 }
10000                 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
10001                          map_fd);
10002         } else {
10003                 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
10004                         pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
10005                                 map.name);
10006                         return ERR_PTR(-EINVAL);
10007                 }
10008         }
10009
10010         pb = calloc(1, sizeof(*pb));
10011         if (!pb)
10012                 return ERR_PTR(-ENOMEM);
10013
10014         pb->event_cb = p->event_cb;
10015         pb->sample_cb = p->sample_cb;
10016         pb->lost_cb = p->lost_cb;
10017         pb->ctx = p->ctx;
10018
10019         pb->page_size = getpagesize();
10020         pb->mmap_size = pb->page_size * page_cnt;
10021         pb->map_fd = map_fd;
10022
10023         pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
10024         if (pb->epoll_fd < 0) {
10025                 err = -errno;
10026                 pr_warn("failed to create epoll instance: %s\n",
10027                         libbpf_strerror_r(err, msg, sizeof(msg)));
10028                 goto error;
10029         }
10030
10031         if (p->cpu_cnt > 0) {
10032                 pb->cpu_cnt = p->cpu_cnt;
10033         } else {
10034                 pb->cpu_cnt = libbpf_num_possible_cpus();
10035                 if (pb->cpu_cnt < 0) {
10036                         err = pb->cpu_cnt;
10037                         goto error;
10038                 }
10039                 if (map.max_entries && map.max_entries < pb->cpu_cnt)
10040                         pb->cpu_cnt = map.max_entries;
10041         }
10042
10043         pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
10044         if (!pb->events) {
10045                 err = -ENOMEM;
10046                 pr_warn("failed to allocate events: out of memory\n");
10047                 goto error;
10048         }
10049         pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
10050         if (!pb->cpu_bufs) {
10051                 err = -ENOMEM;
10052                 pr_warn("failed to allocate buffers: out of memory\n");
10053                 goto error;
10054         }
10055
10056         err = parse_cpu_mask_file(online_cpus_file, &online, &n);
10057         if (err) {
10058                 pr_warn("failed to get online CPU mask: %d\n", err);
10059                 goto error;
10060         }
10061
10062         for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
10063                 struct perf_cpu_buf *cpu_buf;
10064                 int cpu, map_key;
10065
10066                 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
10067                 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
10068
10069                 /* if the user didn't explicitly request particular CPUs to
10070                  * attach to, skip offline/not-present CPUs
10071                  */
10072                 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
10073                         continue;
10074
10075                 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
10076                 if (IS_ERR(cpu_buf)) {
10077                         err = PTR_ERR(cpu_buf);
10078                         goto error;
10079                 }
10080
10081                 pb->cpu_bufs[j] = cpu_buf;
10082
10083                 err = bpf_map_update_elem(pb->map_fd, &map_key,
10084                                           &cpu_buf->fd, 0);
10085                 if (err) {
10086                         err = -errno;
10087                         pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
10088                                 cpu, map_key, cpu_buf->fd,
10089                                 libbpf_strerror_r(err, msg, sizeof(msg)));
10090                         goto error;
10091                 }
10092
10093                 pb->events[j].events = EPOLLIN;
10094                 pb->events[j].data.ptr = cpu_buf;
10095                 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
10096                               &pb->events[j]) < 0) {
10097                         err = -errno;
10098                         pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
10099                                 cpu, cpu_buf->fd,
10100                                 libbpf_strerror_r(err, msg, sizeof(msg)));
10101                         goto error;
10102                 }
10103                 j++;
10104         }
10105         pb->cpu_cnt = j;
10106         free(online);
10107
10108         return pb;
10109
10110 error:
10111         free(online);
10112         if (pb)
10113                 perf_buffer__free(pb);
10114         return ERR_PTR(err);
10115 }
10116
10117 struct perf_sample_raw {
10118         struct perf_event_header header;
10119         uint32_t size;
10120         char data[];
10121 };
10122
10123 struct perf_sample_lost {
10124         struct perf_event_header header;
10125         uint64_t id;
10126         uint64_t lost;
10127         uint64_t sample_id;
10128 };
10129
10130 static enum bpf_perf_event_ret
10131 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
10132 {
10133         struct perf_cpu_buf *cpu_buf = ctx;
10134         struct perf_buffer *pb = cpu_buf->pb;
10135         void *data = e;
10136
10137         /* user wants full control over parsing perf event */
10138         if (pb->event_cb)
10139                 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
10140
10141         switch (e->type) {
10142         case PERF_RECORD_SAMPLE: {
10143                 struct perf_sample_raw *s = data;
10144
10145                 if (pb->sample_cb)
10146                         pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
10147                 break;
10148         }
10149         case PERF_RECORD_LOST: {
10150                 struct perf_sample_lost *s = data;
10151
10152                 if (pb->lost_cb)
10153                         pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
10154                 break;
10155         }
10156         default:
10157                 pr_warn("unknown perf record type %d\n", e->type);
10158                 return LIBBPF_PERF_EVENT_ERROR;
10159         }
10160         return LIBBPF_PERF_EVENT_CONT;
10161 }
10162
10163 static int perf_buffer__process_records(struct perf_buffer *pb,
10164                                         struct perf_cpu_buf *cpu_buf)
10165 {
10166         enum bpf_perf_event_ret ret;
10167
10168         ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
10169                                          pb->page_size, &cpu_buf->buf,
10170                                          &cpu_buf->buf_size,
10171                                          perf_buffer__process_record, cpu_buf);
10172         if (ret != LIBBPF_PERF_EVENT_CONT)
10173                 return ret;
10174         return 0;
10175 }
10176
10177 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
10178 {
10179         return pb->epoll_fd;
10180 }
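
/* The epoll FD lets callers integrate the perf buffer into their own event
 * loop instead of calling perf_buffer__poll(). A minimal sketch (error
 * handling elided):
 *
 *	struct pollfd pfd = {
 *		.fd = perf_buffer__epoll_fd(pb),
 *		.events = POLLIN,
 *	};
 *
 *	while (poll(&pfd, 1, -1) > 0)
 *		perf_buffer__consume(pb);
 */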
10181
10182 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
10183 {
10184         int i, cnt, err;
10185
10186         cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
10187         if (cnt < 0)
10188                 return libbpf_err(-errno);
10189
10190         for (i = 0; i < cnt; i++) {
10191                 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
10192
10193                 err = perf_buffer__process_records(pb, cpu_buf);
10194                 if (err) {
10195                         pr_warn("error while processing records: %d\n", err);
10196                         return libbpf_err(err);
10197                 }
10198         }
10199         return cnt;
10200 }
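
/* Typical consumption loop (illustrative; "exiting" is a hypothetical flag
 * set from a signal handler). perf_buffer__poll() returns the number of
 * ready buffers, 0 on timeout, or a negative error:
 *
 *	while (!exiting) {
 *		err = perf_buffer__poll(pb, 100); // timeout in ms
 *		if (err < 0 && err != -EINTR)
 *			break;
 *	}
 *	perf_buffer__free(pb);
 */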
10201
10202 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
10203  * manager.
10204  */
10205 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
10206 {
10207         return pb->cpu_cnt;
10208 }
10209
10210 /*
10211  * Return perf_event FD of a ring buffer in *buf_idx* slot of
10212  * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
10213  * select()/poll()/epoll() Linux syscalls.
10214  */
10215 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
10216 {
10217         struct perf_cpu_buf *cpu_buf;
10218
10219         if (buf_idx >= pb->cpu_cnt)
10220                 return libbpf_err(-EINVAL);
10221
10222         cpu_buf = pb->cpu_bufs[buf_idx];
10223         if (!cpu_buf)
10224                 return libbpf_err(-ENOENT);
10225
10226         return cpu_buf->fd;
10227 }
10228
10229 /*
10230  * Consume data from perf ring buffer corresponding to slot *buf_idx* in
10231  * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
10232  * consume, do nothing and return success.
10233  * Returns:
10234  *   - 0 on success;
10235  *   - <0 on failure.
10236  */
10237 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
10238 {
10239         struct perf_cpu_buf *cpu_buf;
10240
10241         if (buf_idx >= pb->cpu_cnt)
10242                 return libbpf_err(-EINVAL);
10243
10244         cpu_buf = pb->cpu_bufs[buf_idx];
10245         if (!cpu_buf)
10246                 return libbpf_err(-ENOENT);
10247
10248         return perf_buffer__process_records(pb, cpu_buf);
10249 }
10250
10251 int perf_buffer__consume(struct perf_buffer *pb)
10252 {
10253         int i, err;
10254
10255         for (i = 0; i < pb->cpu_cnt; i++) {
10256                 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10257
10258                 if (!cpu_buf)
10259                         continue;
10260
10261                 err = perf_buffer__process_records(pb, cpu_buf);
10262                 if (err) {
10263                         pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
10264                         return libbpf_err(err);
10265                 }
10266         }
10267         return 0;
10268 }
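
/* A final non-blocking drain before teardown (illustrative): unlike
 * perf_buffer__poll(), this never waits, so records already sitting in the
 * ring buffers aren't lost on exit:
 *
 *	perf_buffer__consume(pb);
 *	perf_buffer__free(pb);
 */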
10269
10270 struct bpf_prog_info_array_desc {
10271         int     array_offset;   /* e.g. offset of jited_prog_insns */
10272         int     count_offset;   /* e.g. offset of jited_prog_len */
10273         int     size_offset;    /* > 0: offset of rec size,
10274                                  * < 0: fixed size of -size_offset
10275                                  */
10276 };
10277
10278 static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
10279         [BPF_PROG_INFO_JITED_INSNS] = {
10280                 offsetof(struct bpf_prog_info, jited_prog_insns),
10281                 offsetof(struct bpf_prog_info, jited_prog_len),
10282                 -1,
10283         },
10284         [BPF_PROG_INFO_XLATED_INSNS] = {
10285                 offsetof(struct bpf_prog_info, xlated_prog_insns),
10286                 offsetof(struct bpf_prog_info, xlated_prog_len),
10287                 -1,
10288         },
10289         [BPF_PROG_INFO_MAP_IDS] = {
10290                 offsetof(struct bpf_prog_info, map_ids),
10291                 offsetof(struct bpf_prog_info, nr_map_ids),
10292                 -(int)sizeof(__u32),
10293         },
10294         [BPF_PROG_INFO_JITED_KSYMS] = {
10295                 offsetof(struct bpf_prog_info, jited_ksyms),
10296                 offsetof(struct bpf_prog_info, nr_jited_ksyms),
10297                 -(int)sizeof(__u64),
10298         },
10299         [BPF_PROG_INFO_JITED_FUNC_LENS] = {
10300                 offsetof(struct bpf_prog_info, jited_func_lens),
10301                 offsetof(struct bpf_prog_info, nr_jited_func_lens),
10302                 -(int)sizeof(__u32),
10303         },
10304         [BPF_PROG_INFO_FUNC_INFO] = {
10305                 offsetof(struct bpf_prog_info, func_info),
10306                 offsetof(struct bpf_prog_info, nr_func_info),
10307                 offsetof(struct bpf_prog_info, func_info_rec_size),
10308         },
10309         [BPF_PROG_INFO_LINE_INFO] = {
10310                 offsetof(struct bpf_prog_info, line_info),
10311                 offsetof(struct bpf_prog_info, nr_line_info),
10312                 offsetof(struct bpf_prog_info, line_info_rec_size),
10313         },
10314         [BPF_PROG_INFO_JITED_LINE_INFO] = {
10315                 offsetof(struct bpf_prog_info, jited_line_info),
10316                 offsetof(struct bpf_prog_info, nr_jited_line_info),
10317                 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
10318         },
10319         [BPF_PROG_INFO_PROG_TAGS] = {
10320                 offsetof(struct bpf_prog_info, prog_tags),
10321                 offsetof(struct bpf_prog_info, nr_prog_tags),
10322                 -(int)sizeof(__u8) * BPF_TAG_SIZE,
10323         },
10324
10325 };
10326
10327 static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
10328                                            int offset)
10329 {
10330         __u32 *array = (__u32 *)info;
10331
10332         if (offset >= 0)
10333                 return array[offset / sizeof(__u32)];
10334         return -(int)offset;
10335 }
10336
10337 static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
10338                                            int offset)
10339 {
10340         __u64 *array = (__u64 *)info;
10341
10342         if (offset >= 0)
10343                 return array[offset / sizeof(__u64)];
10344         return -(int)offset;
10345 }
10346
10347 static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
10348                                          __u32 val)
10349 {
10350         __u32 *array = (__u32 *)info;
10351
10352         if (offset >= 0)
10353                 array[offset / sizeof(__u32)] = val;
10354 }
10355
10356 static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
10357                                          __u64 val)
10358 {
10359         __u64 *array = (__u64 *)info;
10360
10361         if (offset >= 0)
10362                 array[offset / sizeof(__u64)] = val;
10363 }
10364
10365 struct bpf_prog_info_linear *
10366 bpf_program__get_prog_info_linear(int fd, __u64 arrays)
10367 {
10368         struct bpf_prog_info_linear *info_linear;
10369         struct bpf_prog_info info = {};
10370         __u32 info_len = sizeof(info);
10371         __u32 data_len = 0;
10372         int i, err;
10373         void *ptr;
10374
10375         if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
10376                 return libbpf_err_ptr(-EINVAL);
10377
10378         /* step 1: get array dimensions */
10379         err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
10380         if (err) {
10381                 pr_debug("can't get prog info: %s\n", strerror(errno));
10382                 return libbpf_err_ptr(-EFAULT);
10383         }
10384
10385         /* step 2: calculate total size of all arrays */
10386         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10387                 bool include_array = (arrays & (1UL << i)) > 0;
10388                 struct bpf_prog_info_array_desc *desc;
10389                 __u32 count, size;
10390
10391                 desc = bpf_prog_info_array_desc + i;
10392
10393                 /* kernel is too old to support this field */
10394                 if (info_len < desc->array_offset + sizeof(__u32) ||
10395                     info_len < desc->count_offset + sizeof(__u32) ||
10396                     (desc->size_offset > 0 && info_len < desc->size_offset))
10397                         include_array = false;
10398
10399                 if (!include_array) {
10400                         arrays &= ~(1UL << i);  /* clear the bit */
10401                         continue;
10402                 }
10403
10404                 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10405                 size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10406
10407                 data_len += count * size;
10408         }
10409
10410         /* step 3: allocate continuous memory */
10411         data_len = roundup(data_len, sizeof(__u64));
10412         info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
10413         if (!info_linear)
10414                 return libbpf_err_ptr(-ENOMEM);
10415
10416         /* step 4: fill data to info_linear->info */
10417         info_linear->arrays = arrays;
10418         memset(&info_linear->info, 0, sizeof(info));
10419         ptr = info_linear->data;
10420
10421         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10422                 struct bpf_prog_info_array_desc *desc;
10423                 __u32 count, size;
10424
10425                 if ((arrays & (1UL << i)) == 0)
10426                         continue;
10427
10428                 desc  = bpf_prog_info_array_desc + i;
10429                 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10430                 size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10431                 bpf_prog_info_set_offset_u32(&info_linear->info,
10432                                              desc->count_offset, count);
10433                 bpf_prog_info_set_offset_u32(&info_linear->info,
10434                                              desc->size_offset, size);
10435                 bpf_prog_info_set_offset_u64(&info_linear->info,
10436                                              desc->array_offset,
10437                                              ptr_to_u64(ptr));
10438                 ptr += count * size;
10439         }
10440
10441         /* step 5: call syscall again to get required arrays */
10442         err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
10443         if (err) {
10444                 pr_debug("can't get prog info: %s\n", strerror(errno));
10445                 free(info_linear);
10446                 return libbpf_err_ptr(-EFAULT);
10447         }
10448
10449         /* step 6: verify the data */
10450         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10451                 struct bpf_prog_info_array_desc *desc;
10452                 __u32 v1, v2;
10453
10454                 if ((arrays & (1UL << i)) == 0)
10455                         continue;
10456
10457                 desc = bpf_prog_info_array_desc + i;
10458                 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10459                 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
10460                                                    desc->count_offset);
10461                 if (v1 != v2)
10462                         pr_warn("%s: mismatch in element count\n", __func__);
10463
10464                 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10465                 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
10466                                                    desc->size_offset);
10467                 if (v1 != v2)
10468                         pr_warn("%s: mismatch in rec size\n", __func__);
10469         }
10470
10471         /* step 7: update info_len and data_len */
10472         info_linear->info_len = sizeof(struct bpf_prog_info);
10473         info_linear->data_len = data_len;
10474
10475         return info_linear;
10476 }
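
/* Usage sketch (illustrative; "prog_fd" is a hypothetical FD of a loaded
 * program). Request only the arrays that are needed via the bitmask:
 *
 *	__u64 arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
 *	struct bpf_prog_info_linear *info;
 *
 *	info = bpf_program__get_prog_info_linear(prog_fd, arrays);
 *	if (libbpf_get_error(info))
 *		return -1;
 *	// info->info.jited_prog_insns now points into info->data
 *	free(info);
 */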
10477
10478 void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
10479 {
10480         int i;
10481
10482         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10483                 struct bpf_prog_info_array_desc *desc;
10484                 __u64 addr, offs;
10485
10486                 if ((info_linear->arrays & (1UL << i)) == 0)
10487                         continue;
10488
10489                 desc = bpf_prog_info_array_desc + i;
10490                 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
10491                                                      desc->array_offset);
10492                 offs = addr - ptr_to_u64(info_linear->data);
10493                 bpf_prog_info_set_offset_u64(&info_linear->info,
10494                                              desc->array_offset, offs);
10495         }
10496 }
10497
10498 void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
10499 {
10500         int i;
10501
10502         for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10503                 struct bpf_prog_info_array_desc *desc;
10504                 __u64 addr, offs;
10505
10506                 if ((info_linear->arrays & (1UL << i)) == 0)
10507                         continue;
10508
10509                 desc = bpf_prog_info_array_desc + i;
10510                 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
10511                                                      desc->array_offset);
10512                 addr = offs + ptr_to_u64(info_linear->data);
10513                 bpf_prog_info_set_offset_u64(&info_linear->info,
10514                                              desc->array_offset, addr);
10515         }
10516 }
10517
10518 int bpf_program__set_attach_target(struct bpf_program *prog,
10519                                    int attach_prog_fd,
10520                                    const char *attach_func_name)
10521 {
10522         int btf_obj_fd = 0, btf_id = 0, err;
10523
10524         if (!prog || attach_prog_fd < 0 || !attach_func_name)
10525                 return libbpf_err(-EINVAL);
10526
10527         if (prog->obj->loaded)
10528                 return libbpf_err(-EINVAL);
10529
10530         if (attach_prog_fd) {
10531                 btf_id = libbpf_find_prog_btf_id(attach_func_name,
10532                                                  attach_prog_fd);
10533                 if (btf_id < 0)
10534                         return libbpf_err(btf_id);
10535         } else {
10536                 /* load btf_vmlinux, if not yet */
10537                 err = bpf_object__load_vmlinux_btf(prog->obj, true);
10538                 if (err)
10539                         return libbpf_err(err);
10540                 err = find_kernel_btf_id(prog->obj, attach_func_name,
10541                                          prog->expected_attach_type,
10542                                          &btf_obj_fd, &btf_id);
10543                 if (err)
10544                         return libbpf_err(err);
10545         }
10546
10547         prog->attach_btf_id = btf_id;
10548         prog->attach_btf_obj_fd = btf_obj_fd;
10549         prog->attach_prog_fd = attach_prog_fd;
10550         return 0;
10551 }
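
/* Usage sketch (illustrative; program and function names are hypothetical).
 * Re-target a fentry/fexit/freplace program before bpf_object__load();
 * attach_prog_fd == 0 resolves attach_func_name against kernel (vmlinux) BTF:
 *
 *	prog = bpf_object__find_program_by_name(obj, "my_fentry");
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 */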
10552
10553 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
10554 {
10555         int err = 0, n, len, start, end = -1;
10556         bool *tmp;
10557
10558         *mask = NULL;
10559         *mask_sz = 0;
10560
10561         /* Each substring separated by ',' has format \d+-\d+ or \d+ */
10562         while (*s) {
10563                 if (*s == ',' || *s == '\n') {
10564                         s++;
10565                         continue;
10566                 }
10567                 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
10568                 if (n <= 0 || n > 2) {
10569                         pr_warn("Failed to get CPU range %s: %d\n", s, n);
10570                         err = -EINVAL;
10571                         goto cleanup;
10572                 } else if (n == 1) {
10573                         end = start;
10574                 }
10575                 if (start < 0 || start > end) {
10576                         pr_warn("Invalid CPU range [%d,%d] in %s\n",
10577                                 start, end, s);
10578                         err = -EINVAL;
10579                         goto cleanup;
10580                 }
10581                 tmp = realloc(*mask, end + 1);
10582                 if (!tmp) {
10583                         err = -ENOMEM;
10584                         goto cleanup;
10585                 }
10586                 *mask = tmp;
10587                 memset(tmp + *mask_sz, 0, start - *mask_sz);
10588                 memset(tmp + start, 1, end - start + 1);
10589                 *mask_sz = end + 1;
10590                 s += len;
10591         }
10592         if (!*mask_sz) {
10593                 pr_warn("Empty CPU range\n");
10594                 return -EINVAL;
10595         }
10596         return 0;
10597 cleanup:
10598         free(*mask);
10599         *mask = NULL;
10600         return err;
10601 }
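
/* Example (internal helper): parsing "0-2,7\n" yields *mask_sz == 8 with
 * mask[0..2] and mask[7] set to true and mask[3..6] false; the caller is
 * responsible for free()-ing *mask.
 */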
10602
10603 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
10604 {
10605         int fd, err = 0, len;
10606         char buf[128];
10607
10608         fd = open(fcpu, O_RDONLY);
10609         if (fd < 0) {
10610                 err = -errno;
10611                 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
10612                 return err;
10613         }
10614         len = read(fd, buf, sizeof(buf));
10615         close(fd);
10616         if (len <= 0) {
10617                 err = len ? -errno : -EINVAL;
10618                 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
10619                 return err;
10620         }
10621         if (len >= sizeof(buf)) {
10622                 pr_warn("CPU mask is too big in file %s\n", fcpu);
10623                 return -E2BIG;
10624         }
10625         buf[len] = '\0';
10626
10627         return parse_cpu_mask_str(buf, mask, mask_sz);
10628 }
10629
10630 int libbpf_num_possible_cpus(void)
10631 {
10632         static const char *fcpu = "/sys/devices/system/cpu/possible";
10633         static int cpus;
10634         int err, n, i, tmp_cpus;
10635         bool *mask;
10636
10637         tmp_cpus = READ_ONCE(cpus);
10638         if (tmp_cpus > 0)
10639                 return tmp_cpus;
10640
10641         err = parse_cpu_mask_file(fcpu, &mask, &n);
10642         if (err)
10643                 return libbpf_err(err);
10644
10645         tmp_cpus = 0;
10646         for (i = 0; i < n; i++) {
10647                 if (mask[i])
10648                         tmp_cpus++;
10649         }
10650         free(mask);
10651
10652         WRITE_ONCE(cpus, tmp_cpus);
10653         return tmp_cpus;
10654 }
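
/* Usage sketch (illustrative; "percpu_map_fd" and "key" are hypothetical):
 * sizing a buffer large enough to hold per-CPU map values:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	if (ncpus < 0)
 *		return ncpus;
 *	__u64 *values = calloc(ncpus, sizeof(__u64));
 *	err = bpf_map_lookup_elem(percpu_map_fd, &key, values);
 */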
10655
10656 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
10657                               const struct bpf_object_open_opts *opts)
10658 {
10659         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
10660                 .object_name = s->name,
10661         );
10662         struct bpf_object *obj;
10663         int i, err;
10664
10665         /* Attempt to preserve opts->object_name, unless explicitly
10666          * overridden by the user. Overwriting the object name for skeletons
10667          * is discouraged, as it breaks global data maps: they embed the
10668          * object name as their own map name prefix. When a skeleton is
10669          * generated, bpftool assumes that this name will stay the same.
10670          */
10671         if (opts) {
10672                 memcpy(&skel_opts, opts, sizeof(*opts));
10673                 if (!opts->object_name)
10674                         skel_opts.object_name = s->name;
10675         }
10676
10677         obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
10678         err = libbpf_get_error(obj);
10679         if (err) {
10680                 pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
10681                         s->name, err);
10682                 return libbpf_err(err);
10683         }
10684
10685         *s->obj = obj;
10686
10687         for (i = 0; i < s->map_cnt; i++) {
10688                 struct bpf_map **map = s->maps[i].map;
10689                 const char *name = s->maps[i].name;
10690                 void **mmaped = s->maps[i].mmaped;
10691
10692                 *map = bpf_object__find_map_by_name(obj, name);
10693                 if (!*map) {
10694                         pr_warn("failed to find skeleton map '%s'\n", name);
10695                         return libbpf_err(-ESRCH);
10696                 }
10697
10698                 /* externs shouldn't be pre-setup from user code */
10699                 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
10700                         *mmaped = (*map)->mmaped;
10701         }
10702
10703         for (i = 0; i < s->prog_cnt; i++) {
10704                 struct bpf_program **prog = s->progs[i].prog;
10705                 const char *name = s->progs[i].name;
10706
10707                 *prog = bpf_object__find_program_by_name(obj, name);
10708                 if (!*prog) {
10709                         pr_warn("failed to find skeleton program '%s'\n", name);
10710                         return libbpf_err(-ESRCH);
10711                 }
10712         }
10713
10714         return 0;
10715 }
10716
10717 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
10718 {
10719         int i, err;
10720
10721         err = bpf_object__load(*s->obj);
10722         if (err) {
10723                 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
10724                 return libbpf_err(err);
10725         }
10726
10727         for (i = 0; i < s->map_cnt; i++) {
10728                 struct bpf_map *map = *s->maps[i].map;
10729                 size_t mmap_sz = bpf_map_mmap_sz(map);
10730                 int prot, map_fd = bpf_map__fd(map);
10731                 void **mmaped = s->maps[i].mmaped;
10732
10733                 if (!mmaped)
10734                         continue;
10735
10736                 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
10737                         *mmaped = NULL;
10738                         continue;
10739                 }
10740
10741                 if (map->def.map_flags & BPF_F_RDONLY_PROG)
10742                         prot = PROT_READ;
10743                 else
10744                         prot = PROT_READ | PROT_WRITE;
10745
10746                 /* Remap the anonymous mmap()-ed "map initialization image" as
10747                  * BPF map-backed mmap()-ed memory, preserving the same memory
10748                  * address. This makes the kernel change the process's page
10749                  * tables to point to a different piece of kernel memory, but
10750                  * from the userspace point of view the address (and its
10751                  * contents, identical at this point) stays the same. The
10752                  * mapping is released by bpf_object__close() as part of the
10753                  * normal clean up procedure, so we don't need to worry about
10754                  * it from the skeleton's clean up perspective.
10755                  */
10756                 *mmaped = mmap(map->mmaped, mmap_sz, prot,
10757                                 MAP_SHARED | MAP_FIXED, map_fd, 0);
10758                 if (*mmaped == MAP_FAILED) {
10759                         err = -errno;
10760                         *mmaped = NULL;
10761                         pr_warn("failed to re-mmap() map '%s': %d\n",
10762                                  bpf_map__name(map), err);
10763                         return libbpf_err(err);
10764                 }
10765         }
10766
10767         return 0;
10768 }
10769
10770 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
10771 {
10772         int i, err;
10773
10774         for (i = 0; i < s->prog_cnt; i++) {
10775                 struct bpf_program *prog = *s->progs[i].prog;
10776                 struct bpf_link **link = s->progs[i].link;
10777                 const struct bpf_sec_def *sec_def;
10778
10779                 if (!prog->load)
10780                         continue;
10781
10782                 sec_def = find_sec_def(prog->sec_name);
10783                 if (!sec_def || !sec_def->attach_fn)
10784                         continue;
10785
10786                 *link = sec_def->attach_fn(sec_def, prog);
10787                 err = libbpf_get_error(*link);
10788                 if (err) {
10789                         pr_warn("failed to auto-attach program '%s': %d\n",
10790                                 bpf_program__name(prog), err);
10791                         return libbpf_err(err);
10792                 }
10793         }
10794
10795         return 0;
10796 }
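
/* These skeleton helpers are normally invoked by bpftool-generated code, not
 * called directly. Illustrative application-side lifecycle ("my_prog" is a
 * hypothetical skeleton name):
 *
 *	struct my_prog_bpf *skel;
 *
 *	skel = my_prog_bpf__open();	  // bpf_object__open_skeleton()
 *	err = my_prog_bpf__load(skel);	  // bpf_object__load_skeleton()
 *	err = my_prog_bpf__attach(skel);  // bpf_object__attach_skeleton()
 *	...
 *	my_prog_bpf__destroy(skel);	  // bpf_object__destroy_skeleton()
 */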
10797
10798 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
10799 {
10800         int i;
10801
10802         for (i = 0; i < s->prog_cnt; i++) {
10803                 struct bpf_link **link = s->progs[i].link;
10804
10805                 bpf_link__destroy(*link);
10806                 *link = NULL;
10807         }
10808 }
10809
10810 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
10811 {
10812         if (s->progs)
10813                 bpf_object__detach_skeleton(s);
10814         if (s->obj)
10815                 bpf_object__close(*s->obj);
10816         free(s->maps);
10817         free(s->progs);
10818         free(s);
10819 }