libbpf: Explicitly call write to append content to file
tools/lib/bpf/libbpf.c (platform/kernel/linux-starfive.git)

// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "zip.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC            0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))
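
/* every eBPF instruction is a fixed 8 bytes (sizeof(struct bpf_insn)), so
 * program offsets and sizes within an ELF section must be multiples of
 * BPF_INSN_SZ (checked in bpf_object__init_prog() below)
 */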

/* vfprintf() in __base_pr() uses a nonliteral format string. This may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)  __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);

static const char * const attach_type_name[] = {
        [BPF_CGROUP_INET_INGRESS]       = "cgroup_inet_ingress",
        [BPF_CGROUP_INET_EGRESS]        = "cgroup_inet_egress",
        [BPF_CGROUP_INET_SOCK_CREATE]   = "cgroup_inet_sock_create",
        [BPF_CGROUP_INET_SOCK_RELEASE]  = "cgroup_inet_sock_release",
        [BPF_CGROUP_SOCK_OPS]           = "cgroup_sock_ops",
        [BPF_CGROUP_DEVICE]             = "cgroup_device",
        [BPF_CGROUP_INET4_BIND]         = "cgroup_inet4_bind",
        [BPF_CGROUP_INET6_BIND]         = "cgroup_inet6_bind",
        [BPF_CGROUP_INET4_CONNECT]      = "cgroup_inet4_connect",
        [BPF_CGROUP_INET6_CONNECT]      = "cgroup_inet6_connect",
        [BPF_CGROUP_INET4_POST_BIND]    = "cgroup_inet4_post_bind",
        [BPF_CGROUP_INET6_POST_BIND]    = "cgroup_inet6_post_bind",
        [BPF_CGROUP_INET4_GETPEERNAME]  = "cgroup_inet4_getpeername",
        [BPF_CGROUP_INET6_GETPEERNAME]  = "cgroup_inet6_getpeername",
        [BPF_CGROUP_INET4_GETSOCKNAME]  = "cgroup_inet4_getsockname",
        [BPF_CGROUP_INET6_GETSOCKNAME]  = "cgroup_inet6_getsockname",
        [BPF_CGROUP_UDP4_SENDMSG]       = "cgroup_udp4_sendmsg",
        [BPF_CGROUP_UDP6_SENDMSG]       = "cgroup_udp6_sendmsg",
        [BPF_CGROUP_SYSCTL]             = "cgroup_sysctl",
        [BPF_CGROUP_UDP4_RECVMSG]       = "cgroup_udp4_recvmsg",
        [BPF_CGROUP_UDP6_RECVMSG]       = "cgroup_udp6_recvmsg",
        [BPF_CGROUP_GETSOCKOPT]         = "cgroup_getsockopt",
        [BPF_CGROUP_SETSOCKOPT]         = "cgroup_setsockopt",
        [BPF_SK_SKB_STREAM_PARSER]      = "sk_skb_stream_parser",
        [BPF_SK_SKB_STREAM_VERDICT]     = "sk_skb_stream_verdict",
        [BPF_SK_SKB_VERDICT]            = "sk_skb_verdict",
        [BPF_SK_MSG_VERDICT]            = "sk_msg_verdict",
        [BPF_LIRC_MODE2]                = "lirc_mode2",
        [BPF_FLOW_DISSECTOR]            = "flow_dissector",
        [BPF_TRACE_RAW_TP]              = "trace_raw_tp",
        [BPF_TRACE_FENTRY]              = "trace_fentry",
        [BPF_TRACE_FEXIT]               = "trace_fexit",
        [BPF_MODIFY_RETURN]             = "modify_return",
        [BPF_LSM_MAC]                   = "lsm_mac",
        [BPF_LSM_CGROUP]                = "lsm_cgroup",
        [BPF_SK_LOOKUP]                 = "sk_lookup",
        [BPF_TRACE_ITER]                = "trace_iter",
        [BPF_XDP_DEVMAP]                = "xdp_devmap",
        [BPF_XDP_CPUMAP]                = "xdp_cpumap",
        [BPF_XDP]                       = "xdp",
        [BPF_SK_REUSEPORT_SELECT]       = "sk_reuseport_select",
        [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE]    = "sk_reuseport_select_or_migrate",
        [BPF_PERF_EVENT]                = "perf_event",
        [BPF_TRACE_KPROBE_MULTI]        = "trace_kprobe_multi",
};

static const char * const link_type_name[] = {
        [BPF_LINK_TYPE_UNSPEC]                  = "unspec",
        [BPF_LINK_TYPE_RAW_TRACEPOINT]          = "raw_tracepoint",
        [BPF_LINK_TYPE_TRACING]                 = "tracing",
        [BPF_LINK_TYPE_CGROUP]                  = "cgroup",
        [BPF_LINK_TYPE_ITER]                    = "iter",
        [BPF_LINK_TYPE_NETNS]                   = "netns",
        [BPF_LINK_TYPE_XDP]                     = "xdp",
        [BPF_LINK_TYPE_PERF_EVENT]              = "perf_event",
        [BPF_LINK_TYPE_KPROBE_MULTI]            = "kprobe_multi",
        [BPF_LINK_TYPE_STRUCT_OPS]              = "struct_ops",
};

static const char * const map_type_name[] = {
        [BPF_MAP_TYPE_UNSPEC]                   = "unspec",
        [BPF_MAP_TYPE_HASH]                     = "hash",
        [BPF_MAP_TYPE_ARRAY]                    = "array",
        [BPF_MAP_TYPE_PROG_ARRAY]               = "prog_array",
        [BPF_MAP_TYPE_PERF_EVENT_ARRAY]         = "perf_event_array",
        [BPF_MAP_TYPE_PERCPU_HASH]              = "percpu_hash",
        [BPF_MAP_TYPE_PERCPU_ARRAY]             = "percpu_array",
        [BPF_MAP_TYPE_STACK_TRACE]              = "stack_trace",
        [BPF_MAP_TYPE_CGROUP_ARRAY]             = "cgroup_array",
        [BPF_MAP_TYPE_LRU_HASH]                 = "lru_hash",
        [BPF_MAP_TYPE_LRU_PERCPU_HASH]          = "lru_percpu_hash",
        [BPF_MAP_TYPE_LPM_TRIE]                 = "lpm_trie",
        [BPF_MAP_TYPE_ARRAY_OF_MAPS]            = "array_of_maps",
        [BPF_MAP_TYPE_HASH_OF_MAPS]             = "hash_of_maps",
        [BPF_MAP_TYPE_DEVMAP]                   = "devmap",
        [BPF_MAP_TYPE_DEVMAP_HASH]              = "devmap_hash",
        [BPF_MAP_TYPE_SOCKMAP]                  = "sockmap",
        [BPF_MAP_TYPE_CPUMAP]                   = "cpumap",
        [BPF_MAP_TYPE_XSKMAP]                   = "xskmap",
        [BPF_MAP_TYPE_SOCKHASH]                 = "sockhash",
        [BPF_MAP_TYPE_CGROUP_STORAGE]           = "cgroup_storage",
        [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY]      = "reuseport_sockarray",
        [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE]    = "percpu_cgroup_storage",
        [BPF_MAP_TYPE_QUEUE]                    = "queue",
        [BPF_MAP_TYPE_STACK]                    = "stack",
        [BPF_MAP_TYPE_SK_STORAGE]               = "sk_storage",
        [BPF_MAP_TYPE_STRUCT_OPS]               = "struct_ops",
        [BPF_MAP_TYPE_RINGBUF]                  = "ringbuf",
        [BPF_MAP_TYPE_INODE_STORAGE]            = "inode_storage",
        [BPF_MAP_TYPE_TASK_STORAGE]             = "task_storage",
        [BPF_MAP_TYPE_BLOOM_FILTER]             = "bloom_filter",
        [BPF_MAP_TYPE_USER_RINGBUF]             = "user_ringbuf",
        [BPF_MAP_TYPE_CGRP_STORAGE]             = "cgrp_storage",
};

static const char * const prog_type_name[] = {
        [BPF_PROG_TYPE_UNSPEC]                  = "unspec",
        [BPF_PROG_TYPE_SOCKET_FILTER]           = "socket_filter",
        [BPF_PROG_TYPE_KPROBE]                  = "kprobe",
        [BPF_PROG_TYPE_SCHED_CLS]               = "sched_cls",
        [BPF_PROG_TYPE_SCHED_ACT]               = "sched_act",
        [BPF_PROG_TYPE_TRACEPOINT]              = "tracepoint",
        [BPF_PROG_TYPE_XDP]                     = "xdp",
        [BPF_PROG_TYPE_PERF_EVENT]              = "perf_event",
        [BPF_PROG_TYPE_CGROUP_SKB]              = "cgroup_skb",
        [BPF_PROG_TYPE_CGROUP_SOCK]             = "cgroup_sock",
        [BPF_PROG_TYPE_LWT_IN]                  = "lwt_in",
        [BPF_PROG_TYPE_LWT_OUT]                 = "lwt_out",
        [BPF_PROG_TYPE_LWT_XMIT]                = "lwt_xmit",
        [BPF_PROG_TYPE_SOCK_OPS]                = "sock_ops",
        [BPF_PROG_TYPE_SK_SKB]                  = "sk_skb",
        [BPF_PROG_TYPE_CGROUP_DEVICE]           = "cgroup_device",
        [BPF_PROG_TYPE_SK_MSG]                  = "sk_msg",
        [BPF_PROG_TYPE_RAW_TRACEPOINT]          = "raw_tracepoint",
        [BPF_PROG_TYPE_CGROUP_SOCK_ADDR]        = "cgroup_sock_addr",
        [BPF_PROG_TYPE_LWT_SEG6LOCAL]           = "lwt_seg6local",
        [BPF_PROG_TYPE_LIRC_MODE2]              = "lirc_mode2",
        [BPF_PROG_TYPE_SK_REUSEPORT]            = "sk_reuseport",
        [BPF_PROG_TYPE_FLOW_DISSECTOR]          = "flow_dissector",
        [BPF_PROG_TYPE_CGROUP_SYSCTL]           = "cgroup_sysctl",
        [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
        [BPF_PROG_TYPE_CGROUP_SOCKOPT]          = "cgroup_sockopt",
        [BPF_PROG_TYPE_TRACING]                 = "tracing",
        [BPF_PROG_TYPE_STRUCT_OPS]              = "struct_ops",
        [BPF_PROG_TYPE_EXT]                     = "ext",
        [BPF_PROG_TYPE_LSM]                     = "lsm",
        [BPF_PROG_TYPE_SK_LOOKUP]               = "sk_lookup",
        [BPF_PROG_TYPE_SYSCALL]                 = "syscall",
};

static int __base_pr(enum libbpf_print_level level, const char *format,
                     va_list args)
{
        if (level == LIBBPF_DEBUG)
                return 0;

        return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
        libbpf_print_fn_t old_print_fn = __libbpf_pr;

        __libbpf_pr = fn;
        return old_print_fn;
}
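
/* Illustrative usage (an assumption, not part of this file): an application
 * can silence all libbpf output by installing a no-op callback, e.g. with a
 * hypothetical quiet_pr() helper:
 *
 *   static int quiet_pr(enum libbpf_print_level level, const char *format,
 *                       va_list args)
 *   {
 *           return 0;
 *   }
 *
 *   libbpf_print_fn_t prev = libbpf_set_print(quiet_pr);
 */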

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
        va_list args;
        int old_errno;

        if (!__libbpf_pr)
                return;

        old_errno = errno;

        va_start(args, format);
        __libbpf_pr(level, format, args);
        va_end(args);

        errno = old_errno;
}

static void pr_perm_msg(int err)
{
        struct rlimit limit;
        char buf[100];

        if (err != -EPERM || geteuid() != 0)
                return;

        err = getrlimit(RLIMIT_MEMLOCK, &limit);
        if (err)
                return;

        if (limit.rlim_cur == RLIM_INFINITY)
                return;

        if (limit.rlim_cur < 1024)
                snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
        else if (limit.rlim_cur < 1024*1024)
                snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
        else
                snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

        pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
                buf);
}
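
/* Note: the 'ulimit -l' hint above matters mostly on kernels before 5.11,
 * which charged BPF maps and programs against RLIMIT_MEMLOCK; newer kernels
 * use memcg-based memory accounting instead.
 */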

#define STRERR_BUFSIZE  128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({                  \
        int ___err = 0;                 \
        if ((fd) >= 0)                  \
                ___err = close((fd));   \
        fd = -1;                        \
        ___err; })
#endif
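
/* zclose() resets the descriptor to -1 after closing it, so a repeated
 * zclose() on the same variable is a harmless no-op rather than a double
 * close.
 */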

static inline __u64 ptr_to_u64(const void *ptr)
{
        return (__u64) (unsigned long) ptr;
}
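
/* The double cast above is the usual idiom for passing pointers through the
 * bpf() syscall ABI, which carries them as __u64: going through unsigned long
 * keeps the conversion warning-free on 32-bit targets.
 */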

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
        /* as of v1.0 libbpf_set_strict_mode() is a no-op */
        return 0;
}

__u32 libbpf_major_version(void)
{
        return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
        return LIBBPF_MINOR_VERSION;
}

const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
        return  "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}
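
/* e.g., with LIBBPF_MAJOR_VERSION == 1 and LIBBPF_MINOR_VERSION == 2 this
 * returns "v1.2"; the two-level _S()/__S() indirection is needed so the
 * macro arguments are expanded before being stringified.
 */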

enum reloc_type {
        RELO_LD64,
        RELO_CALL,
        RELO_DATA,
        RELO_EXTERN_VAR,
        RELO_EXTERN_FUNC,
        RELO_SUBPROG_ADDR,
        RELO_CORE,
};

struct reloc_desc {
        enum reloc_type type;
        int insn_idx;
        union {
                const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
                struct {
                        int map_idx;
                        int sym_off;
                };
        };
};

/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
        SEC_NONE = 0,
        /* expected_attach_type is optional, if kernel doesn't support that */
        SEC_EXP_ATTACH_OPT = 1,
        /* legacy, only used by libbpf_get_type_names() and
         * libbpf_attach_type_by_name(), not used by libbpf itself at all.
         * This used to be associated with cgroup (and few other) BPF programs
         * that were attachable through BPF_PROG_ATTACH command. Pretty
         * meaningless nowadays, though.
         */
        SEC_ATTACHABLE = 2,
        SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
        /* attachment target is specified through BTF ID in either kernel or
         * other BPF program's BTF object
         */
        SEC_ATTACH_BTF = 4,
        /* BPF program type allows sleeping/blocking in kernel */
        SEC_SLEEPABLE = 8,
        /* BPF program supports non-linear XDP buffers */
        SEC_XDP_FRAGS = 16,
};

struct bpf_sec_def {
        char *sec;
        enum bpf_prog_type prog_type;
        enum bpf_attach_type expected_attach_type;
        long cookie;
        int handler_id;

        libbpf_prog_setup_fn_t prog_setup_fn;
        libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
        libbpf_prog_attach_fn_t prog_attach_fn;
};

/*
 * bpf_prog would be a better name, but it has already been used in
 * linux/filter.h.
 */
struct bpf_program {
        char *name;
        char *sec_name;
        size_t sec_idx;
        const struct bpf_sec_def *sec_def;
        /* this program's instruction offset (in number of instructions)
         * within its containing ELF section
         */
        size_t sec_insn_off;
        /* number of original instructions in ELF section belonging to this
         * program, not taking into account subprogram instructions possibly
         * appended later during relocation
         */
        size_t sec_insn_cnt;
        /* Offset (in number of instructions) of the start of instructions
         * belonging to this BPF program within its containing main BPF
         * program. For the entry-point (main) BPF program, this is always
         * zero. For a sub-program, this gets reset before each of the main
         * BPF programs is processed and relocated, and is used to determine
         * whether the sub-program was already appended to the main program,
         * and if yes, at which instruction offset.
         */
        size_t sub_insn_off;

        /* instructions that belong to BPF program; insns[0] is located at
         * sec_insn_off instruction within its ELF section in ELF file, so
         * when mapping ELF file instruction index to the local instruction,
         * one needs to subtract sec_insn_off; and vice versa.
         */
        struct bpf_insn *insns;
        /* actual number of instructions in this BPF program's image; for
         * entry-point BPF programs this includes the size of the main program
         * itself plus all the used sub-programs, appended at the end
         */
        size_t insns_cnt;

        struct reloc_desc *reloc_desc;
        int nr_reloc;

        /* BPF verifier log settings */
        char *log_buf;
        size_t log_size;
        __u32 log_level;

        struct bpf_object *obj;

        int fd;
        bool autoload;
        bool autoattach;
        bool mark_btf_static;
        enum bpf_prog_type type;
        enum bpf_attach_type expected_attach_type;

        int prog_ifindex;
        __u32 attach_btf_obj_fd;
        __u32 attach_btf_id;
        __u32 attach_prog_fd;

        void *func_info;
        __u32 func_info_rec_size;
        __u32 func_info_cnt;

        void *line_info;
        __u32 line_info_rec_size;
        __u32 line_info_cnt;
        __u32 prog_flags;
};

struct bpf_struct_ops {
        const char *tname;
        const struct btf_type *type;
        struct bpf_program **progs;
        __u32 *kern_func_off;
        /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
        void *data;
        /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
         *      btf_vmlinux's format.
         * struct bpf_struct_ops_tcp_congestion_ops {
         *      [... some other kernel fields ...]
         *      struct tcp_congestion_ops data;
         * }
         * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
         * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
         * from "data".
         */
        void *kern_vdata;
        __u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
        LIBBPF_MAP_UNSPEC,
        LIBBPF_MAP_DATA,
        LIBBPF_MAP_BSS,
        LIBBPF_MAP_RODATA,
        LIBBPF_MAP_KCONFIG,
};

struct bpf_map_def {
        unsigned int type;
        unsigned int key_size;
        unsigned int value_size;
        unsigned int max_entries;
        unsigned int map_flags;
};

struct bpf_map {
        struct bpf_object *obj;
        char *name;
        /* real_name is defined for special internal maps (.rodata*,
         * .data*, .bss, .kconfig) and preserves their original ELF section
         * name. This is important to be able to find corresponding BTF
         * DATASEC information.
         */
        char *real_name;
        int fd;
        int sec_idx;
        size_t sec_offset;
        int map_ifindex;
        int inner_map_fd;
        struct bpf_map_def def;
        __u32 numa_node;
        __u32 btf_var_idx;
        __u32 btf_key_type_id;
        __u32 btf_value_type_id;
        __u32 btf_vmlinux_value_type_id;
        enum libbpf_map_type libbpf_type;
        void *mmaped;
        struct bpf_struct_ops *st_ops;
        struct bpf_map *inner_map;
        void **init_slots;
        int init_slots_sz;
        char *pin_path;
        bool pinned;
        bool reused;
        bool autocreate;
        __u64 map_extra;
};

enum extern_type {
        EXT_UNKNOWN,
        EXT_KCFG,
        EXT_KSYM,
};

enum kcfg_type {
        KCFG_UNKNOWN,
        KCFG_CHAR,
        KCFG_BOOL,
        KCFG_INT,
        KCFG_TRISTATE,
        KCFG_CHAR_ARR,
};

struct extern_desc {
        enum extern_type type;
        int sym_idx;
        int btf_id;
        int sec_btf_id;
        const char *name;
        bool is_set;
        bool is_weak;
        union {
                struct {
                        enum kcfg_type type;
                        int sz;
                        int align;
                        int data_off;
                        bool is_signed;
                } kcfg;
                struct {
                        unsigned long long addr;

                        /* target btf_id of the corresponding kernel var. */
                        int kernel_btf_obj_fd;
                        int kernel_btf_id;

                        /* local btf_id of the ksym extern's type. */
                        __u32 type_id;
                        /* BTF fd index to be patched in for insn->off, this is
                         * 0 for vmlinux BTF, index in obj->fd_array for module
                         * BTF
                         */
                        __s16 btf_fd_idx;
                } ksym;
        };
};

struct module_btf {
        struct btf *btf;
        char *name;
        __u32 id;
        int fd;
        int fd_array_idx;
};

enum sec_type {
        SEC_UNUSED = 0,
        SEC_RELO,
        SEC_BSS,
        SEC_DATA,
        SEC_RODATA,
};

struct elf_sec_desc {
        enum sec_type sec_type;
        Elf64_Shdr *shdr;
        Elf_Data *data;
};

struct elf_state {
        int fd;
        const void *obj_buf;
        size_t obj_buf_sz;
        Elf *elf;
        Elf64_Ehdr *ehdr;
        Elf_Data *symbols;
        Elf_Data *st_ops_data;
        size_t shstrndx; /* section index for section name strings */
        size_t strtabidx;
        struct elf_sec_desc *secs;
        size_t sec_cnt;
        int btf_maps_shndx;
        __u32 btf_maps_sec_btf_id;
        int text_shndx;
        int symbols_shndx;
        int st_ops_shndx;
};

struct usdt_manager;

struct bpf_object {
        char name[BPF_OBJ_NAME_LEN];
        char license[64];
        __u32 kern_version;

        struct bpf_program *programs;
        size_t nr_programs;
        struct bpf_map *maps;
        size_t nr_maps;
        size_t maps_cap;

        char *kconfig;
        struct extern_desc *externs;
        int nr_extern;
        int kconfig_map_idx;

        bool loaded;
        bool has_subcalls;
        bool has_rodata;

        struct bpf_gen *gen_loader;

        /* Information when doing ELF related work. Only valid if efile.elf is not NULL */
        struct elf_state efile;

        struct btf *btf;
        struct btf_ext *btf_ext;

        /* Parse and load BTF vmlinux if any of the programs in the object need
         * it at load time.
         */
        struct btf *btf_vmlinux;
        /* Path to the custom BTF to be used for BPF CO-RE relocations as an
         * override for vmlinux BTF.
         */
        char *btf_custom_path;
        /* vmlinux BTF override for CO-RE relocations */
        struct btf *btf_vmlinux_override;
        /* Lazily initialized kernel module BTFs */
        struct module_btf *btf_modules;
        bool btf_modules_loaded;
        size_t btf_module_cnt;
        size_t btf_module_cap;

        /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
        char *log_buf;
        size_t log_size;
        __u32 log_level;

        int *fd_array;
        size_t fd_array_cap;
        size_t fd_array_cnt;

        struct usdt_manager *usdt_man;

        char path[];
};

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);

void bpf_program__unload(struct bpf_program *prog)
{
        if (!prog)
                return;

        zclose(prog->fd);

        zfree(&prog->func_info);
        zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
        if (!prog)
                return;

        bpf_program__unload(prog);
        zfree(&prog->name);
        zfree(&prog->sec_name);
        zfree(&prog->insns);
        zfree(&prog->reloc_desc);

        prog->nr_reloc = 0;
        prog->insns_cnt = 0;
        prog->sec_idx = -1;
}

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
        return BPF_CLASS(insn->code) == BPF_JMP &&
               BPF_OP(insn->code) == BPF_CALL &&
               BPF_SRC(insn->code) == BPF_K &&
               insn->src_reg == BPF_PSEUDO_CALL &&
               insn->dst_reg == 0 &&
               insn->off == 0;
}

static bool is_call_insn(const struct bpf_insn *insn)
{
        return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
        return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
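
/* Together these predicates tell the call/ldimm64 flavors apart: a
 * BPF_JMP|BPF_CALL insn with src_reg == BPF_PSEUDO_CALL targets another BPF
 * subprogram (src_reg == 0 would be a helper call), while an ldimm64 insn
 * with src_reg == BPF_PSEUDO_FUNC loads the address of a BPF subprogram
 * instead of a plain 64-bit constant.
 */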

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
                      const char *name, size_t sec_idx, const char *sec_name,
                      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
        if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
                pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
                        sec_name, name, sec_off, insn_data_sz);
                return -EINVAL;
        }

        memset(prog, 0, sizeof(*prog));
        prog->obj = obj;

        prog->sec_idx = sec_idx;
        prog->sec_insn_off = sec_off / BPF_INSN_SZ;
        prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
        /* insns_cnt can later be increased by appending used subprograms */
        prog->insns_cnt = prog->sec_insn_cnt;

        prog->type = BPF_PROG_TYPE_UNSPEC;
        prog->fd = -1;

        /* libbpf's convention for SEC("?abc...") is that it's just like
         * SEC("abc...") but the corresponding bpf_program starts out with
         * autoload set to false.
         */
        if (sec_name[0] == '?') {
                prog->autoload = false;
                /* from now on forget there was ? in section name */
                sec_name++;
        } else {
                prog->autoload = true;
        }
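
        /* e.g., a program declared as SEC("?xdp") is parsed exactly like
         * SEC("xdp"), but is skipped at load time until the application
         * opts in via bpf_program__set_autoload(prog, true).
         */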

        prog->autoattach = true;

        /* inherit object's log_level */
        prog->log_level = obj->log_level;

        prog->sec_name = strdup(sec_name);
        if (!prog->sec_name)
                goto errout;

        prog->name = strdup(name);
        if (!prog->name)
                goto errout;

        prog->insns = malloc(insn_data_sz);
        if (!prog->insns)
                goto errout;
        memcpy(prog->insns, insn_data, insn_data_sz);

        return 0;
errout:
        pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
        bpf_program__exit(prog);
        return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
                         const char *sec_name, int sec_idx)
{
        Elf_Data *symbols = obj->efile.symbols;
        struct bpf_program *prog, *progs;
        void *data = sec_data->d_buf;
        size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
        int nr_progs, err, i;
        const char *name;
        Elf64_Sym *sym;

        progs = obj->programs;
        nr_progs = obj->nr_programs;
        nr_syms = symbols->d_size / sizeof(Elf64_Sym);

        for (i = 0; i < nr_syms; i++) {
                sym = elf_sym_by_idx(obj, i);

                if (sym->st_shndx != sec_idx)
                        continue;
                if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
                        continue;

                prog_sz = sym->st_size;
                sec_off = sym->st_value;

                name = elf_sym_str(obj, sym->st_name);
                if (!name) {
                        pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                if (sec_off + prog_sz > sec_sz) {
                        pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
                        pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
                        return -ENOTSUP;
                }

                pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
                         sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

                progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
                if (!progs) {
                        /*
                         * In this case the original obj->programs
                         * is still valid, so no special treatment is
                         * needed in bpf_object__close().
                         */
                        pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
                                sec_name, name);
                        return -ENOMEM;
                }
                obj->programs = progs;

                prog = &progs[nr_progs];

                err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
                                            sec_off, data + sec_off, prog_sz);
                if (err)
                        return err;

                /* if function is a global/weak symbol, but has restricted
                 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
                 * as static to enable more permissive BPF verification mode
                 * with more outside context available to BPF verifier
                 */
                if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
                    && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
                        || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
                        prog->mark_btf_static = true;

                nr_progs++;
                obj->nr_programs = nr_progs;
        }

        return 0;
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
        struct btf_member *m;
        int i;

        for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
                if (btf_member_bit_offset(t, i) == bit_offset)
                        return m;
        }

        return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
                    const char *name)
{
        struct btf_member *m;
        int i;

        for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
                if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
                        return m;
        }

        return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
                                   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
                           const struct btf_type **type, __u32 *type_id,
                           const struct btf_type **vtype, __u32 *vtype_id,
                           const struct btf_member **data_member)
{
        const struct btf_type *kern_type, *kern_vtype;
        const struct btf_member *kern_data_member;
        __s32 kern_vtype_id, kern_type_id;
        __u32 i;

        kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
        if (kern_type_id < 0) {
                pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
                        tname);
                return kern_type_id;
        }
        kern_type = btf__type_by_id(btf, kern_type_id);

        /* Find the corresponding "map_value" type that will be used
         * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
         * find "struct bpf_struct_ops_tcp_congestion_ops" from the
         * btf_vmlinux.
         */
        kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
                                                tname, BTF_KIND_STRUCT);
        if (kern_vtype_id < 0) {
                pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
                        STRUCT_OPS_VALUE_PREFIX, tname);
                return kern_vtype_id;
        }
        kern_vtype = btf__type_by_id(btf, kern_vtype_id);

        /* Find "struct tcp_congestion_ops" from
         * struct bpf_struct_ops_tcp_congestion_ops {
         *      [ ... ]
         *      struct tcp_congestion_ops data;
         * }
         */
        kern_data_member = btf_members(kern_vtype);
        for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
                if (kern_data_member->type == kern_type_id)
                        break;
        }
        if (i == btf_vlen(kern_vtype)) {
                pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
                        tname, STRUCT_OPS_VALUE_PREFIX, tname);
                return -EINVAL;
        }

        *type = kern_type;
        *type_id = kern_type_id;
        *vtype = kern_vtype;
        *vtype_id = kern_vtype_id;
        *data_member = kern_data_member;

        return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
        return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
                                         const struct btf *btf,
                                         const struct btf *kern_btf)
{
        const struct btf_member *member, *kern_member, *kern_data_member;
        const struct btf_type *type, *kern_type, *kern_vtype;
        __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
        struct bpf_struct_ops *st_ops;
        void *data, *kern_data;
        const char *tname;
        int err;

        st_ops = map->st_ops;
        type = st_ops->type;
        tname = st_ops->tname;
        err = find_struct_ops_kern_types(kern_btf, tname,
                                         &kern_type, &kern_type_id,
                                         &kern_vtype, &kern_vtype_id,
                                         &kern_data_member);
        if (err)
                return err;

        pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
                 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

        map->def.value_size = kern_vtype->size;
        map->btf_vmlinux_value_type_id = kern_vtype_id;

        st_ops->kern_vdata = calloc(1, kern_vtype->size);
        if (!st_ops->kern_vdata)
                return -ENOMEM;

        data = st_ops->data;
        kern_data_off = kern_data_member->offset / 8;
        kern_data = st_ops->kern_vdata + kern_data_off;

        member = btf_members(type);
        for (i = 0; i < btf_vlen(type); i++, member++) {
                const struct btf_type *mtype, *kern_mtype;
                __u32 mtype_id, kern_mtype_id;
                void *mdata, *kern_mdata;
                __s64 msize, kern_msize;
                __u32 moff, kern_moff;
                __u32 kern_member_idx;
                const char *mname;

                mname = btf__name_by_offset(btf, member->name_off);
                kern_member = find_member_by_name(kern_btf, kern_type, mname);
                if (!kern_member) {
                        pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
                                map->name, mname);
                        return -ENOTSUP;
                }

                kern_member_idx = kern_member - btf_members(kern_type);
                if (btf_member_bitfield_size(type, i) ||
                    btf_member_bitfield_size(kern_type, kern_member_idx)) {
                        pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
                                map->name, mname);
                        return -ENOTSUP;
                }

                moff = member->offset / 8;
                kern_moff = kern_member->offset / 8;

                mdata = data + moff;
                kern_mdata = kern_data + kern_moff;

                mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
                kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
                                                    &kern_mtype_id);
                if (BTF_INFO_KIND(mtype->info) !=
                    BTF_INFO_KIND(kern_mtype->info)) {
                        pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
                                map->name, mname, BTF_INFO_KIND(mtype->info),
                                BTF_INFO_KIND(kern_mtype->info));
                        return -ENOTSUP;
                }

                if (btf_is_ptr(mtype)) {
                        struct bpf_program *prog;

                        prog = st_ops->progs[i];
                        if (!prog)
                                continue;

                        kern_mtype = skip_mods_and_typedefs(kern_btf,
                                                            kern_mtype->type,
                                                            &kern_mtype_id);

                        /* mtype->type must be a func_proto which was
                         * guaranteed in bpf_object__collect_st_ops_relos(),
                         * so only check kern_mtype for func_proto here.
                         */
                        if (!btf_is_func_proto(kern_mtype)) {
                                pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
                                        map->name, mname);
                                return -ENOTSUP;
                        }

                        prog->attach_btf_id = kern_type_id;
                        prog->expected_attach_type = kern_member_idx;

                        st_ops->kern_func_off[i] = kern_data_off + kern_moff;

                        pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
                                 map->name, mname, prog->name, moff,
                                 kern_moff);

                        continue;
                }

                msize = btf__resolve_size(btf, mtype_id);
                kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
                if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
                        pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
                                map->name, mname, (ssize_t)msize,
                                (ssize_t)kern_msize);
                        return -ENOTSUP;
                }

                pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
                         map->name, mname, (unsigned int)msize,
                         moff, kern_moff);
                memcpy(kern_mdata, mdata, msize);
        }

        return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
        struct bpf_map *map;
        size_t i;
        int err;

        for (i = 0; i < obj->nr_maps; i++) {
                map = &obj->maps[i];

                if (!bpf_map__is_struct_ops(map))
                        continue;

                err = bpf_map__init_kern_struct_ops(map, obj->btf,
                                                    obj->btf_vmlinux);
                if (err)
                        return err;
        }

        return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
        const struct btf_type *type, *datasec;
        const struct btf_var_secinfo *vsi;
        struct bpf_struct_ops *st_ops;
        const char *tname, *var_name;
        __s32 type_id, datasec_id;
        const struct btf *btf;
        struct bpf_map *map;
        __u32 i;

        if (obj->efile.st_ops_shndx == -1)
                return 0;

        btf = obj->btf;
        datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
                                            BTF_KIND_DATASEC);
        if (datasec_id < 0) {
                pr_warn("struct_ops init: DATASEC %s not found\n",
                        STRUCT_OPS_SEC);
                return -EINVAL;
        }

        datasec = btf__type_by_id(btf, datasec_id);
        vsi = btf_var_secinfos(datasec);
        for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
                type = btf__type_by_id(obj->btf, vsi->type);
                var_name = btf__name_by_offset(obj->btf, type->name_off);

                type_id = btf__resolve_type(obj->btf, vsi->type);
                if (type_id < 0) {
                        pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
                                vsi->type, STRUCT_OPS_SEC);
                        return -EINVAL;
                }

                type = btf__type_by_id(obj->btf, type_id);
                tname = btf__name_by_offset(obj->btf, type->name_off);
                if (!tname[0]) {
                        pr_warn("struct_ops init: anonymous type is not supported\n");
                        return -ENOTSUP;
                }
                if (!btf_is_struct(type)) {
                        pr_warn("struct_ops init: %s is not a struct\n", tname);
                        return -EINVAL;
                }

                map = bpf_object__add_map(obj);
                if (IS_ERR(map))
                        return PTR_ERR(map);

                map->sec_idx = obj->efile.st_ops_shndx;
                map->sec_offset = vsi->offset;
                map->name = strdup(var_name);
                if (!map->name)
                        return -ENOMEM;

                map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
                map->def.key_size = sizeof(int);
                map->def.value_size = type->size;
                map->def.max_entries = 1;

                map->st_ops = calloc(1, sizeof(*map->st_ops));
                if (!map->st_ops)
                        return -ENOMEM;
                st_ops = map->st_ops;
                st_ops->data = malloc(type->size);
                st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
                st_ops->kern_func_off = malloc(btf_vlen(type) *
                                               sizeof(*st_ops->kern_func_off));
                if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
                        return -ENOMEM;

                if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
                        pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
                                var_name, STRUCT_OPS_SEC);
                        return -EINVAL;
                }

                memcpy(st_ops->data,
                       obj->efile.st_ops_data->d_buf + vsi->offset,
                       type->size);
                st_ops->tname = tname;
                st_ops->type = type;
                st_ops->type_id = type_id;

                pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
                         tname, type_id, var_name, vsi->offset);
        }

        return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
                                          const void *obj_buf,
                                          size_t obj_buf_sz,
                                          const char *obj_name)
{
        struct bpf_object *obj;
        char *end;

        obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
        if (!obj) {
                pr_warn("alloc memory failed for %s\n", path);
                return ERR_PTR(-ENOMEM);
        }

        strcpy(obj->path, path);
        if (obj_name) {
                libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
        } else {
                /* Using basename() GNU version which doesn't modify arg. */
                libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
                end = strchr(obj->name, '.');
                if (end)
                        *end = 0;
        }

        obj->efile.fd = -1;
        /*
         * The caller of this function should also call
         * bpf_object__elf_finish() after data collection to return
         * obj_buf to the user. Otherwise the buffer would have to be
         * duplicated to avoid the user freeing it before ELF processing
         * is finished.
         */
        obj->efile.obj_buf = obj_buf;
        obj->efile.obj_buf_sz = obj_buf_sz;
        obj->efile.btf_maps_shndx = -1;
        obj->efile.st_ops_shndx = -1;
        obj->kconfig_map_idx = -1;

        obj->kern_version = get_kernel_version();
        obj->loaded = false;

        return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
        if (!obj->efile.elf)
                return;

        elf_end(obj->efile.elf);
        obj->efile.elf = NULL;
        obj->efile.symbols = NULL;
        obj->efile.st_ops_data = NULL;

        zfree(&obj->efile.secs);
        obj->efile.sec_cnt = 0;
        zclose(obj->efile.fd);
        obj->efile.obj_buf = NULL;
        obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
        Elf64_Ehdr *ehdr;
        int err = 0;
        Elf *elf;

        if (obj->efile.elf) {
                pr_warn("elf: init internal error\n");
                return -LIBBPF_ERRNO__LIBELF;
        }

        if (obj->efile.obj_buf_sz > 0) {
                /* obj_buf should have been validated by bpf_object__open_mem(). */
                elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
        } else {
                obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
                if (obj->efile.fd < 0) {
                        char errmsg[STRERR_BUFSIZE], *cp;

                        err = -errno;
                        cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
                        pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
                        return err;
                }

                elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
        }

        if (!elf) {
                pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__LIBELF;
                goto errout;
        }

        obj->efile.elf = elf;

        if (elf_kind(elf) != ELF_K_ELF) {
                err = -LIBBPF_ERRNO__FORMAT;
                pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
                goto errout;
        }

        if (gelf_getclass(elf) != ELFCLASS64) {
                err = -LIBBPF_ERRNO__FORMAT;
                pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
                goto errout;
        }

        obj->efile.ehdr = ehdr = elf64_getehdr(elf);
        if (!obj->efile.ehdr) {
                pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
                pr_warn("elf: failed to get section names section index for %s: %s\n",
                        obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }
        /* ELF is corrupted/truncated, avoid calling elf_strptr. */
        if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
                pr_warn("elf: failed to get section names strings from %s: %s\n",
                        obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        /* Old LLVM set e_machine to EM_NONE */
        if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
                pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        return 0;
errout:
        bpf_object__elf_finish(obj);
        return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
                return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
                return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
        pr_warn("elf: endianness mismatch in %s.\n", obj->path);
        return -LIBBPF_ERRNO__ENDIAN;
}
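
/* A mismatched object (e.g., one compiled for big-endian s390x but opened on
 * a little-endian x86 host) is simply rejected here; instructions and map
 * data are read straight out of the ELF without any byte swapping.
 */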

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
        if (!data) {
                pr_warn("invalid license section in %s\n", obj->path);
                return -LIBBPF_ERRNO__FORMAT;
        }
        /* libbpf_strlcpy() only copies the first N - 1 bytes, so size + 1
         * won't read past the allowed ELF data section buffer
         */
1383         libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
1384         pr_debug("license of %s is %s\n", obj->path, obj->license);
1385         return 0;
1386 }
1387
1388 static int
1389 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1390 {
1391         __u32 kver;
1392
1393         if (!data || size != sizeof(kver)) {
1394                 pr_warn("invalid kver section in %s\n", obj->path);
1395                 return -LIBBPF_ERRNO__FORMAT;
1396         }
1397         memcpy(&kver, data, sizeof(kver));
1398         obj->kern_version = kver;
1399         pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1400         return 0;
1401 }
1402
1403 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1404 {
1405         if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1406             type == BPF_MAP_TYPE_HASH_OF_MAPS)
1407                 return true;
1408         return false;
1409 }
1410
1411 static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
1412 {
1413         Elf_Data *data;
1414         Elf_Scn *scn;
1415
1416         if (!name)
1417                 return -EINVAL;
1418
1419         scn = elf_sec_by_name(obj, name);
1420         data = elf_sec_data(obj, scn);
1421         if (data) {
1422                 *size = data->d_size;
1423                 return 0; /* found it */
1424         }
1425
1426         return -ENOENT;
1427 }
1428
1429 static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
1430 {
1431         Elf_Data *symbols = obj->efile.symbols;
1432         const char *sname;
1433         size_t si;
1434
1435         for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
1436                 Elf64_Sym *sym = elf_sym_by_idx(obj, si);
1437
1438                 if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
1439                         continue;
1440
1441                 if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
1442                     ELF64_ST_BIND(sym->st_info) != STB_WEAK)
1443                         continue;
1444
1445                 sname = elf_sym_str(obj, sym->st_name);
1446                 if (!sname) {
1447                         pr_warn("failed to get sym name string for var %s\n", name);
1448                         return ERR_PTR(-EIO);
1449                 }
1450                 if (strcmp(name, sname) == 0)
1451                         return sym;
1452         }
1453
1454         return ERR_PTR(-ENOENT);
1455 }
1456
1457 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1458 {
1459         struct bpf_map *map;
1460         int err;
1461
1462         err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1463                                 sizeof(*obj->maps), obj->nr_maps + 1);
1464         if (err)
1465                 return ERR_PTR(err);
1466
1467         map = &obj->maps[obj->nr_maps++];
1468         map->obj = obj;
1469         map->fd = -1;
1470         map->inner_map_fd = -1;
1471         map->autocreate = true;
1472
1473         return map;
1474 }
1475
1476 static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1477 {
1478         long page_sz = sysconf(_SC_PAGE_SIZE);
1479         size_t map_sz;
1480
1481         map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
1482         map_sz = roundup(map_sz, page_sz);
1483         return map_sz;
1484 }
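
/* Editorial worked example for bpf_map_mmap_sz(), assuming 4096-byte
 * pages: value_size = 16, max_entries = 1 needs 16 bytes, rounded up to
 * one 4096-byte page; value_size = 5000, max_entries = 2 needs
 * 5000 * 2 = 10000 bytes (5000 is already 8-byte aligned), rounded up
 * to 12288, i.e. three pages.
 */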
1485
1486 static char *internal_map_name(struct bpf_object *obj, const char *real_name)
1487 {
1488         char map_name[BPF_OBJ_NAME_LEN], *p;
1489         int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
1490
1491         /* This is one of the more confusing parts of libbpf for various
1492          * reasons, some of which are historical. The original idea for naming
1493          * internal names was to include as much of BPF object name prefix as
1494          * possible, so that it can be distinguished from similar internal
1495          * maps of a different BPF object.
1496          * As an example, let's say we have bpf_object named 'my_object_name'
1497          * and internal map corresponding to '.rodata' ELF section. The final
1498          * map name advertised to user and to the kernel will be
1499          * 'my_objec.rodata', taking first 8 characters of object name and
1500          * entire 7 characters of '.rodata'.
1501          * Somewhat confusingly, if internal map ELF section name is shorter
1502          * than 7 characters, e.g., '.bss', we still reserve 7 characters
1503          * for the suffix, even though we only have 4 actual characters, and
1504          * resulting map will be called 'my_objec.bss', not even using all 15
1505          * characters allowed by the kernel. Oh well, at least the truncated
1506          * object name is somewhat consistent in this case. But if the map
1507          * name is '.kconfig', we'll still have entirety of '.kconfig' added
1508          * (8 chars) and thus will be left with only first 7 characters of the
1509          * object name ('my_obje'). Happy guessing, user, that the final map
1510          * name will be "my_obje.kconfig".
1511          * Now, with libbpf starting to support arbitrarily named .rodata.*
1512          * and .data.* data sections, it's possible that ELF section name is
1513          * longer than allowed 15 chars, so we now need to be careful to take
1514          * only up to 15 first characters of ELF name, taking no BPF object
1515          * name characters at all. So '.rodata.abracadabra' will result in
1516          * '.rodata.abracad' kernel and user-visible name.
1517          * We need to keep this convoluted logic intact for .data, .bss and
1518          * .rodata maps, but for new custom .data.custom and .rodata.custom
1519          * maps we use their ELF names as is, not prepending bpf_object name
1520          * in front. We still need to truncate them to 15 characters for the
1521          * kernel. Full name can be recovered for such maps by using DATASEC
1522          * BTF type associated with such map's value type, though.
1523          */
1524         if (sfx_len >= BPF_OBJ_NAME_LEN)
1525                 sfx_len = BPF_OBJ_NAME_LEN - 1;
1526
1527         /* if there are two or more dots in map name, it's a custom dot map */
1528         if (strchr(real_name + 1, '.') != NULL)
1529                 pfx_len = 0;
1530         else
1531                 pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
1532
1533         snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1534                  sfx_len, real_name);
1535
1536         /* sanitise map name to characters allowed by kernel */
1537         for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1538                 if (!isalnum(*p) && *p != '_' && *p != '.')
1539                         *p = '_';
1540
1541         return strdup(map_name);
1542 }
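
/* Editorial illustration of internal_map_name(), restating the examples
 * from the comment above: for a bpf_object named "my_object_name",
 *   ".rodata"             -> "my_objec.rodata"
 *   ".bss"                -> "my_objec.bss"
 *   ".kconfig"            -> "my_obje.kconfig"
 *   ".rodata.abracadabra" -> ".rodata.abracad"  (custom dot map, no prefix)
 */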
1543
1544 static int
1545 map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
1546
1547 /* An internal BPF map is mmap()'able only if at least one of the
1548  * corresponding DATASEC's VARs is to be exposed through the BPF skeleton,
1549  * i.e., it's a GLOBAL variable that is not marked as __hidden (which
1550  * effectively turns it into a STATIC variable).
1551  */
1552 static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1553 {
1554         const struct btf_type *t, *vt;
1555         struct btf_var_secinfo *vsi;
1556         int i, n;
1557
1558         if (!map->btf_value_type_id)
1559                 return false;
1560
1561         t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1562         if (!btf_is_datasec(t))
1563                 return false;
1564
1565         vsi = btf_var_secinfos(t);
1566         for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
1567                 vt = btf__type_by_id(obj->btf, vsi->type);
1568                 if (!btf_is_var(vt))
1569                         continue;
1570
1571                 if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1572                         return true;
1573         }
1574
1575         return false;
1576 }
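
/* Editorial illustration, assuming bpf_helpers.h's __hidden macro: for
 * BPF-side variables
 *   int exposed;                 // GLOBAL VAR -> map becomes mmap'able
 *   static int internal;         // STATIC VAR -> doesn't count
 *   __hidden int also_internal;  // effectively STATIC -> doesn't count
 * a .data map containing all three is mmap'able thanks to 'exposed' alone.
 */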
1577
1578 static int
1579 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1580                               const char *real_name, int sec_idx, void *data, size_t data_sz)
1581 {
1582         struct bpf_map_def *def;
1583         struct bpf_map *map;
1584         int err;
1585
1586         map = bpf_object__add_map(obj);
1587         if (IS_ERR(map))
1588                 return PTR_ERR(map);
1589
1590         map->libbpf_type = type;
1591         map->sec_idx = sec_idx;
1592         map->sec_offset = 0;
1593         map->real_name = strdup(real_name);
1594         map->name = internal_map_name(obj, real_name);
1595         if (!map->real_name || !map->name) {
1596                 zfree(&map->real_name);
1597                 zfree(&map->name);
1598                 return -ENOMEM;
1599         }
1600
1601         def = &map->def;
1602         def->type = BPF_MAP_TYPE_ARRAY;
1603         def->key_size = sizeof(int);
1604         def->value_size = data_sz;
1605         def->max_entries = 1;
1606         def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1607                          ? BPF_F_RDONLY_PROG : 0;
1608
1609         /* failures are fine because of maps like .rodata.str1.1 */
1610         (void) map_fill_btf_type_info(obj, map);
1611
1612         if (map_is_mmapable(obj, map))
1613                 def->map_flags |= BPF_F_MMAPABLE;
1614
1615         pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1616                  map->name, map->sec_idx, map->sec_offset, def->map_flags);
1617
1618         map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
1619                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1620         if (map->mmaped == MAP_FAILED) {
1621                 err = -errno;
1622                 map->mmaped = NULL;
1623                 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1624                         map->name, err);
1625                 zfree(&map->real_name);
1626                 zfree(&map->name);
1627                 return err;
1628         }
1629
1630         if (data)
1631                 memcpy(map->mmaped, data, data_sz);
1632
1633         pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1634         return 0;
1635 }
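
/* Editorial illustration: a 64-byte .rodata section becomes a
 * single-entry BPF_MAP_TYPE_ARRAY with key_size = 4, value_size = 64
 * and BPF_F_RDONLY_PROG set; the section contents are then copied into
 * the anonymous mmap()'d buffer allocated above, which serves as the
 * map's initial value.
 */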
1636
1637 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1638 {
1639         struct elf_sec_desc *sec_desc;
1640         const char *sec_name;
1641         int err = 0, sec_idx;
1642
1643         /*
1644          * Populate obj->maps with libbpf internal maps.
1645          */
1646         for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1647                 sec_desc = &obj->efile.secs[sec_idx];
1648
1649                 /* Skip recognized sections with size 0. */
1650                 if (!sec_desc->data || sec_desc->data->d_size == 0)
1651                         continue;
1652
1653                 switch (sec_desc->sec_type) {
1654                 case SEC_DATA:
1655                         sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1656                         err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1657                                                             sec_name, sec_idx,
1658                                                             sec_desc->data->d_buf,
1659                                                             sec_desc->data->d_size);
1660                         break;
1661                 case SEC_RODATA:
1662                         obj->has_rodata = true;
1663                         sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1664                         err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1665                                                             sec_name, sec_idx,
1666                                                             sec_desc->data->d_buf,
1667                                                             sec_desc->data->d_size);
1668                         break;
1669                 case SEC_BSS:
1670                         sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1671                         err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1672                                                             sec_name, sec_idx,
1673                                                             NULL,
1674                                                             sec_desc->data->d_size);
1675                         break;
1676                 default:
1677                         /* skip */
1678                         break;
1679                 }
1680                 if (err)
1681                         return err;
1682         }
1683         return 0;
1684 }
1685
1687 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1688                                                const void *name)
1689 {
1690         int i;
1691
1692         for (i = 0; i < obj->nr_extern; i++) {
1693                 if (strcmp(obj->externs[i].name, name) == 0)
1694                         return &obj->externs[i];
1695         }
1696         return NULL;
1697 }
1698
1699 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1700                               char value)
1701 {
1702         switch (ext->kcfg.type) {
1703         case KCFG_BOOL:
1704                 if (value == 'm') {
1705                         pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
1706                                 ext->name, value);
1707                         return -EINVAL;
1708                 }
1709                 *(bool *)ext_val = value == 'y';
1710                 break;
1711         case KCFG_TRISTATE:
1712                 if (value == 'y')
1713                         *(enum libbpf_tristate *)ext_val = TRI_YES;
1714                 else if (value == 'm')
1715                         *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1716                 else /* value == 'n' */
1717                         *(enum libbpf_tristate *)ext_val = TRI_NO;
1718                 break;
1719         case KCFG_CHAR:
1720                 *(char *)ext_val = value;
1721                 break;
1722         case KCFG_UNKNOWN:
1723         case KCFG_INT:
1724         case KCFG_CHAR_ARR:
1725         default:
1726                 pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
1727                         ext->name, value);
1728                 return -EINVAL;
1729         }
1730         ext->is_set = true;
1731         return 0;
1732 }
1733
1734 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1735                               const char *value)
1736 {
1737         size_t len;
1738
1739         if (ext->kcfg.type != KCFG_CHAR_ARR) {
1740                 pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
1741                         ext->name, value);
1742                 return -EINVAL;
1743         }
1744
1745         len = strlen(value);
1746         if (value[len - 1] != '"') {
1747                 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1748                         ext->name, value);
1749                 return -EINVAL;
1750         }
1751
1752         /* strip quotes */
1753         len -= 2;
1754         if (len >= ext->kcfg.sz) {
1755                 pr_warn("extern (kcfg) '%s': long string '%s' (%zu bytes) truncated to %d bytes\n",
1756                         ext->name, value, len, ext->kcfg.sz - 1);
1757                 len = ext->kcfg.sz - 1;
1758         }
1759         memcpy(ext_val, value + 1, len);
1760         ext_val[len] = '\0';
1761         ext->is_set = true;
1762         return 0;
1763 }
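
/* Editorial illustration: for an extern declared in BPF code as
 *   extern char CONFIG_LOCALVERSION[8] __kconfig;
 * the line CONFIG_LOCALVERSION="-custom-suffix" has its quotes stripped
 * and the value truncated to 7 characters plus NUL ("-custom"), with
 * the truncation warning above.
 */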
1764
1765 static int parse_u64(const char *value, __u64 *res)
1766 {
1767         char *value_end;
1768         int err;
1769
1770         errno = 0;
1771         *res = strtoull(value, &value_end, 0);
1772         if (errno) {
1773                 err = -errno;
1774                 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1775                 return err;
1776         }
1777         if (*value_end) {
1778                 pr_warn("failed to parse '%s' as integer completely\n", value);
1779                 return -EINVAL;
1780         }
1781         return 0;
1782 }
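
/* Editorial usage sketch: base 0 in strtoull() lets parse_u64() accept
 * decimal, hex and octal spellings:
 *   __u64 v;
 *   parse_u64("250", &v);    // v == 250, returns 0
 *   parse_u64("0x100", &v);  // v == 256, returns 0
 *   parse_u64("12ab", &v);   // returns -EINVAL (trailing junk)
 */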
1783
1784 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
1785 {
1786         int bit_sz = ext->kcfg.sz * 8;
1787
1788         if (ext->kcfg.sz == 8)
1789                 return true;
1790
1791         /* Validate that the value stored in u64 fits in an integer of
1792          * `ext->kcfg.sz` bytes without loss of information. If the target integer
1793          * is signed, we rely on the following limits of integer type of
1794          * Y bits and subsequent transformation:
1795          *
1796          *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
1797          *            0 <= X + 2^(Y-1) <= 2^Y - 1
1798          *            0 <= X + 2^(Y-1) <  2^Y
1799          *
1800          *  For unsigned target integer, check that all the (64 - Y) bits are
1801          *  zero.
1802          */
1803         if (ext->kcfg.is_signed)
1804                 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1805         else
1806                 return (v >> bit_sz) == 0;
1807 }
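
/* Editorial worked example for the signed case above, with a 1-byte
 * target (Y = 8): X = -1 arrives as v = 0xffffffffffffffff; v + 2^7
 * wraps around to 0x7f, which is < 2^8, so it is accepted. X = 300
 * gives 300 + 128 = 428 >= 256 and is rejected, matching the
 * [-128, 127] range of a signed byte.
 */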
1808
1809 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1810                               __u64 value)
1811 {
1812         if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
1813             ext->kcfg.type != KCFG_BOOL) {
1814                 pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
1815                         ext->name, (unsigned long long)value);
1816                 return -EINVAL;
1817         }
1818         if (ext->kcfg.type == KCFG_BOOL && value > 1) {
1819                 pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
1820                         ext->name, (unsigned long long)value);
1821                 return -EINVAL;
1822         }
1824         if (!is_kcfg_value_in_range(ext, value)) {
1825                 pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
1826                         ext->name, (unsigned long long)value, ext->kcfg.sz);
1827                 return -ERANGE;
1828         }
1829         switch (ext->kcfg.sz) {
1830         case 1:
1831                 *(__u8 *)ext_val = value;
1832                 break;
1833         case 2:
1834                 *(__u16 *)ext_val = value;
1835                 break;
1836         case 4:
1837                 *(__u32 *)ext_val = value;
1838                 break;
1839         case 8:
1840                 *(__u64 *)ext_val = value;
1841                 break;
1842         default:
1843                 return -EINVAL;
1844         }
1845         ext->is_set = true;
1846         return 0;
1847 }
1848
1849 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1850                                             char *buf, void *data)
1851 {
1852         struct extern_desc *ext;
1853         char *sep, *value;
1854         int len, err = 0;
1855         void *ext_val;
1856         __u64 num;
1857
1858         if (!str_has_pfx(buf, "CONFIG_"))
1859                 return 0;
1860
1861         sep = strchr(buf, '=');
1862         if (!sep) {
1863                 pr_warn("failed to parse '%s': no separator\n", buf);
1864                 return -EINVAL;
1865         }
1866
1867         /* Trim ending '\n' */
1868         len = strlen(buf);
1869         if (buf[len - 1] == '\n')
1870                 buf[len - 1] = '\0';
1871         /* Split on '=' and ensure that a value is present. */
1872         *sep = '\0';
1873         if (!sep[1]) {
1874                 *sep = '=';
1875                 pr_warn("failed to parse '%s': no value\n", buf);
1876                 return -EINVAL;
1877         }
1878
1879         ext = find_extern_by_name(obj, buf);
1880         if (!ext || ext->is_set)
1881                 return 0;
1882
1883         ext_val = data + ext->kcfg.data_off;
1884         value = sep + 1;
1885
1886         switch (*value) {
1887         case 'y': case 'n': case 'm':
1888                 err = set_kcfg_value_tri(ext, ext_val, *value);
1889                 break;
1890         case '"':
1891                 err = set_kcfg_value_str(ext, ext_val, value);
1892                 break;
1893         default:
1894                 /* assume integer */
1895                 err = parse_u64(value, &num);
1896                 if (err) {
1897                         pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
1898                         return err;
1899                 }
1900                 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
1901                         pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
1902                         return -EINVAL;
1903                 }
1904                 err = set_kcfg_value_num(ext, ext_val, num);
1905                 break;
1906         }
1907         if (err)
1908                 return err;
1909         pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
1910         return 0;
1911 }
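
/* Editorial illustration: with externs declared in BPF code as
 *   extern int CONFIG_HZ __kconfig;
 *   extern enum libbpf_tristate CONFIG_MODULES __kconfig;
 * the line "CONFIG_HZ=250" is routed through parse_u64() and
 * set_kcfg_value_num(), while "CONFIG_MODULES=y" goes through
 * set_kcfg_value_tri().
 */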
1912
1913 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1914 {
1915         char buf[PATH_MAX];
1916         struct utsname uts;
1917         int len, err = 0;
1918         gzFile file;
1919
1920         uname(&uts);
1921         len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1922         if (len < 0)
1923                 return -EINVAL;
1924         else if (len >= PATH_MAX)
1925                 return -ENAMETOOLONG;
1926
1927         /* gzopen also accepts uncompressed files. */
1928         file = gzopen(buf, "r");
1929         if (!file)
1930                 file = gzopen("/proc/config.gz", "r");
1931
1932         if (!file) {
1933                 pr_warn("failed to open system Kconfig\n");
1934                 return -ENOENT;
1935         }
1936
1937         while (gzgets(file, buf, sizeof(buf))) {
1938                 err = bpf_object__process_kconfig_line(obj, buf, data);
1939                 if (err) {
1940                         pr_warn("error parsing system Kconfig line '%s': %d\n",
1941                                 buf, err);
1942                         goto out;
1943                 }
1944         }
1945
1946 out:
1947         gzclose(file);
1948         return err;
1949 }
1950
1951 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1952                                         const char *config, void *data)
1953 {
1954         char buf[PATH_MAX];
1955         int err = 0;
1956         FILE *file;
1957
1958         file = fmemopen((void *)config, strlen(config), "r");
1959         if (!file) {
1960                 err = -errno;
1961                 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1962                 return err;
1963         }
1964
1965         while (fgets(buf, sizeof(buf), file)) {
1966                 err = bpf_object__process_kconfig_line(obj, buf, data);
1967                 if (err) {
1968                         pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1969                                 buf, err);
1970                         break;
1971                 }
1972         }
1973
1974         fclose(file);
1975         return err;
1976 }
1977
1978 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1979 {
1980         struct extern_desc *last_ext = NULL, *ext;
1981         size_t map_sz;
1982         int i, err;
1983
1984         for (i = 0; i < obj->nr_extern; i++) {
1985                 ext = &obj->externs[i];
1986                 if (ext->type == EXT_KCFG)
1987                         last_ext = ext;
1988         }
1989
1990         if (!last_ext)
1991                 return 0;
1992
1993         map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
1994         err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1995                                             ".kconfig", obj->efile.symbols_shndx,
1996                                             NULL, map_sz);
1997         if (err)
1998                 return err;
1999
2000         obj->kconfig_map_idx = obj->nr_maps - 1;
2001
2002         return 0;
2003 }
2004
2005 const struct btf_type *
2006 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
2007 {
2008         const struct btf_type *t = btf__type_by_id(btf, id);
2009
2010         if (res_id)
2011                 *res_id = id;
2012
2013         while (btf_is_mod(t) || btf_is_typedef(t)) {
2014                 if (res_id)
2015                         *res_id = t->type;
2016                 t = btf__type_by_id(btf, t->type);
2017         }
2018
2019         return t;
2020 }
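
/* Editorial illustration: for BPF-side C like
 *   typedef struct foo foo_t;
 *   const volatile foo_t *p;
 * calling skip_mods_and_typedefs() on p's pointee type peels CONST,
 * VOLATILE and TYPEDEF and returns the underlying STRUCT foo, storing
 * its BTF type ID in *res_id when requested.
 */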
2021
2022 static const struct btf_type *
2023 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2024 {
2025         const struct btf_type *t;
2026
2027         t = skip_mods_and_typedefs(btf, id, NULL);
2028         if (!btf_is_ptr(t))
2029                 return NULL;
2030
2031         t = skip_mods_and_typedefs(btf, t->type, res_id);
2032
2033         return btf_is_func_proto(t) ? t : NULL;
2034 }
2035
2036 static const char *__btf_kind_str(__u16 kind)
2037 {
2038         switch (kind) {
2039         case BTF_KIND_UNKN: return "void";
2040         case BTF_KIND_INT: return "int";
2041         case BTF_KIND_PTR: return "ptr";
2042         case BTF_KIND_ARRAY: return "array";
2043         case BTF_KIND_STRUCT: return "struct";
2044         case BTF_KIND_UNION: return "union";
2045         case BTF_KIND_ENUM: return "enum";
2046         case BTF_KIND_FWD: return "fwd";
2047         case BTF_KIND_TYPEDEF: return "typedef";
2048         case BTF_KIND_VOLATILE: return "volatile";
2049         case BTF_KIND_CONST: return "const";
2050         case BTF_KIND_RESTRICT: return "restrict";
2051         case BTF_KIND_FUNC: return "func";
2052         case BTF_KIND_FUNC_PROTO: return "func_proto";
2053         case BTF_KIND_VAR: return "var";
2054         case BTF_KIND_DATASEC: return "datasec";
2055         case BTF_KIND_FLOAT: return "float";
2056         case BTF_KIND_DECL_TAG: return "decl_tag";
2057         case BTF_KIND_TYPE_TAG: return "type_tag";
2058         case BTF_KIND_ENUM64: return "enum64";
2059         default: return "unknown";
2060         }
2061 }
2062
2063 const char *btf_kind_str(const struct btf_type *t)
2064 {
2065         return __btf_kind_str(btf_kind(t));
2066 }
2067
2068 /*
2069  * Fetch an integer attribute of a BTF map definition. Such attributes are
2070  * represented using a pointer to an array whose number of elements encodes
2071  * the integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; encodes the
2072  * `type => BPF_MAP_TYPE_ARRAY` key/value pair entirely within the BTF type
2073  * definition, while using only sizeof(void *) of space in the ELF data section.
2074  */
2075 static bool get_map_field_int(const char *map_name, const struct btf *btf,
2076                               const struct btf_member *m, __u32 *res)
2077 {
2078         const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2079         const char *name = btf__name_by_offset(btf, m->name_off);
2080         const struct btf_array *arr_info;
2081         const struct btf_type *arr_t;
2082
2083         if (!btf_is_ptr(t)) {
2084                 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2085                         map_name, name, btf_kind_str(t));
2086                 return false;
2087         }
2088
2089         arr_t = btf__type_by_id(btf, t->type);
2090         if (!arr_t) {
2091                 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2092                         map_name, name, t->type);
2093                 return false;
2094         }
2095         if (!btf_is_array(arr_t)) {
2096                 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2097                         map_name, name, btf_kind_str(arr_t));
2098                 return false;
2099         }
2100         arr_info = btf_array(arr_t);
2101         *res = arr_info->nelems;
2102         return true;
2103 }
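
/* Editorial illustration: the pointer-to-array encoding described above
 * is what bpf_helpers.h's __uint() macro expands to, e.g.
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);  // int (*type)[2]
 *           __uint(max_entries, 256);          // int (*max_entries)[256]
 *   } m SEC(".maps");
 * get_map_field_int() recovers 2 and 256 from the arrays' nelems.
 */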
2104
2105 static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
2106 {
2107         int len;
2108
2109         len = snprintf(buf, buf_sz, "%s/%s", path, name);
2110         if (len < 0)
2111                 return -EINVAL;
2112         if (len >= buf_sz)
2113                 return -ENAMETOOLONG;
2114
2115         return 0;
2116 }
2117
2118 static int build_map_pin_path(struct bpf_map *map, const char *path)
2119 {
2120         char buf[PATH_MAX];
2121         int err;
2122
2123         if (!path)
2124                 path = "/sys/fs/bpf";
2125
2126         err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
2127         if (err)
2128                 return err;
2129
2130         return bpf_map__set_pin_path(map, buf);
2131 }
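
/* Editorial illustration: with the default pin root, a map named
 * "my_map" gets pin path "/sys/fs/bpf/my_map"; an explicit
 * pin_root_path of "/sys/fs/bpf/myapp" passed at open time yields
 * "/sys/fs/bpf/myapp/my_map".
 */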
2132
2133 /* should match definition in bpf_helpers.h */
2134 enum libbpf_pin_type {
2135         LIBBPF_PIN_NONE,
2136         /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2137         LIBBPF_PIN_BY_NAME,
2138 };
2139
2140 int parse_btf_map_def(const char *map_name, struct btf *btf,
2141                       const struct btf_type *def_t, bool strict,
2142                       struct btf_map_def *map_def, struct btf_map_def *inner_def)
2143 {
2144         const struct btf_type *t;
2145         const struct btf_member *m;
2146         bool is_inner = inner_def == NULL;
2147         int vlen, i;
2148
2149         vlen = btf_vlen(def_t);
2150         m = btf_members(def_t);
2151         for (i = 0; i < vlen; i++, m++) {
2152                 const char *name = btf__name_by_offset(btf, m->name_off);
2153
2154                 if (!name) {
2155                         pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2156                         return -EINVAL;
2157                 }
2158                 if (strcmp(name, "type") == 0) {
2159                         if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2160                                 return -EINVAL;
2161                         map_def->parts |= MAP_DEF_MAP_TYPE;
2162                 } else if (strcmp(name, "max_entries") == 0) {
2163                         if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2164                                 return -EINVAL;
2165                         map_def->parts |= MAP_DEF_MAX_ENTRIES;
2166                 } else if (strcmp(name, "map_flags") == 0) {
2167                         if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2168                                 return -EINVAL;
2169                         map_def->parts |= MAP_DEF_MAP_FLAGS;
2170                 } else if (strcmp(name, "numa_node") == 0) {
2171                         if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2172                                 return -EINVAL;
2173                         map_def->parts |= MAP_DEF_NUMA_NODE;
2174                 } else if (strcmp(name, "key_size") == 0) {
2175                         __u32 sz;
2176
2177                         if (!get_map_field_int(map_name, btf, m, &sz))
2178                                 return -EINVAL;
2179                         if (map_def->key_size && map_def->key_size != sz) {
2180                                 pr_warn("map '%s': conflicting key size %u != %u.\n",
2181                                         map_name, map_def->key_size, sz);
2182                                 return -EINVAL;
2183                         }
2184                         map_def->key_size = sz;
2185                         map_def->parts |= MAP_DEF_KEY_SIZE;
2186                 } else if (strcmp(name, "key") == 0) {
2187                         __s64 sz;
2188
2189                         t = btf__type_by_id(btf, m->type);
2190                         if (!t) {
2191                                 pr_warn("map '%s': key type [%d] not found.\n",
2192                                         map_name, m->type);
2193                                 return -EINVAL;
2194                         }
2195                         if (!btf_is_ptr(t)) {
2196                                 pr_warn("map '%s': key spec is not PTR: %s.\n",
2197                                         map_name, btf_kind_str(t));
2198                                 return -EINVAL;
2199                         }
2200                         sz = btf__resolve_size(btf, t->type);
2201                         if (sz < 0) {
2202                                 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2203                                         map_name, t->type, (ssize_t)sz);
2204                                 return sz;
2205                         }
2206                         if (map_def->key_size && map_def->key_size != sz) {
2207                                 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2208                                         map_name, map_def->key_size, (ssize_t)sz);
2209                                 return -EINVAL;
2210                         }
2211                         map_def->key_size = sz;
2212                         map_def->key_type_id = t->type;
2213                         map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2214                 } else if (strcmp(name, "value_size") == 0) {
2215                         __u32 sz;
2216
2217                         if (!get_map_field_int(map_name, btf, m, &sz))
2218                                 return -EINVAL;
2219                         if (map_def->value_size && map_def->value_size != sz) {
2220                                 pr_warn("map '%s': conflicting value size %u != %u.\n",
2221                                         map_name, map_def->value_size, sz);
2222                                 return -EINVAL;
2223                         }
2224                         map_def->value_size = sz;
2225                         map_def->parts |= MAP_DEF_VALUE_SIZE;
2226                 } else if (strcmp(name, "value") == 0) {
2227                         __s64 sz;
2228
2229                         t = btf__type_by_id(btf, m->type);
2230                         if (!t) {
2231                                 pr_warn("map '%s': value type [%d] not found.\n",
2232                                         map_name, m->type);
2233                                 return -EINVAL;
2234                         }
2235                         if (!btf_is_ptr(t)) {
2236                                 pr_warn("map '%s': value spec is not PTR: %s.\n",
2237                                         map_name, btf_kind_str(t));
2238                                 return -EINVAL;
2239                         }
2240                         sz = btf__resolve_size(btf, t->type);
2241                         if (sz < 0) {
2242                                 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2243                                         map_name, t->type, (ssize_t)sz);
2244                                 return sz;
2245                         }
2246                         if (map_def->value_size && map_def->value_size != sz) {
2247                                 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2248                                         map_name, map_def->value_size, (ssize_t)sz);
2249                                 return -EINVAL;
2250                         }
2251                         map_def->value_size = sz;
2252                         map_def->value_type_id = t->type;
2253                         map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2254                 } else if (strcmp(name, "values") == 0) {
2256                         bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2257                         bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2258                         const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2259                         char inner_map_name[128];
2260                         int err;
2261
2262                         if (is_inner) {
2263                                 pr_warn("map '%s': multi-level inner maps not supported.\n",
2264                                         map_name);
2265                                 return -ENOTSUP;
2266                         }
2267                         if (i != vlen - 1) {
2268                                 pr_warn("map '%s': '%s' member should be last.\n",
2269                                         map_name, name);
2270                                 return -EINVAL;
2271                         }
2272                         if (!is_map_in_map && !is_prog_array) {
2273                                 pr_warn("map '%s': should be map-in-map or prog-array.\n",
2274                                         map_name);
2275                                 return -ENOTSUP;
2276                         }
2277                         if (map_def->value_size && map_def->value_size != 4) {
2278                                 pr_warn("map '%s': conflicting value size %u != 4.\n",
2279                                         map_name, map_def->value_size);
2280                                 return -EINVAL;
2281                         }
2282                         map_def->value_size = 4;
2283                         t = btf__type_by_id(btf, m->type);
2284                         if (!t) {
2285                                 pr_warn("map '%s': %s type [%d] not found.\n",
2286                                         map_name, desc, m->type);
2287                                 return -EINVAL;
2288                         }
2289                         if (!btf_is_array(t) || btf_array(t)->nelems) {
2290                                 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2291                                         map_name, desc);
2292                                 return -EINVAL;
2293                         }
2294                         t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2295                         if (!btf_is_ptr(t)) {
2296                                 pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2297                                         map_name, desc, btf_kind_str(t));
2298                                 return -EINVAL;
2299                         }
2300                         t = skip_mods_and_typedefs(btf, t->type, NULL);
2301                         if (is_prog_array) {
2302                                 if (!btf_is_func_proto(t)) {
2303                                         pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2304                                                 map_name, btf_kind_str(t));
2305                                         return -EINVAL;
2306                                 }
2307                                 continue;
2308                         }
2309                         if (!btf_is_struct(t)) {
2310                                 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2311                                         map_name, btf_kind_str(t));
2312                                 return -EINVAL;
2313                         }
2314
2315                         snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2316                         err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2317                         if (err)
2318                                 return err;
2319
2320                         map_def->parts |= MAP_DEF_INNER_MAP;
2321                 } else if (strcmp(name, "pinning") == 0) {
2322                         __u32 val;
2323
2324                         if (is_inner) {
2325                                 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2326                                 return -EINVAL;
2327                         }
2328                         if (!get_map_field_int(map_name, btf, m, &val))
2329                                 return -EINVAL;
2330                         if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2331                                 pr_warn("map '%s': invalid pinning value %u.\n",
2332                                         map_name, val);
2333                                 return -EINVAL;
2334                         }
2335                         map_def->pinning = val;
2336                         map_def->parts |= MAP_DEF_PINNING;
2337                 } else if (strcmp(name, "map_extra") == 0) {
2338                         __u32 map_extra;
2339
2340                         if (!get_map_field_int(map_name, btf, m, &map_extra))
2341                                 return -EINVAL;
2342                         map_def->map_extra = map_extra;
2343                         map_def->parts |= MAP_DEF_MAP_EXTRA;
2344                 } else {
2345                         if (strict) {
2346                                 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2347                                 return -ENOTSUP;
2348                         }
2349                         pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2350                 }
2351         }
2352
2353         if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2354                 pr_warn("map '%s': map type isn't specified.\n", map_name);
2355                 return -EINVAL;
2356         }
2357
2358         return 0;
2359 }
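
/* Editorial illustration: a BTF-defined map exercising several of the
 * fields parsed above, written with bpf_helpers.h macros ('struct
 * my_val' stands in for any value type defined nearby):
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_HASH);
 *           __uint(max_entries, 1024);
 *           __type(key, __u32);            // "key": PTR to key type
 *           __type(value, struct my_val);  // "value": PTR to value type
 *           __uint(pinning, LIBBPF_PIN_BY_NAME);
 *   } my_hash SEC(".maps");
 */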
2360
2361 static size_t adjust_ringbuf_sz(size_t sz)
2362 {
2363         __u32 page_sz = sysconf(_SC_PAGE_SIZE);
2364         __u32 mul;
2365
2366         /* if user forgot to set any size, make sure they see an error */
2367         if (sz == 0)
2368                 return 0;
2369         /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2370          * a power-of-2 multiple of the kernel's page size. If the user diligently
2371          * satisfied these conditions, pass the size through.
2372          */
2373         if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2374                 return sz;
2375
2376         /* Otherwise, find the closest (page_sz * power_of_2) product bigger
2377          * than the user-set size; it satisfies both the user's size request
2378          * and the kernel's requirements, and becomes the substituted max_entries.
2379          */
2380         for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2381                 if (mul * page_sz > sz)
2382                         return mul * page_sz;
2383         }
2384
2385         /* if it's impossible to satisfy the conditions (i.e., user size is
2386          * very close to UINT_MAX but is not a power-of-2 multiple of
2387          * page_size) then just return original size and let kernel reject it
2388          */
2389         return sz;
2390 }
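
/* Editorial worked example, assuming 4096-byte pages: a requested
 * ringbuf size of 32768 (4096 * 2^3) passes through unchanged, while
 * 40000 is rounded up to the next page_sz * 2^n product, 65536.
 */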
2391
2392 static bool map_is_ringbuf(const struct bpf_map *map)
2393 {
2394         return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2395                map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2396 }
2397
2398 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2399 {
2400         map->def.type = def->map_type;
2401         map->def.key_size = def->key_size;
2402         map->def.value_size = def->value_size;
2403         map->def.max_entries = def->max_entries;
2404         map->def.map_flags = def->map_flags;
2405         map->map_extra = def->map_extra;
2406
2407         map->numa_node = def->numa_node;
2408         map->btf_key_type_id = def->key_type_id;
2409         map->btf_value_type_id = def->value_type_id;
2410
2411         /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2412         if (map_is_ringbuf(map))
2413                 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2414
2415         if (def->parts & MAP_DEF_MAP_TYPE)
2416                 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2417
2418         if (def->parts & MAP_DEF_KEY_TYPE)
2419                 pr_debug("map '%s': found key [%u], sz = %u.\n",
2420                          map->name, def->key_type_id, def->key_size);
2421         else if (def->parts & MAP_DEF_KEY_SIZE)
2422                 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2423
2424         if (def->parts & MAP_DEF_VALUE_TYPE)
2425                 pr_debug("map '%s': found value [%u], sz = %u.\n",
2426                          map->name, def->value_type_id, def->value_size);
2427         else if (def->parts & MAP_DEF_VALUE_SIZE)
2428                 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2429
2430         if (def->parts & MAP_DEF_MAX_ENTRIES)
2431                 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2432         if (def->parts & MAP_DEF_MAP_FLAGS)
2433                 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2434         if (def->parts & MAP_DEF_MAP_EXTRA)
2435                 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2436                          (unsigned long long)def->map_extra);
2437         if (def->parts & MAP_DEF_PINNING)
2438                 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2439         if (def->parts & MAP_DEF_NUMA_NODE)
2440                 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2441
2442         if (def->parts & MAP_DEF_INNER_MAP)
2443                 pr_debug("map '%s': found inner map definition.\n", map->name);
2444 }
2445
2446 static const char *btf_var_linkage_str(__u32 linkage)
2447 {
2448         switch (linkage) {
2449         case BTF_VAR_STATIC: return "static";
2450         case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2451         case BTF_VAR_GLOBAL_EXTERN: return "extern";
2452         default: return "unknown";
2453         }
2454 }
2455
2456 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2457                                          const struct btf_type *sec,
2458                                          int var_idx, int sec_idx,
2459                                          const Elf_Data *data, bool strict,
2460                                          const char *pin_root_path)
2461 {
2462         struct btf_map_def map_def = {}, inner_def = {};
2463         const struct btf_type *var, *def;
2464         const struct btf_var_secinfo *vi;
2465         const struct btf_var *var_extra;
2466         const char *map_name;
2467         struct bpf_map *map;
2468         int err;
2469
2470         vi = btf_var_secinfos(sec) + var_idx;
2471         var = btf__type_by_id(obj->btf, vi->type);
2472         var_extra = btf_var(var);
2473         map_name = btf__name_by_offset(obj->btf, var->name_off);
2474
2475         if (map_name == NULL || map_name[0] == '\0') {
2476                 pr_warn("map #%d: empty name.\n", var_idx);
2477                 return -EINVAL;
2478         }
2479         if ((__u64)vi->offset + vi->size > data->d_size) {
2480                 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2481                 return -EINVAL;
2482         }
2483         if (!btf_is_var(var)) {
2484                 pr_warn("map '%s': unexpected var kind %s.\n",
2485                         map_name, btf_kind_str(var));
2486                 return -EINVAL;
2487         }
2488         if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2489                 pr_warn("map '%s': unsupported map linkage %s.\n",
2490                         map_name, btf_var_linkage_str(var_extra->linkage));
2491                 return -EOPNOTSUPP;
2492         }
2493
2494         def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2495         if (!btf_is_struct(def)) {
2496                 pr_warn("map '%s': unexpected def kind %s.\n",
2497                         map_name, btf_kind_str(def));
2498                 return -EINVAL;
2499         }
2500         if (def->size > vi->size) {
2501                 pr_warn("map '%s': invalid def size.\n", map_name);
2502                 return -EINVAL;
2503         }
2504
2505         map = bpf_object__add_map(obj);
2506         if (IS_ERR(map))
2507                 return PTR_ERR(map);
2508         map->name = strdup(map_name);
2509         if (!map->name) {
2510                 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2511                 return -ENOMEM;
2512         }
2513         map->libbpf_type = LIBBPF_MAP_UNSPEC;
2514         map->def.type = BPF_MAP_TYPE_UNSPEC;
2515         map->sec_idx = sec_idx;
2516         map->sec_offset = vi->offset;
2517         map->btf_var_idx = var_idx;
2518         pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2519                  map_name, map->sec_idx, map->sec_offset);
2520
2521         err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2522         if (err)
2523                 return err;
2524
2525         fill_map_from_def(map, &map_def);
2526
2527         if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2528                 err = build_map_pin_path(map, pin_root_path);
2529                 if (err) {
2530                         pr_warn("map '%s': couldn't build pin path.\n", map->name);
2531                         return err;
2532                 }
2533         }
2534
2535         if (map_def.parts & MAP_DEF_INNER_MAP) {
2536                 map->inner_map = calloc(1, sizeof(*map->inner_map));
2537                 if (!map->inner_map)
2538                         return -ENOMEM;
2539                 map->inner_map->fd = -1;
2540                 map->inner_map->sec_idx = sec_idx;
2541                 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2542                 if (!map->inner_map->name)
2543                         return -ENOMEM;
2544                 sprintf(map->inner_map->name, "%s.inner", map_name);
2545
2546                 fill_map_from_def(map->inner_map, &inner_def);
2547         }
2548
2549         err = map_fill_btf_type_info(obj, map);
2550         if (err)
2551                 return err;
2552
2553         return 0;
2554 }
2555
2556 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2557                                           const char *pin_root_path)
2558 {
2559         const struct btf_type *sec = NULL;
2560         int nr_types, i, vlen, err;
2561         const struct btf_type *t;
2562         const char *name;
2563         Elf_Data *data;
2564         Elf_Scn *scn;
2565
2566         if (obj->efile.btf_maps_shndx < 0)
2567                 return 0;
2568
2569         scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2570         data = elf_sec_data(obj, scn);
2571         if (!scn || !data) {
2572                 pr_warn("elf: failed to get %s map definitions for %s\n",
2573                         MAPS_ELF_SEC, obj->path);
2574                 return -EINVAL;
2575         }
2576
2577         nr_types = btf__type_cnt(obj->btf);
2578         for (i = 1; i < nr_types; i++) {
2579                 t = btf__type_by_id(obj->btf, i);
2580                 if (!btf_is_datasec(t))
2581                         continue;
2582                 name = btf__name_by_offset(obj->btf, t->name_off);
2583                 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2584                         sec = t;
2585                         obj->efile.btf_maps_sec_btf_id = i;
2586                         break;
2587                 }
2588         }
2589
2590         if (!sec) {
2591                 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2592                 return -ENOENT;
2593         }
2594
2595         vlen = btf_vlen(sec);
2596         for (i = 0; i < vlen; i++) {
2597                 err = bpf_object__init_user_btf_map(obj, sec, i,
2598                                                     obj->efile.btf_maps_shndx,
2599                                                     data, strict,
2600                                                     pin_root_path);
2601                 if (err)
2602                         return err;
2603         }
2604
2605         return 0;
2606 }
2607
2608 static int bpf_object__init_maps(struct bpf_object *obj,
2609                                  const struct bpf_object_open_opts *opts)
2610 {
2611         const char *pin_root_path;
2612         bool strict;
2613         int err = 0;
2614
2615         strict = !OPTS_GET(opts, relaxed_maps, false);
2616         pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2617
2618         err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2619         err = err ?: bpf_object__init_global_data_maps(obj);
2620         err = err ?: bpf_object__init_kconfig_map(obj);
2621         err = err ?: bpf_object__init_struct_ops_maps(obj);
2622
2623         return err;
2624 }
2625
2626 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2627 {
2628         Elf64_Shdr *sh;
2629
2630         sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2631         if (!sh)
2632                 return false;
2633
2634         return sh->sh_flags & SHF_EXECINSTR;
2635 }
2636
2637 static bool btf_needs_sanitization(struct bpf_object *obj)
2638 {
2639         bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2640         bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2641         bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2642         bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2643         bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2644         bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2645         bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2646
2647         return !has_func || !has_datasec || !has_func_global || !has_float ||
2648                !has_decl_tag || !has_type_tag || !has_enum64;
2649 }
2650
2651 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2652 {
2653         bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2654         bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2655         bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2656         bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2657         bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2658         bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2659         bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2660         int enum64_placeholder_id = 0;
2661         struct btf_type *t;
2662         int i, j, vlen;
2663
2664         for (i = 1; i < btf__type_cnt(btf); i++) {
2665                 t = (struct btf_type *)btf__type_by_id(btf, i);
2666
2667                 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
2668                         /* replace VAR/DECL_TAG with INT */
2669                         t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2670                         /*
2671                          * using size = 1 is the safest choice: 4 would be too
2672                          * big and would cause a kernel BTF validation failure
2673                          * if the original variable took less than 4 bytes
2674                          */
2675                         t->size = 1;
2676                         *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2677                 } else if (!has_datasec && btf_is_datasec(t)) {
2678                         /* replace DATASEC with STRUCT */
2679                         const struct btf_var_secinfo *v = btf_var_secinfos(t);
2680                         struct btf_member *m = btf_members(t);
2681                         struct btf_type *vt;
2682                         char *name;
2683
2684                         name = (char *)btf__name_by_offset(btf, t->name_off);
2685                         while (*name) {
2686                                 if (*name == '.')
2687                                         *name = '_';
2688                                 name++;
2689                         }
2690
2691                         vlen = btf_vlen(t);
2692                         t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2693                         for (j = 0; j < vlen; j++, v++, m++) {
2694                                 /* order of field assignments is important */
2695                                 m->offset = v->offset * 8;
2696                                 m->type = v->type;
2697                                 /* preserve variable name as member name */
2698                                 vt = (void *)btf__type_by_id(btf, v->type);
2699                                 m->name_off = vt->name_off;
2700                         }
2701                 } else if (!has_func && btf_is_func_proto(t)) {
2702                         /* replace FUNC_PROTO with ENUM */
2703                         vlen = btf_vlen(t);
2704                         t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2705                         t->size = sizeof(__u32); /* kernel enforced */
2706                 } else if (!has_func && btf_is_func(t)) {
2707                         /* replace FUNC with TYPEDEF */
2708                         t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2709                 } else if (!has_func_global && btf_is_func(t)) {
2710                         /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2711                         t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2712                 } else if (!has_float && btf_is_float(t)) {
2713                         /* replace FLOAT with an equally-sized empty STRUCT;
2714                          * since C compilers do not accept e.g. "float" as a
2715                          * valid struct name, make it anonymous
2716                          */
2717                         t->name_off = 0;
2718                         t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2719                 } else if (!has_type_tag && btf_is_type_tag(t)) {
2720                         /* replace TYPE_TAG with a CONST */
2721                         t->name_off = 0;
2722                         t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
2723                 } else if (!has_enum64 && btf_is_enum(t)) {
2724                         /* clear the kflag */
2725                         t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
2726                 } else if (!has_enum64 && btf_is_enum64(t)) {
2727                         /* replace ENUM64 with a union */
2728                         struct btf_member *m;
2729
2730                         if (enum64_placeholder_id == 0) {
2731                                 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
2732                                 if (enum64_placeholder_id < 0)
2733                                         return enum64_placeholder_id;
2734
2735                                 t = (struct btf_type *)btf__type_by_id(btf, i);
2736                         }
2737
2738                         m = btf_members(t);
2739                         vlen = btf_vlen(t);
2740                         t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
2741                         for (j = 0; j < vlen; j++, m++) {
2742                                 m->type = enum64_placeholder_id;
2743                                 m->offset = 0;
2744                         }
2745                 }
2746         }
2747
2748         return 0;
2749 }
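
/* Editor's note: a minimal sketch of what the sanitization above rewrites.
 * BTF_INFO_ENC() (from uapi/linux/btf.h) packs kind, kflag and vlen into
 * the 32-bit btf_type.info word:
 *
 *   __u32 info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 3);
 *   // kind  = (info >> 24) & 0x1f  -> BTF_KIND_STRUCT
 *   // kflag = info >> 31           -> 0
 *   // vlen  = info & 0xffff        -> 3 members
 *
 * Downgrading a type for an older kernel is thus an in-place overwrite of
 * this word (plus size/member fields); the type blob is never resized.
 */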
2750
2751 static bool libbpf_needs_btf(const struct bpf_object *obj)
2752 {
2753         return obj->efile.btf_maps_shndx >= 0 ||
2754                obj->efile.st_ops_shndx >= 0 ||
2755                obj->nr_extern > 0;
2756 }
2757
2758 static bool kernel_needs_btf(const struct bpf_object *obj)
2759 {
2760         return obj->efile.st_ops_shndx >= 0;
2761 }
2762
2763 static int bpf_object__init_btf(struct bpf_object *obj,
2764                                 Elf_Data *btf_data,
2765                                 Elf_Data *btf_ext_data)
2766 {
2767         int err = -ENOENT;
2768
2769         if (btf_data) {
2770                 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2771                 err = libbpf_get_error(obj->btf);
2772                 if (err) {
2773                         obj->btf = NULL;
2774                         pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
2775                         goto out;
2776                 }
2777                 /* enforce 8-byte pointers for BPF-targeted BTFs */
2778                 btf__set_pointer_size(obj->btf, 8);
2779         }
2780         if (btf_ext_data) {
2781                 struct btf_ext_info *ext_segs[3];
2782                 int seg_num, sec_num;
2783
2784                 if (!obj->btf) {
2785                         pr_debug("Ignoring ELF section %s because the ELF section %s it depends on is missing.\n",
2786                                  BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2787                         goto out;
2788                 }
2789                 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2790                 err = libbpf_get_error(obj->btf_ext);
2791                 if (err) {
2792                         pr_warn("Error loading ELF section %s: %d. Ignoring it and continuing.\n",
2793                                 BTF_EXT_ELF_SEC, err);
2794                         obj->btf_ext = NULL;
2795                         goto out;
2796                 }
2797
2798                 /* setup .BTF.ext to ELF section mapping */
2799                 ext_segs[0] = &obj->btf_ext->func_info;
2800                 ext_segs[1] = &obj->btf_ext->line_info;
2801                 ext_segs[2] = &obj->btf_ext->core_relo_info;
2802                 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
2803                         struct btf_ext_info *seg = ext_segs[seg_num];
2804                         const struct btf_ext_info_sec *sec;
2805                         const char *sec_name;
2806                         Elf_Scn *scn;
2807
2808                         if (seg->sec_cnt == 0)
2809                                 continue;
2810
2811                         seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
2812                         if (!seg->sec_idxs) {
2813                                 err = -ENOMEM;
2814                                 goto out;
2815                         }
2816
2817                         sec_num = 0;
2818                         for_each_btf_ext_sec(seg, sec) {
2819                                 /* preventively increment index to avoid doing
2820                                  * this before every continue below
2821                                  */
2822                                 sec_num++;
2823
2824                                 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
2825                                 if (str_is_empty(sec_name))
2826                                         continue;
2827                                 scn = elf_sec_by_name(obj, sec_name);
2828                                 if (!scn)
2829                                         continue;
2830
2831                                 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
2832                         }
2833                 }
2834         }
2835 out:
2836         if (err && libbpf_needs_btf(obj)) {
2837                 pr_warn("BTF is required, but is missing or corrupted.\n");
2838                 return err;
2839         }
2840         return 0;
2841 }
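
/* Editor's sketch: the parsing above can also be done standalone against
 * raw section bytes (btf_buf/btf_sz and ext_buf/ext_sz are hypothetical):
 *
 *   struct btf *btf = btf__new(btf_buf, btf_sz);
 *   long err = libbpf_get_error(btf);
 *
 *   if (err)
 *           return err;
 *   btf__set_pointer_size(btf, 8); // BPF-targeted BTF uses 8-byte pointers
 *   struct btf_ext *ext = btf_ext__new(ext_buf, ext_sz);
 */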
2842
2843 static int compare_vsi_off(const void *_a, const void *_b)
2844 {
2845         const struct btf_var_secinfo *a = _a;
2846         const struct btf_var_secinfo *b = _b;
2847
2848         return a->offset - b->offset;
2849 }
2850
2851 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
2852                              struct btf_type *t)
2853 {
2854         __u32 size = 0, i, vars = btf_vlen(t);
2855         const char *sec_name = btf__name_by_offset(btf, t->name_off);
2856         struct btf_var_secinfo *vsi;
2857         bool fixup_offsets = false;
2858         int err;
2859
2860         if (!sec_name) {
2861                 pr_debug("No name found in string section for DATASEC kind.\n");
2862                 return -ENOENT;
2863         }
2864
2865         /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
2866          * variable offsets set in the previous step. Further, not every
2867          * extern BTF VAR has a corresponding ELF symbol preserved, so we skip
2868          * all fixups altogether for such sections and go straight to sorting
2869          * VARs within their DATASEC.
2870          */
2871         if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
2872                 goto sort_vars;
2873
2874         /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
2875          * fix this up. The BPF static linker, however, already fills in all
2876          * the sizes and offsets during static linking, so this step has to
2877          * be optional. The STV_HIDDEN handling, on the other hand, is
2878          * non-optional for any non-extern DATASEC, so the variable fixup
2879          * loop below handles both jobs at the same time, paying the cost of
2880          * BTF VAR <-> ELF symbol matching just once.
2881          */
2882         if (t->size == 0) {
2883                 err = find_elf_sec_sz(obj, sec_name, &size);
2884                 if (err || !size) {
2885                         pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
2886                                  sec_name, size, err);
2887                         return -ENOENT;
2888                 }
2889
2890                 t->size = size;
2891                 fixup_offsets = true;
2892         }
2893
2894         for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
2895                 const struct btf_type *t_var;
2896                 struct btf_var *var;
2897                 const char *var_name;
2898                 Elf64_Sym *sym;
2899
2900                 t_var = btf__type_by_id(btf, vsi->type);
2901                 if (!t_var || !btf_is_var(t_var)) {
2902                         pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
2903                         return -EINVAL;
2904                 }
2905
2906                 var = btf_var(t_var);
2907                 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
2908                         continue;
2909
2910                 var_name = btf__name_by_offset(btf, t_var->name_off);
2911                 if (!var_name) {
2912                         pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
2913                                  sec_name, i);
2914                         return -ENOENT;
2915                 }
2916
2917                 sym = find_elf_var_sym(obj, var_name);
2918                 if (IS_ERR(sym)) {
2919                         pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
2920                                  sec_name, var_name);
2921                         return -ENOENT;
2922                 }
2923
2924                 if (fixup_offsets)
2925                         vsi->offset = sym->st_value;
2926
2927                 /* if a variable is a global/weak symbol, but has restricted
2928                  * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
2929                  * as static. This follows similar logic for functions (BPF
2930                  * subprogs) and influences libbpf's further decisions about
2931                  * whether to mark global-data BPF array maps as
2932                  * BPF_F_MMAPABLE.
2933                  */
2934                 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
2935                     || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
2936                         var->linkage = BTF_VAR_STATIC;
2937         }
2938
2939 sort_vars:
2940         qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
2941         return 0;
2942 }
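
/* Editor's illustration (hypothetical .data section): clang emits the
 * DATASEC with size 0 and all VAR offsets 0, e.g.
 *
 *   DATASEC '.data' size=0 vlen=2
 *           type_id=5 offset=0 size=4    (VAR 'x')
 *           type_id=6 offset=0 size=8    (VAR 'y')
 *
 * and the fixup above patches in the ELF-derived values, e.g.
 *
 *   DATASEC '.data' size=16 vlen=2
 *           type_id=5 offset=0 size=4
 *           type_id=6 offset=8 size=8
 */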
2943
2944 static int bpf_object_fixup_btf(struct bpf_object *obj)
2945 {
2946         int i, n, err = 0;
2947
2948         if (!obj->btf)
2949                 return 0;
2950
2951         n = btf__type_cnt(obj->btf);
2952         for (i = 1; i < n; i++) {
2953                 struct btf_type *t = btf_type_by_id(obj->btf, i);
2954
2955                 /* Loader needs to fix up some of the things compiler
2956                  * couldn't get its hands on while emitting BTF. This
2957                  * is section size and global variable offset. We use
2958                  * the info from the ELF itself for this purpose.
2959                  */
2960                 if (btf_is_datasec(t)) {
2961                         err = btf_fixup_datasec(obj, obj->btf, t);
2962                         if (err)
2963                                 return err;
2964                 }
2965         }
2966
2967         return 0;
2968 }
2969
2970 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2971 {
2972         if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2973             prog->type == BPF_PROG_TYPE_LSM)
2974                 return true;
2975
2976         /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2977          * also need vmlinux BTF
2978          */
2979         if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2980                 return true;
2981
2982         return false;
2983 }
2984
2985 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2986 {
2987         struct bpf_program *prog;
2988         int i;
2989
2990         /* CO-RE relocations need kernel BTF, but only when btf_custom_path
2991          * is not specified
2992          */
2993         if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
2994                 return true;
2995
2996         /* Support for typed ksyms needs kernel BTF */
2997         for (i = 0; i < obj->nr_extern; i++) {
2998                 const struct extern_desc *ext;
2999
3000                 ext = &obj->externs[i];
3001                 if (ext->type == EXT_KSYM && ext->ksym.type_id)
3002                         return true;
3003         }
3004
3005         bpf_object__for_each_program(prog, obj) {
3006                 if (!prog->autoload)
3007                         continue;
3008                 if (prog_needs_vmlinux_btf(prog))
3009                         return true;
3010         }
3011
3012         return false;
3013 }
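
/* Editor's sketch of BPF-side constructs that make the checks above fire
 * (names are illustrative):
 *
 *   extern const int hypothetical_ksym __ksym;  // typed ksym -> kernel BTF
 *
 *   SEC("fentry/do_unlinkat")                   // TRACING without explicit
 *   int BPF_PROG(on_unlinkat)                   // attach_prog_fd
 *   {
 *           return 0;
 *   }
 *
 * Any CO-RE-relocatable access (BPF_CORE_READ() and friends) populates
 * btf_ext->core_relo_info and triggers the first check.
 */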
3014
3015 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3016 {
3017         int err;
3018
3019         /* btf_vmlinux could be loaded earlier */
3020         if (obj->btf_vmlinux || obj->gen_loader)
3021                 return 0;
3022
3023         if (!force && !obj_needs_vmlinux_btf(obj))
3024                 return 0;
3025
3026         obj->btf_vmlinux = btf__load_vmlinux_btf();
3027         err = libbpf_get_error(obj->btf_vmlinux);
3028         if (err) {
3029                 pr_warn("Error loading vmlinux BTF: %d\n", err);
3030                 obj->btf_vmlinux = NULL;
3031                 return err;
3032         }
3033         return 0;
3034 }
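
/* Editor's sketch: btf__load_vmlinux_btf() is also handy standalone, e.g.
 * to resolve a kernel type id from user space:
 *
 *   struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *
 *   if (!libbpf_get_error(vmlinux_btf)) {
 *           int id = btf__find_by_name_kind(vmlinux_btf, "task_struct",
 *                                           BTF_KIND_STRUCT);
 *           btf__free(vmlinux_btf);
 *   }
 */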
3035
3036 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3037 {
3038         struct btf *kern_btf = obj->btf;
3039         bool btf_mandatory, sanitize;
3040         int i, err = 0;
3041
3042         if (!obj->btf)
3043                 return 0;
3044
3045         if (!kernel_supports(obj, FEAT_BTF)) {
3046                 if (kernel_needs_btf(obj)) {
3047                         err = -EOPNOTSUPP;
3048                         goto report;
3049                 }
3050                 pr_debug("Kernel doesn't support BTF, skipping BTF upload.\n");
3051                 return 0;
3052         }
3053
3054         /* Even though some subprogs are global/weak, user might prefer more
3055          * permissive BPF verification process that BPF verifier performs for
3056          * static functions, taking into account more context from the caller
3057          * functions. In such a case, they need to mark such subprogs with
3058          * __attribute__((visibility("hidden"))) and libbpf will adjust
3059          * corresponding FUNC BTF type to be marked as static and trigger more
3060          * involved BPF verification process.
3061          */
3062         for (i = 0; i < obj->nr_programs; i++) {
3063                 struct bpf_program *prog = &obj->programs[i];
3064                 struct btf_type *t;
3065                 const char *name;
3066                 int j, n;
3067
3068                 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3069                         continue;
3070
3071                 n = btf__type_cnt(obj->btf);
3072                 for (j = 1; j < n; j++) {
3073                         t = btf_type_by_id(obj->btf, j);
3074                         if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3075                                 continue;
3076
3077                         name = btf__str_by_offset(obj->btf, t->name_off);
3078                         if (strcmp(name, prog->name) != 0)
3079                                 continue;
3080
3081                         t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3082                         break;
3083                 }
3084         }
3085
3086         sanitize = btf_needs_sanitization(obj);
3087         if (sanitize) {
3088                 const void *raw_data;
3089                 __u32 sz;
3090
3091                 /* clone BTF to sanitize a copy and leave the original intact */
3092                 raw_data = btf__raw_data(obj->btf, &sz);
3093                 kern_btf = btf__new(raw_data, sz);
3094                 err = libbpf_get_error(kern_btf);
3095                 if (err)
3096                         return err;
3097
3098                 /* enforce 8-byte pointers for BPF-targeted BTFs */
3099                 btf__set_pointer_size(obj->btf, 8);
3100                 err = bpf_object__sanitize_btf(obj, kern_btf);
3101                 if (err)
3102                         return err;
3103         }
3104
3105         if (obj->gen_loader) {
3106                 __u32 raw_size = 0;
3107                 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3108
3109                 if (!raw_data)
3110                         return -ENOMEM;
3111                 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3112                 /* Pretend to have a valid FD to pass various fd >= 0 checks.
3113                  * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3114                  */
3115                 btf__set_fd(kern_btf, 0);
3116         } else {
3117                 /* currently BPF_BTF_LOAD only supports log_level 1 */
3118                 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3119                                            obj->log_level ? 1 : 0);
3120         }
3121         if (sanitize) {
3122                 if (!err) {
3123                         /* move fd to libbpf's BTF */
3124                         btf__set_fd(obj->btf, btf__fd(kern_btf));
3125                         btf__set_fd(kern_btf, -1);
3126                 }
3127                 btf__free(kern_btf);
3128         }
3129 report:
3130         if (err) {
3131                 btf_mandatory = kernel_needs_btf(obj);
3132                 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3133                         btf_mandatory ? "BTF is mandatory, can't proceed."
3134                                       : "BTF is optional, ignoring.");
3135                 if (!btf_mandatory)
3136                         err = 0;
3137         }
3138         return err;
3139 }
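
/* Editor's sketch of the source-level trigger for the mark_btf_static
 * pass above: a global subprog annotated with hidden visibility
 * (bpf_helpers.h provides __hidden for this):
 *
 *   __hidden int scale(int x)   // global in ELF, but its FUNC BTF gets
 *   {                           // rewritten to BTF_FUNC_STATIC here
 *           return x * 2;
 *   }
 */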
3140
3141 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3142 {
3143         const char *name;
3144
3145         name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3146         if (!name) {
3147                 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3148                         off, obj->path, elf_errmsg(-1));
3149                 return NULL;
3150         }
3151
3152         return name;
3153 }
3154
3155 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3156 {
3157         const char *name;
3158
3159         name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3160         if (!name) {
3161                 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3162                         off, obj->path, elf_errmsg(-1));
3163                 return NULL;
3164         }
3165
3166         return name;
3167 }
3168
3169 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3170 {
3171         Elf_Scn *scn;
3172
3173         scn = elf_getscn(obj->efile.elf, idx);
3174         if (!scn) {
3175                 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3176                         idx, obj->path, elf_errmsg(-1));
3177                 return NULL;
3178         }
3179         return scn;
3180 }
3181
3182 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3183 {
3184         Elf_Scn *scn = NULL;
3185         Elf *elf = obj->efile.elf;
3186         const char *sec_name;
3187
3188         while ((scn = elf_nextscn(elf, scn)) != NULL) {
3189                 sec_name = elf_sec_name(obj, scn);
3190                 if (!sec_name)
3191                         return NULL;
3192
3193                 if (strcmp(sec_name, name) != 0)
3194                         continue;
3195
3196                 return scn;
3197         }
3198         return NULL;
3199 }
3200
3201 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3202 {
3203         Elf64_Shdr *shdr;
3204
3205         if (!scn)
3206                 return NULL;
3207
3208         shdr = elf64_getshdr(scn);
3209         if (!shdr) {
3210                 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3211                         elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3212                 return NULL;
3213         }
3214
3215         return shdr;
3216 }
3217
3218 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3219 {
3220         const char *name;
3221         Elf64_Shdr *sh;
3222
3223         if (!scn)
3224                 return NULL;
3225
3226         sh = elf_sec_hdr(obj, scn);
3227         if (!sh)
3228                 return NULL;
3229
3230         name = elf_sec_str(obj, sh->sh_name);
3231         if (!name) {
3232                 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3233                         elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3234                 return NULL;
3235         }
3236
3237         return name;
3238 }
3239
3240 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3241 {
3242         Elf_Data *data;
3243
3244         if (!scn)
3245                 return NULL;
3246
3247         data = elf_getdata(scn, 0);
3248         if (!data) {
3249                 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3250                         elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3251                         obj->path, elf_errmsg(-1));
3252                 return NULL;
3253         }
3254
3255         return data;
3256 }
3257
3258 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3259 {
3260         if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3261                 return NULL;
3262
3263         return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3264 }
3265
3266 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3267 {
3268         if (idx >= data->d_size / sizeof(Elf64_Rel))
3269                 return NULL;
3270
3271         return (Elf64_Rel *)data->d_buf + idx;
3272 }
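
/* Editor's sketch: typical iteration over a SHT_REL section with the
 * accessors above (nrels would come from shdr->sh_size / shdr->sh_entsize):
 *
 *   for (i = 0; i < nrels; i++) {
 *           Elf64_Rel *rel = elf_rel_by_idx(data, i);
 *           Elf64_Sym *sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
 *
 *           if (!rel || !sym)
 *                   return -LIBBPF_ERRNO__FORMAT;
 *           ...
 *   }
 */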
3273
3274 static bool is_sec_name_dwarf(const char *name)
3275 {
3276         /* approximation, but the actual list is too long */
3277         return str_has_pfx(name, ".debug_");
3278 }
3279
3280 static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3281 {
3282         /* no special handling of .strtab */
3283         if (hdr->sh_type == SHT_STRTAB)
3284                 return true;
3285
3286         /* ignore .llvm_addrsig section as well */
3287         if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3288                 return true;
3289
3290         /* having no subprograms leads to an empty .text section, ignore it */
3291         if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3292             strcmp(name, ".text") == 0)
3293                 return true;
3294
3295         /* DWARF sections */
3296         if (is_sec_name_dwarf(name))
3297                 return true;
3298
3299         if (str_has_pfx(name, ".rel")) {
3300                 name += sizeof(".rel") - 1;
3301                 /* DWARF section relocations */
3302                 if (is_sec_name_dwarf(name))
3303                         return true;
3304
3305                 /* .BTF and .BTF.ext don't need relocations */
3306                 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3307                     strcmp(name, BTF_EXT_ELF_SEC) == 0)
3308                         return true;
3309         }
3310
3311         return false;
3312 }
3313
3314 static int cmp_progs(const void *_a, const void *_b)
3315 {
3316         const struct bpf_program *a = _a;
3317         const struct bpf_program *b = _b;
3318
3319         if (a->sec_idx != b->sec_idx)
3320                 return a->sec_idx < b->sec_idx ? -1 : 1;
3321
3322         /* sec_insn_off can't be the same within the section */
3323         return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3324 }
3325
3326 static int bpf_object__elf_collect(struct bpf_object *obj)
3327 {
3328         struct elf_sec_desc *sec_desc;
3329         Elf *elf = obj->efile.elf;
3330         Elf_Data *btf_ext_data = NULL;
3331         Elf_Data *btf_data = NULL;
3332         int idx = 0, err = 0;
3333         const char *name;
3334         Elf_Data *data;
3335         Elf_Scn *scn;
3336         Elf64_Shdr *sh;
3337
3338         /* ELF section indices are 0-based, but sec #0 is a special "invalid"
3339          * section. Since the section count retrieved by elf_getshdrnum()
3340          * includes sec #0, it is already the array size needed to hold
3341          * all the sections.
3342          */
3343         if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3344                 pr_warn("elf: failed to get the number of sections for %s: %s\n",
3345                         obj->path, elf_errmsg(-1));
3346                 return -LIBBPF_ERRNO__FORMAT;
3347         }
3348         obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3349         if (!obj->efile.secs)
3350                 return -ENOMEM;
3351
3352         /* a bunch of ELF parsing functionality depends on processing symbols,
3353          * so do the first pass and find the symbol table
3354          */
3355         scn = NULL;
3356         while ((scn = elf_nextscn(elf, scn)) != NULL) {
3357                 sh = elf_sec_hdr(obj, scn);
3358                 if (!sh)
3359                         return -LIBBPF_ERRNO__FORMAT;
3360
3361                 if (sh->sh_type == SHT_SYMTAB) {
3362                         if (obj->efile.symbols) {
3363                                 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3364                                 return -LIBBPF_ERRNO__FORMAT;
3365                         }
3366
3367                         data = elf_sec_data(obj, scn);
3368                         if (!data)
3369                                 return -LIBBPF_ERRNO__FORMAT;
3370
3371                         idx = elf_ndxscn(scn);
3372
3373                         obj->efile.symbols = data;
3374                         obj->efile.symbols_shndx = idx;
3375                         obj->efile.strtabidx = sh->sh_link;
3376                 }
3377         }
3378
3379         if (!obj->efile.symbols) {
3380                 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3381                         obj->path);
3382                 return -ENOENT;
3383         }
3384
3385         scn = NULL;
3386         while ((scn = elf_nextscn(elf, scn)) != NULL) {
3387                 idx = elf_ndxscn(scn);
3388                 sec_desc = &obj->efile.secs[idx];
3389
3390                 sh = elf_sec_hdr(obj, scn);
3391                 if (!sh)
3392                         return -LIBBPF_ERRNO__FORMAT;
3393
3394                 name = elf_sec_str(obj, sh->sh_name);
3395                 if (!name)
3396                         return -LIBBPF_ERRNO__FORMAT;
3397
3398                 if (ignore_elf_section(sh, name))
3399                         continue;
3400
3401                 data = elf_sec_data(obj, scn);
3402                 if (!data)
3403                         return -LIBBPF_ERRNO__FORMAT;
3404
3405                 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3406                          idx, name, (unsigned long)data->d_size,
3407                          (int)sh->sh_link, (unsigned long)sh->sh_flags,
3408                          (int)sh->sh_type);
3409
3410                 if (strcmp(name, "license") == 0) {
3411                         err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3412                         if (err)
3413                                 return err;
3414                 } else if (strcmp(name, "version") == 0) {
3415                         err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3416                         if (err)
3417                                 return err;
3418                 } else if (strcmp(name, "maps") == 0) {
3419                         pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3420                         return -ENOTSUP;
3421                 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3422                         obj->efile.btf_maps_shndx = idx;
3423                 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3424                         if (sh->sh_type != SHT_PROGBITS)
3425                                 return -LIBBPF_ERRNO__FORMAT;
3426                         btf_data = data;
3427                 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3428                         if (sh->sh_type != SHT_PROGBITS)
3429                                 return -LIBBPF_ERRNO__FORMAT;
3430                         btf_ext_data = data;
3431                 } else if (sh->sh_type == SHT_SYMTAB) {
3432                         /* already processed during the first pass above */
3433                 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3434                         if (sh->sh_flags & SHF_EXECINSTR) {
3435                                 if (strcmp(name, ".text") == 0)
3436                                         obj->efile.text_shndx = idx;
3437                                 err = bpf_object__add_programs(obj, data, name, idx);
3438                                 if (err)
3439                                         return err;
3440                         } else if (strcmp(name, DATA_SEC) == 0 ||
3441                                    str_has_pfx(name, DATA_SEC ".")) {
3442                                 sec_desc->sec_type = SEC_DATA;
3443                                 sec_desc->shdr = sh;
3444                                 sec_desc->data = data;
3445                         } else if (strcmp(name, RODATA_SEC) == 0 ||
3446                                    str_has_pfx(name, RODATA_SEC ".")) {
3447                                 sec_desc->sec_type = SEC_RODATA;
3448                                 sec_desc->shdr = sh;
3449                                 sec_desc->data = data;
3450                         } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
3451                                 obj->efile.st_ops_data = data;
3452                                 obj->efile.st_ops_shndx = idx;
3453                         } else {
3454                                 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3455                                         idx, name);
3456                         }
3457                 } else if (sh->sh_type == SHT_REL) {
3458                         int targ_sec_idx = sh->sh_info; /* points to other section */
3459
3460                         if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3461                             targ_sec_idx >= obj->efile.sec_cnt)
3462                                 return -LIBBPF_ERRNO__FORMAT;
3463
3464                         /* Only do relo for section with exec instructions */
3465                         if (!section_have_execinstr(obj, targ_sec_idx) &&
3466                             strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3467                             strcmp(name, ".rel" MAPS_ELF_SEC)) {
3468                                 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3469                                         idx, name, targ_sec_idx,
3470                                         elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3471                                 continue;
3472                         }
3473
3474                         sec_desc->sec_type = SEC_RELO;
3475                         sec_desc->shdr = sh;
3476                         sec_desc->data = data;
3477                 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3478                                                          str_has_pfx(name, BSS_SEC "."))) {
3479                         sec_desc->sec_type = SEC_BSS;
3480                         sec_desc->shdr = sh;
3481                         sec_desc->data = data;
3482                 } else {
3483                         pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3484                                 (size_t)sh->sh_size);
3485                 }
3486         }
3487
3488         if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3489                 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3490                 return -LIBBPF_ERRNO__FORMAT;
3491         }
3492
3493         /* sort BPF programs by section name and in-section instruction offset
3494          * for faster search
3495          */
3496         if (obj->nr_programs)
3497                 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3498
3499         return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3500 }
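
/* Editor's illustration: the section names dispatched above correspond to
 * the usual BPF C skeleton (illustrative snippet, not from this file):
 *
 *   char LICENSE[] SEC("license") = "GPL";          // "license"
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(max_entries, 1);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *   } counts SEC(".maps");                          // MAPS_ELF_SEC
 *
 *   int debug_on = 1;                               // .data   -> SEC_DATA
 *   const int limit = 10;                           // .rodata -> SEC_RODATA
 *
 *   SEC("xdp")                                      // SHF_EXECINSTR section
 *   int pass(struct xdp_md *ctx) { return XDP_PASS; }
 */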
3501
3502 static bool sym_is_extern(const Elf64_Sym *sym)
3503 {
3504         int bind = ELF64_ST_BIND(sym->st_info);
3505         /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3506         return sym->st_shndx == SHN_UNDEF &&
3507                (bind == STB_GLOBAL || bind == STB_WEAK) &&
3508                ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
3509 }
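
/* Editor's note: for "extern int foo __ksym;" (foo is hypothetical) the
 * compiler emits roughly this symbol table entry, matching all three
 * conditions above:
 *
 *   Num:    Value  Size Type    Bind    Vis      Ndx  Name
 *    12:        0     0 NOTYPE  GLOBAL  DEFAULT  UND  foo
 */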
3510
3511 static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3512 {
3513         int bind = ELF64_ST_BIND(sym->st_info);
3514         int type = ELF64_ST_TYPE(sym->st_info);
3515
3516         /* in .text section */
3517         if (sym->st_shndx != text_shndx)
3518                 return false;
3519
3520         /* local function */
3521         if (bind == STB_LOCAL && type == STT_SECTION)
3522                 return true;
3523
3524         /* global function */
3525         return bind == STB_GLOBAL && type == STT_FUNC;
3526 }
3527
3528 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3529 {
3530         const struct btf_type *t;
3531         const char *tname;
3532         int i, n;
3533
3534         if (!btf)
3535                 return -ESRCH;
3536
3537         n = btf__type_cnt(btf);
3538         for (i = 1; i < n; i++) {
3539                 t = btf__type_by_id(btf, i);
3540
3541                 if (!btf_is_var(t) && !btf_is_func(t))
3542                         continue;
3543
3544                 tname = btf__name_by_offset(btf, t->name_off);
3545                 if (strcmp(tname, ext_name))
3546                         continue;
3547
3548                 if (btf_is_var(t) &&
3549                     btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3550                         return -EINVAL;
3551
3552                 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3553                         return -EINVAL;
3554
3555                 return i;
3556         }
3557
3558         return -ENOENT;
3559 }
3560
3561 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3562         const struct btf_var_secinfo *vs;
3563         const struct btf_type *t;
3564         int i, j, n;
3565
3566         if (!btf)
3567                 return -ESRCH;
3568
3569         n = btf__type_cnt(btf);
3570         for (i = 1; i < n; i++) {
3571                 t = btf__type_by_id(btf, i);
3572
3573                 if (!btf_is_datasec(t))
3574                         continue;
3575
3576                 vs = btf_var_secinfos(t);
3577                 for (j = 0; j < btf_vlen(t); j++, vs++) {
3578                         if (vs->type == ext_btf_id)
3579                                 return i;
3580                 }
3581         }
3582
3583         return -ENOENT;
3584 }
3585
3586 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3587                                      bool *is_signed)
3588 {
3589         const struct btf_type *t;
3590         const char *name;
3591
3592         t = skip_mods_and_typedefs(btf, id, NULL);
3593         name = btf__name_by_offset(btf, t->name_off);
3594
3595         if (is_signed)
3596                 *is_signed = false;
3597         switch (btf_kind(t)) {
3598         case BTF_KIND_INT: {
3599                 int enc = btf_int_encoding(t);
3600
3601                 if (enc & BTF_INT_BOOL)
3602                         return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3603                 if (is_signed)
3604                         *is_signed = enc & BTF_INT_SIGNED;
3605                 if (t->size == 1)
3606                         return KCFG_CHAR;
3607                 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3608                         return KCFG_UNKNOWN;
3609                 return KCFG_INT;
3610         }
3611         case BTF_KIND_ENUM:
3612                 if (t->size != 4)
3613                         return KCFG_UNKNOWN;
3614                 if (strcmp(name, "libbpf_tristate"))
3615                         return KCFG_UNKNOWN;
3616                 return KCFG_TRISTATE;
3617         case BTF_KIND_ENUM64:
3618                 if (strcmp(name, "libbpf_tristate"))
3619                         return KCFG_UNKNOWN;
3620                 return KCFG_TRISTATE;
3621         case BTF_KIND_ARRAY:
3622                 if (btf_array(t)->nelems == 0)
3623                         return KCFG_UNKNOWN;
3624                 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3625                         return KCFG_UNKNOWN;
3626                 return KCFG_CHAR_ARR;
3627         default:
3628                 return KCFG_UNKNOWN;
3629         }
3630 }
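
/* Editor's sketch of BPF-side kcfg extern declarations and how the
 * classifier above would bin them (config names are illustrative):
 *
 *   extern unsigned int CONFIG_HZ __kconfig;                      // KCFG_INT
 *   extern _Bool CONFIG_SOME_BOOL __kconfig __weak;               // KCFG_BOOL
 *   extern enum libbpf_tristate CONFIG_SOME_MOD __kconfig __weak; // KCFG_TRISTATE
 *   extern char CONFIG_SOME_STR[16] __kconfig __weak;             // KCFG_CHAR_ARR
 */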
3631
3632 static int cmp_externs(const void *_a, const void *_b)
3633 {
3634         const struct extern_desc *a = _a;
3635         const struct extern_desc *b = _b;
3636
3637         if (a->type != b->type)
3638                 return a->type < b->type ? -1 : 1;
3639
3640         if (a->type == EXT_KCFG) {
3641                 /* descending order by alignment requirements */
3642                 if (a->kcfg.align != b->kcfg.align)
3643                         return a->kcfg.align > b->kcfg.align ? -1 : 1;
3644                 /* ascending order by size, within same alignment class */
3645                 if (a->kcfg.sz != b->kcfg.sz)
3646                         return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3647         }
3648
3649         /* resolve ties by name */
3650         return strcmp(a->name, b->name);
3651 }
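
/* Editor's note on the kcfg ordering above: sorting by descending
 * alignment, then ascending size, lets the .kconfig image be laid out
 * with no wasted internal padding, e.g. a u64/u32/bool triple ends up at
 * offsets 0/8/12.
 */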
3652
3653 static int find_int_btf_id(const struct btf *btf)
3654 {
3655         const struct btf_type *t;
3656         int i, n;
3657
3658         n = btf__type_cnt(btf);
3659         for (i = 1; i < n; i++) {
3660                 t = btf__type_by_id(btf, i);
3661
3662                 if (btf_is_int(t) && btf_int_bits(t) == 32)
3663                         return i;
3664         }
3665
3666         return 0;
3667 }
3668
3669 static int add_dummy_ksym_var(struct btf *btf)
3670 {
3671         int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3672         const struct btf_var_secinfo *vs;
3673         const struct btf_type *sec;
3674
3675         if (!btf)
3676                 return 0;
3677
3678         sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
3679                                             BTF_KIND_DATASEC);
3680         if (sec_btf_id < 0)
3681                 return 0;
3682
3683         sec = btf__type_by_id(btf, sec_btf_id);
3684         vs = btf_var_secinfos(sec);
3685         for (i = 0; i < btf_vlen(sec); i++, vs++) {
3686                 const struct btf_type *vt;
3687
3688                 vt = btf__type_by_id(btf, vs->type);
3689                 if (btf_is_func(vt))
3690                         break;
3691         }
3692
3693         /* No func in ksyms sec.  No need to add dummy var. */
3694         if (i == btf_vlen(sec))
3695                 return 0;
3696
3697         int_btf_id = find_int_btf_id(btf);
3698         dummy_var_btf_id = btf__add_var(btf,
3699                                         "dummy_ksym",
3700                                         BTF_VAR_GLOBAL_ALLOCATED,
3701                                         int_btf_id);
3702         if (dummy_var_btf_id < 0)
3703                 pr_warn("cannot create a dummy_ksym var\n");
3704
3705         return dummy_var_btf_id;
3706 }
3707
3708 static int bpf_object__collect_externs(struct bpf_object *obj)
3709 {
3710         struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3711         const struct btf_type *t;
3712         struct extern_desc *ext;
3713         int i, n, off, dummy_var_btf_id;
3714         const char *ext_name, *sec_name;
3715         Elf_Scn *scn;
3716         Elf64_Shdr *sh;
3717
3718         if (!obj->efile.symbols)
3719                 return 0;
3720
3721         scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3722         sh = elf_sec_hdr(obj, scn);
3723         if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
3724                 return -LIBBPF_ERRNO__FORMAT;
3725
3726         dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
3727         if (dummy_var_btf_id < 0)
3728                 return dummy_var_btf_id;
3729
3730         n = sh->sh_size / sh->sh_entsize;
3731         pr_debug("looking for externs among %d symbols...\n", n);
3732
3733         for (i = 0; i < n; i++) {
3734                 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
3735
3736                 if (!sym)
3737                         return -LIBBPF_ERRNO__FORMAT;
3738                 if (!sym_is_extern(sym))
3739                         continue;
3740                 ext_name = elf_sym_str(obj, sym->st_name);
3741                 if (!ext_name || !ext_name[0])
3742                         continue;
3743
3744                 ext = obj->externs;
3745                 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3746                 if (!ext)
3747                         return -ENOMEM;
3748                 obj->externs = ext;
3749                 ext = &ext[obj->nr_extern];
3750                 memset(ext, 0, sizeof(*ext));
3751                 obj->nr_extern++;
3752
3753                 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3754                 if (ext->btf_id <= 0) {
3755                         pr_warn("failed to find BTF for extern '%s': %d\n",
3756                                 ext_name, ext->btf_id);
3757                         return ext->btf_id;
3758                 }
3759                 t = btf__type_by_id(obj->btf, ext->btf_id);
3760                 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3761                 ext->sym_idx = i;
3762                 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
3763
3764                 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3765                 if (ext->sec_btf_id <= 0) {
3766                         pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3767                                 ext_name, ext->btf_id, ext->sec_btf_id);
3768                         return ext->sec_btf_id;
3769                 }
3770                 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3771                 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3772
3773                 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3774                         if (btf_is_func(t)) {
3775                                 pr_warn("extern function %s is unsupported under %s section\n",
3776                                         ext->name, KCONFIG_SEC);
3777                                 return -ENOTSUP;
3778                         }
3779                         kcfg_sec = sec;
3780                         ext->type = EXT_KCFG;
3781                         ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3782                         if (ext->kcfg.sz <= 0) {
3783                                 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3784                                         ext_name, ext->kcfg.sz);
3785                                 return ext->kcfg.sz;
3786                         }
3787                         ext->kcfg.align = btf__align_of(obj->btf, t->type);
3788                         if (ext->kcfg.align <= 0) {
3789                                 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3790                                         ext_name, ext->kcfg.align);
3791                                 return -EINVAL;
3792                         }
3793                         ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3794                                                         &ext->kcfg.is_signed);
3795                         if (ext->kcfg.type == KCFG_UNKNOWN) {
3796                                 pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
3797                                 return -ENOTSUP;
3798                         }
3799                 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3800                         ksym_sec = sec;
3801                         ext->type = EXT_KSYM;
3802                         skip_mods_and_typedefs(obj->btf, t->type,
3803                                                &ext->ksym.type_id);
3804                 } else {
3805                         pr_warn("unrecognized extern section '%s'\n", sec_name);
3806                         return -ENOTSUP;
3807                 }
3808         }
3809         pr_debug("collected %d externs total\n", obj->nr_extern);
3810
3811         if (!obj->nr_extern)
3812                 return 0;
3813
3814         /* sort externs by type, for kcfg ones also by (align, size, name) */
3815         qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3816
3817         /* for .ksyms section, we need to turn all externs into allocated
3818          * variables in BTF to pass kernel verification; we do this by
3819          * pretending that each extern is a 4-byte int variable
3820          */
3821         if (ksym_sec) {
3822                 /* find existing 4-byte integer type in BTF to use for fake
3823                  * extern variables in DATASEC
3824                  */
3825                 int int_btf_id = find_int_btf_id(obj->btf);
3826                 /* For an extern function, the dummy_var added earlier
3827                  * replaces vs->type, and its name string is reused to
3828                  * fill in any missing param names.
3829                  */
3831                 const struct btf_type *dummy_var;
3832
3833                 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
3834                 for (i = 0; i < obj->nr_extern; i++) {
3835                         ext = &obj->externs[i];
3836                         if (ext->type != EXT_KSYM)
3837                                 continue;
3838                         pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3839                                  i, ext->sym_idx, ext->name);
3840                 }
3841
3842                 sec = ksym_sec;
3843                 n = btf_vlen(sec);
3844                 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3845                         struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3846                         struct btf_type *vt;
3847
3848                         vt = (void *)btf__type_by_id(obj->btf, vs->type);
3849                         ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3850                         ext = find_extern_by_name(obj, ext_name);
3851                         if (!ext) {
3852                                 pr_warn("failed to find extern definition for BTF %s '%s'\n",
3853                                         btf_kind_str(vt), ext_name);
3854                                 return -ESRCH;
3855                         }
3856                         if (btf_is_func(vt)) {
3857                                 const struct btf_type *func_proto;
3858                                 struct btf_param *param;
3859                                 int j;
3860
3861                                 func_proto = btf__type_by_id(obj->btf,
3862                                                              vt->type);
3863                                 param = btf_params(func_proto);
3864                                 /* Reuse the dummy_var name string if the
3865                                  * func proto lacks a param name.
3866                                  */
3867                                 for (j = 0; j < btf_vlen(func_proto); j++)
3868                                         if (param[j].type && !param[j].name_off)
3869                                                 param[j].name_off =
3870                                                         dummy_var->name_off;
3871                                 vs->type = dummy_var_btf_id;
3872                                 vt->info &= ~0xffff;
3873                                 vt->info |= BTF_FUNC_GLOBAL;
3874                         } else {
3875                                 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3876                                 vt->type = int_btf_id;
3877                         }
3878                         vs->offset = off;
3879                         vs->size = sizeof(int);
3880                 }
3881                 sec->size = off;
3882         }
3883
3884         if (kcfg_sec) {
3885                 sec = kcfg_sec;
3886                 /* for kcfg externs calculate their offsets within a .kconfig map */
3887                 off = 0;
3888                 for (i = 0; i < obj->nr_extern; i++) {
3889                         ext = &obj->externs[i];
3890                         if (ext->type != EXT_KCFG)
3891                                 continue;
3892
3893                         ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3894                         off = ext->kcfg.data_off + ext->kcfg.sz;
3895                         pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3896                                  i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3897                 }
3898                 sec->size = off;
3899                 n = btf_vlen(sec);
3900                 for (i = 0; i < n; i++) {
3901                         struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3902
3903                         t = btf__type_by_id(obj->btf, vs->type);
3904                         ext_name = btf__name_by_offset(obj->btf, t->name_off);
3905                         ext = find_extern_by_name(obj, ext_name);
3906                         if (!ext) {
3907                                 pr_warn("failed to find extern definition for BTF var '%s'\n",
3908                                         ext_name);
3909                                 return -ESRCH;
3910                         }
3911                         btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3912                         vs->offset = ext->kcfg.data_off;
3913                 }
3914         }
3915         return 0;
3916 }
3917
3918 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
3919 {
3920         return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3921 }
3922
3923 struct bpf_program *
3924 bpf_object__find_program_by_name(const struct bpf_object *obj,
3925                                  const char *name)
3926 {
3927         struct bpf_program *prog;
3928
3929         bpf_object__for_each_program(prog, obj) {
3930                 if (prog_is_subprog(obj, prog))
3931                         continue;
3932                 if (!strcmp(prog->name, name))
3933                         return prog;
3934         }
3935         return errno = ENOENT, NULL;
3936 }
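
/* Editor's sketch of typical caller usage, assuming an already-opened
 * object with a program named "pass" (hypothetical):
 *
 *   struct bpf_program *prog;
 *
 *   prog = bpf_object__find_program_by_name(obj, "pass");
 *   if (!prog)  // errno is set to ENOENT
 *           return -errno;
 */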
3937
3938 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3939                                       int shndx)
3940 {
3941         switch (obj->efile.secs[shndx].sec_type) {
3942         case SEC_BSS:
3943         case SEC_DATA:
3944         case SEC_RODATA:
3945                 return true;
3946         default:
3947                 return false;
3948         }
3949 }
3950
3951 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3952                                       int shndx)
3953 {
3954         return shndx == obj->efile.btf_maps_shndx;
3955 }
3956
3957 static enum libbpf_map_type
3958 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3959 {
3960         if (shndx == obj->efile.symbols_shndx)
3961                 return LIBBPF_MAP_KCONFIG;
3962
3963         switch (obj->efile.secs[shndx].sec_type) {
3964         case SEC_BSS:
3965                 return LIBBPF_MAP_BSS;
3966         case SEC_DATA:
3967                 return LIBBPF_MAP_DATA;
3968         case SEC_RODATA:
3969                 return LIBBPF_MAP_RODATA;
3970         default:
3971                 return LIBBPF_MAP_UNSPEC;
3972         }
3973 }
3974
3975 static int bpf_program__record_reloc(struct bpf_program *prog,
3976                                      struct reloc_desc *reloc_desc,
3977                                      __u32 insn_idx, const char *sym_name,
3978                                      const Elf64_Sym *sym, const Elf64_Rel *rel)
3979 {
3980         struct bpf_insn *insn = &prog->insns[insn_idx];
3981         size_t map_idx, nr_maps = prog->obj->nr_maps;
3982         struct bpf_object *obj = prog->obj;
3983         __u32 shdr_idx = sym->st_shndx;
3984         enum libbpf_map_type type;
3985         const char *sym_sec_name;
3986         struct bpf_map *map;
3987
3988         if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
3989                 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3990                         prog->name, sym_name, insn_idx, insn->code);
3991                 return -LIBBPF_ERRNO__RELOC;
3992         }
3993
3994         if (sym_is_extern(sym)) {
3995                 int sym_idx = ELF64_R_SYM(rel->r_info);
3996                 int i, n = obj->nr_extern;
3997                 struct extern_desc *ext;
3998
3999                 for (i = 0; i < n; i++) {
4000                         ext = &obj->externs[i];
4001                         if (ext->sym_idx == sym_idx)
4002                                 break;
4003                 }
4004                 if (i >= n) {
4005                         pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4006                                 prog->name, sym_name, sym_idx);
4007                         return -LIBBPF_ERRNO__RELOC;
4008                 }
4009                 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4010                          prog->name, i, ext->name, ext->sym_idx, insn_idx);
4011                 if (insn->code == (BPF_JMP | BPF_CALL))
4012                         reloc_desc->type = RELO_EXTERN_FUNC;
4013                 else
4014                         reloc_desc->type = RELO_EXTERN_VAR;
4015                 reloc_desc->insn_idx = insn_idx;
4016                 reloc_desc->sym_off = i; /* sym_off stores extern index */
4017                 return 0;
4018         }
4019
4020         /* sub-program call relocation */
4021         if (is_call_insn(insn)) {
4022                 if (insn->src_reg != BPF_PSEUDO_CALL) {
4023                         pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4024                         return -LIBBPF_ERRNO__RELOC;
4025                 }
4026                 /* text_shndx can be 0, if no default "main" program exists */
4027                 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4028                         sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4029                         pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4030                                 prog->name, sym_name, sym_sec_name);
4031                         return -LIBBPF_ERRNO__RELOC;
4032                 }
4033                 if (sym->st_value % BPF_INSN_SZ) {
4034                         pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4035                                 prog->name, sym_name, (size_t)sym->st_value);
4036                         return -LIBBPF_ERRNO__RELOC;
4037                 }
4038                 reloc_desc->type = RELO_CALL;
4039                 reloc_desc->insn_idx = insn_idx;
4040                 reloc_desc->sym_off = sym->st_value;
4041                 return 0;
4042         }
4043
4044         if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
4045                 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4046                         prog->name, sym_name, shdr_idx);
4047                 return -LIBBPF_ERRNO__RELOC;
4048         }
4049
4050         /* loading subprog addresses */
4051         if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4052                 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4053                  * local_func: sym->st_value = 0, insn->imm = offset in the section.
4054                  */
4055                 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4056                         pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4057                                 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4058                         return -LIBBPF_ERRNO__RELOC;
4059                 }
4060
4061                 reloc_desc->type = RELO_SUBPROG_ADDR;
4062                 reloc_desc->insn_idx = insn_idx;
4063                 reloc_desc->sym_off = sym->st_value;
4064                 return 0;
4065         }
4066
4067         type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4068         sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4069
4070         /* generic map reference relocation */
4071         if (type == LIBBPF_MAP_UNSPEC) {
4072                 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4073                         pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4074                                 prog->name, sym_name, sym_sec_name);
4075                         return -LIBBPF_ERRNO__RELOC;
4076                 }
4077                 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4078                         map = &obj->maps[map_idx];
4079                         if (map->libbpf_type != type ||
4080                             map->sec_idx != sym->st_shndx ||
4081                             map->sec_offset != sym->st_value)
4082                                 continue;
4083                         pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4084                                  prog->name, map_idx, map->name, map->sec_idx,
4085                                  map->sec_offset, insn_idx);
4086                         break;
4087                 }
4088                 if (map_idx >= nr_maps) {
4089                         pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4090                                 prog->name, sym_sec_name, (size_t)sym->st_value);
4091                         return -LIBBPF_ERRNO__RELOC;
4092                 }
4093                 reloc_desc->type = RELO_LD64;
4094                 reloc_desc->insn_idx = insn_idx;
4095                 reloc_desc->map_idx = map_idx;
4096                 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4097                 return 0;
4098         }
4099
4100         /* global data map relocation */
4101         if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4102                 pr_warn("prog '%s': bad data relo against section '%s'\n",
4103                         prog->name, sym_sec_name);
4104                 return -LIBBPF_ERRNO__RELOC;
4105         }
4106         for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4107                 map = &obj->maps[map_idx];
4108                 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4109                         continue;
4110                 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4111                          prog->name, map_idx, map->name, map->sec_idx,
4112                          map->sec_offset, insn_idx);
4113                 break;
4114         }
4115         if (map_idx >= nr_maps) {
4116                 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4117                         prog->name, sym_sec_name);
4118                 return -LIBBPF_ERRNO__RELOC;
4119         }
4120
4121         reloc_desc->type = RELO_DATA;
4122         reloc_desc->insn_idx = insn_idx;
4123         reloc_desc->map_idx = map_idx;
4124         reloc_desc->sym_off = sym->st_value;
4125         return 0;
4126 }
4127
4128 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4129 {
4130         return insn_idx >= prog->sec_insn_off &&
4131                insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4132 }
4133
4134 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4135                                                  size_t sec_idx, size_t insn_idx)
4136 {
4137         int l = 0, r = obj->nr_programs - 1, m;
4138         struct bpf_program *prog;
4139
4140         if (!obj->nr_programs)
4141                 return NULL;
4142
4143         while (l < r) {
4144                 m = l + (r - l + 1) / 2;
4145                 prog = &obj->programs[m];
4146
4147                 if (prog->sec_idx < sec_idx ||
4148                     (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4149                         l = m;
4150                 else
4151                         r = m - 1;
4152         }
4153         /* matching program could be at index l, but it still might be the
4154          * wrong one, so we need to double check conditions for the last time
4155          */
4156         prog = &obj->programs[l];
4157         if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4158                 return prog;
4159         return NULL;
4160 }
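/* A worked example of the lookup above (offsets are illustrative, not from
 * this file): with two programs in one section, A (sec_insn_off=0,
 * sec_insn_cnt=8) and B (sec_insn_off=8, sec_insn_cnt=16), looking up
 * insn_idx=10 selects B (8 <= 10 < 24), while insn_idx=30 falls past B's
 * range and the final containment check returns NULL.
 */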
4161
4162 static int
4163 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4164 {
4165         const char *relo_sec_name, *sec_name;
4166         size_t sec_idx = shdr->sh_info, sym_idx;
4167         struct bpf_program *prog;
4168         struct reloc_desc *relos;
4169         int err, i, nrels;
4170         const char *sym_name;
4171         __u32 insn_idx;
4172         Elf_Scn *scn;
4173         Elf_Data *scn_data;
4174         Elf64_Sym *sym;
4175         Elf64_Rel *rel;
4176
4177         if (sec_idx >= obj->efile.sec_cnt)
4178                 return -EINVAL;
4179
4180         scn = elf_sec_by_idx(obj, sec_idx);
4181         scn_data = elf_sec_data(obj, scn);
4182
4183         relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4184         sec_name = elf_sec_name(obj, scn);
4185         if (!relo_sec_name || !sec_name)
4186                 return -EINVAL;
4187
4188         pr_debug("sec '%s': collecting relocations for section(%zu) '%s'\n",
4189                  relo_sec_name, sec_idx, sec_name);
4190         nrels = shdr->sh_size / shdr->sh_entsize;
4191
4192         for (i = 0; i < nrels; i++) {
4193                 rel = elf_rel_by_idx(data, i);
4194                 if (!rel) {
4195                         pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4196                         return -LIBBPF_ERRNO__FORMAT;
4197                 }
4198
4199                 sym_idx = ELF64_R_SYM(rel->r_info);
4200                 sym = elf_sym_by_idx(obj, sym_idx);
4201                 if (!sym) {
4202                         pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4203                                 relo_sec_name, sym_idx, i);
4204                         return -LIBBPF_ERRNO__FORMAT;
4205                 }
4206
4207                 if (sym->st_shndx >= obj->efile.sec_cnt) {
4208                         pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4209                                 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4210                         return -LIBBPF_ERRNO__FORMAT;
4211                 }
4212
4213                 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4214                         pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4215                                 relo_sec_name, (size_t)rel->r_offset, i);
4216                         return -LIBBPF_ERRNO__FORMAT;
4217                 }
4218
4219                 insn_idx = rel->r_offset / BPF_INSN_SZ;
4220                 /* relocations against static functions are recorded as
4221                  * relocations against the section that contains the function;
4222                  * in such a case, the symbol will be STT_SECTION and
4223                  * sym.st_name will point to the empty string (offset 0), so
4224                  * fetch the section name instead
4225                  */
4226                 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4227                         sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4228                 else
4229                         sym_name = elf_sym_str(obj, sym->st_name);
4230                 sym_name = sym_name ?: "<?";
4231
4232                 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4233                          relo_sec_name, i, insn_idx, sym_name);
4234
4235                 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4236                 if (!prog) {
4237                         pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4238                                 relo_sec_name, i, sec_name, insn_idx);
4239                         continue;
4240                 }
4241
4242                 relos = libbpf_reallocarray(prog->reloc_desc,
4243                                             prog->nr_reloc + 1, sizeof(*relos));
4244                 if (!relos)
4245                         return -ENOMEM;
4246                 prog->reloc_desc = relos;
4247
4248                 /* adjust insn_idx to local BPF program frame of reference */
4249                 insn_idx -= prog->sec_insn_off;
4250                 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4251                                                 insn_idx, sym_name, sym, rel);
4252                 if (err)
4253                         return err;
4254
4255                 prog->nr_reloc++;
4256         }
4257         return 0;
4258 }
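/* For example (offsets are illustrative), a relocation record in ".reltc"
 * with r_offset=0x28 applies to instruction #5 (0x28 / BPF_INSN_SZ) of
 * section "tc"; once the containing program is found, that index is rebased
 * to the program's own frame via insn_idx -= prog->sec_insn_off.
 */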
4259
4260 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
4261 {
4262         int id;
4263
4264         if (!obj->btf)
4265                 return -ENOENT;
4266
4267         /* if it's a BTF-defined map, we don't need to search for type IDs.
4268          * A struct_ops map needs neither btf_key_type_id nor
4269          * btf_value_type_id.
4270          */
4271         if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4272                 return 0;
4273
4274         /*
4275          * LLVM annotates global data differently in BTF, that is,
4276          * only as '.data', '.bss' or '.rodata'.
4277          */
4278         if (!bpf_map__is_internal(map))
4279                 return -ENOENT;
4280
4281         id = btf__find_by_name(obj->btf, map->real_name);
4282         if (id < 0)
4283                 return id;
4284
4285         map->btf_key_type_id = 0;
4286         map->btf_value_type_id = id;
4287         return 0;
4288 }
4289
4290 static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4291 {
4292         char file[PATH_MAX], buff[4096];
4293         FILE *fp;
4294         __u32 val;
4295         int err;
4296
4297         snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4298         memset(info, 0, sizeof(*info));
4299
4300         fp = fopen(file, "r");
4301         if (!fp) {
4302                 err = -errno;
4303                 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4304                         err);
4305                 return err;
4306         }
4307
4308         while (fgets(buff, sizeof(buff), fp)) {
4309                 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4310                         info->type = val;
4311                 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4312                         info->key_size = val;
4313                 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4314                         info->value_size = val;
4315                 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4316                         info->max_entries = val;
4317                 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4318                         info->map_flags = val;
4319         }
4320
4321         fclose(fp);
4322
4323         return 0;
4324 }
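/* The parser above matches the map-related lines of a BPF map's fdinfo file.
 * An illustrative excerpt of /proc/<pid>/fdinfo/<fd> (values are made up;
 * map_flags is printed in hex, which the %i conversion accepts):
 *
 *	map_type:	2
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	128
 *	map_flags:	0x400
 */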
4325
4326 bool bpf_map__autocreate(const struct bpf_map *map)
4327 {
4328         return map->autocreate;
4329 }
4330
4331 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4332 {
4333         if (map->obj->loaded)
4334                 return libbpf_err(-EBUSY);
4335
4336         map->autocreate = autocreate;
4337         return 0;
4338 }
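/* Usage sketch (file and map names are illustrative, not from this file):
 * disable creation of an optional map; this must happen before load.
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "optional_map");
 *
 *	if (m)
 *		bpf_map__set_autocreate(m, false);
 *	bpf_object__load(obj);
 */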
4339
4340 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4341 {
4342         struct bpf_map_info info;
4343         __u32 len = sizeof(info), name_len;
4344         int new_fd, err;
4345         char *new_name;
4346
4347         memset(&info, 0, len);
4348         err = bpf_map_get_info_by_fd(fd, &info, &len);
4349         if (err && errno == EINVAL)
4350                 err = bpf_get_map_info_from_fdinfo(fd, &info);
4351         if (err)
4352                 return libbpf_err(err);
4353
4354         name_len = strlen(info.name);
4355         if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4356                 new_name = strdup(map->name);
4357         else
4358                 new_name = strdup(info.name);
4359
4360         if (!new_name)
4361                 return libbpf_err(-errno);
4362
4363         new_fd = open("/", O_RDONLY | O_CLOEXEC);
4364         if (new_fd < 0) {
4365                 err = -errno;
4366                 goto err_free_new_name;
4367         }
4368
4369         new_fd = dup3(fd, new_fd, O_CLOEXEC);
4370         if (new_fd < 0) {
4371                 err = -errno;
4372                 goto err_close_new_fd;
4373         }
4374
4375         err = zclose(map->fd);
4376         if (err) {
4377                 err = -errno;
4378                 goto err_close_new_fd;
4379         }
4380         free(map->name);
4381
4382         map->fd = new_fd;
4383         map->name = new_name;
4384         map->def.type = info.type;
4385         map->def.key_size = info.key_size;
4386         map->def.value_size = info.value_size;
4387         map->def.max_entries = info.max_entries;
4388         map->def.map_flags = info.map_flags;
4389         map->btf_key_type_id = info.btf_key_type_id;
4390         map->btf_value_type_id = info.btf_value_type_id;
4391         map->reused = true;
4392         map->map_extra = info.map_extra;
4393
4394         return 0;
4395
4396 err_close_new_fd:
4397         close(new_fd);
4398 err_free_new_name:
4399         free(new_name);
4400         return libbpf_err(err);
4401 }
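/* Usage sketch (pin path and names are illustrative): adopt a pinned map
 * before load. Given an open struct bpf_object *obj, and since
 * bpf_map__reuse_fd() duplicates the FD, the caller may close its copy:
 *
 *	int fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	if (fd >= 0 && m && !bpf_map__reuse_fd(m, fd))
 *		;	// 'm' now refers to the pinned map
 *	close(fd);
 */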
4402
4403 __u32 bpf_map__max_entries(const struct bpf_map *map)
4404 {
4405         return map->def.max_entries;
4406 }
4407
4408 struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4409 {
4410         if (!bpf_map_type__is_map_in_map(map->def.type))
4411                 return errno = EINVAL, NULL;
4412
4413         return map->inner_map;
4414 }
4415
4416 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4417 {
4418         if (map->obj->loaded)
4419                 return libbpf_err(-EBUSY);
4420
4421         map->def.max_entries = max_entries;
4422
4423         /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
4424         if (map_is_ringbuf(map))
4425                 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
4426
4427         return 0;
4428 }
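/* Usage sketch: request a ~64 KB ringbuf before load; per the adjustment
 * above, libbpf rounds the size up to a power-of-2 multiple of the page
 * size, as the kernel requires for BPF_MAP_TYPE_RINGBUF:
 *
 *	bpf_map__set_max_entries(ringbuf_map, 64 * 1024);
 */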
4429
4430 static int
4431 bpf_object__probe_loading(struct bpf_object *obj)
4432 {
4433         char *cp, errmsg[STRERR_BUFSIZE];
4434         struct bpf_insn insns[] = {
4435                 BPF_MOV64_IMM(BPF_REG_0, 0),
4436                 BPF_EXIT_INSN(),
4437         };
4438         int ret, insn_cnt = ARRAY_SIZE(insns);
4439
4440         if (obj->gen_loader)
4441                 return 0;
4442
4443         ret = bump_rlimit_memlock();
4444         if (ret)
4445                 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4446
4447         /* make sure basic loading works */
4448         ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4449         if (ret < 0)
4450                 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4451         if (ret < 0) {
4452                 ret = errno;
4453                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4454                 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4455                         "program. Make sure your kernel supports BPF "
4456                         "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4457                         "set to a big enough value.\n", __func__, cp, ret);
4458                 return -ret;
4459         }
4460         close(ret);
4461
4462         return 0;
4463 }
4464
4465 static int probe_fd(int fd)
4466 {
4467         if (fd >= 0)
4468                 close(fd);
4469         return fd >= 0;
4470 }
4471
4472 static int probe_kern_prog_name(void)
4473 {
4474         const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
4475         struct bpf_insn insns[] = {
4476                 BPF_MOV64_IMM(BPF_REG_0, 0),
4477                 BPF_EXIT_INSN(),
4478         };
4479         union bpf_attr attr;
4480         int ret;
4481
4482         memset(&attr, 0, attr_sz);
4483         attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4484         attr.license = ptr_to_u64("GPL");
4485         attr.insns = ptr_to_u64(insns);
4486         attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
4487         libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
4488
4489         /* make sure loading with name works */
4490         ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
4491         return probe_fd(ret);
4492 }
4493
4494 static int probe_kern_global_data(void)
4495 {
4496         char *cp, errmsg[STRERR_BUFSIZE];
4497         struct bpf_insn insns[] = {
4498                 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
4499                 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
4500                 BPF_MOV64_IMM(BPF_REG_0, 0),
4501                 BPF_EXIT_INSN(),
4502         };
4503         int ret, map, insn_cnt = ARRAY_SIZE(insns);
4504
4505         map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
4506         if (map < 0) {
4507                 ret = -errno;
4508                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4509                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4510                         __func__, cp, -ret);
4511                 return ret;
4512         }
4513
4514         insns[0].imm = map;
4515
4516         ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4517         close(map);
4518         return probe_fd(ret);
4519 }
4520
4521 static int probe_kern_btf(void)
4522 {
4523         static const char strs[] = "\0int";
4524         __u32 types[] = {
4525                 /* int */
4526                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4527         };
4528
4529         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4530                                              strs, sizeof(strs)));
4531 }
4532
4533 static int probe_kern_btf_func(void)
4534 {
4535         static const char strs[] = "\0int\0x\0a";
4536         /* void x(int a) {} */
4537         __u32 types[] = {
4538                 /* int */
4539                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4540                 /* FUNC_PROTO */                                /* [2] */
4541                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4542                 BTF_PARAM_ENC(7, 1),
4543                 /* FUNC x */                                    /* [3] */
4544                 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
4545         };
4546
4547         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4548                                              strs, sizeof(strs)));
4549 }
4550
4551 static int probe_kern_btf_func_global(void)
4552 {
4553         static const char strs[] = "\0int\0x\0a";
4554         /* void x(int a) {} with global (BTF_FUNC_GLOBAL) linkage */
4555         __u32 types[] = {
4556                 /* int */
4557                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4558                 /* FUNC_PROTO */                                /* [2] */
4559                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4560                 BTF_PARAM_ENC(7, 1),
4561                 /* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
4562                 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
4563         };
4564
4565         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4566                                              strs, sizeof(strs)));
4567 }
4568
4569 static int probe_kern_btf_datasec(void)
4570 {
4571         static const char strs[] = "\0x\0.data";
4572         /* static int a; */
4573         __u32 types[] = {
4574                 /* int */
4575                 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4576                 /* VAR x */                                     /* [2] */
4577                 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4578                 BTF_VAR_STATIC,
4579                 /* DATASEC val */                               /* [3] */
4580                 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
4581                 BTF_VAR_SECINFO_ENC(2, 0, 4),
4582         };
4583
4584         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4585                                              strs, sizeof(strs)));
4586 }
4587
4588 static int probe_kern_btf_float(void)
4589 {
4590         static const char strs[] = "\0float";
4591         __u32 types[] = {
4592                 /* float */
4593                 BTF_TYPE_FLOAT_ENC(1, 4),
4594         };
4595
4596         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4597                                              strs, sizeof(strs)));
4598 }
4599
4600 static int probe_kern_btf_decl_tag(void)
4601 {
4602         static const char strs[] = "\0tag";
4603         __u32 types[] = {
4604                 /* int */
4605                 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4606                 /* VAR x */                                     /* [2] */
4607                 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4608                 BTF_VAR_STATIC,
4609                 /* attr */
4610                 BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
4611         };
4612
4613         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4614                                              strs, sizeof(strs)));
4615 }
4616
4617 static int probe_kern_btf_type_tag(void)
4618 {
4619         static const char strs[] = "\0tag";
4620         __u32 types[] = {
4621                 /* int */
4622                 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
4623                 /* attr */
4624                 BTF_TYPE_TYPE_TAG_ENC(1, 1),                            /* [2] */
4625                 /* ptr */
4626                 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),   /* [3] */
4627         };
4628
4629         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4630                                              strs, sizeof(strs)));
4631 }
4632
4633 static int probe_kern_array_mmap(void)
4634 {
4635         LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
4636         int fd;
4637
4638         fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
4639         return probe_fd(fd);
4640 }
4641
4642 static int probe_kern_exp_attach_type(void)
4643 {
4644         LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
4645         struct bpf_insn insns[] = {
4646                 BPF_MOV64_IMM(BPF_REG_0, 0),
4647                 BPF_EXIT_INSN(),
4648         };
4649         int fd, insn_cnt = ARRAY_SIZE(insns);
4650
4651         /* use any valid combination of program type and (optional)
4652          * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS,
4653          * which is zero) to see if the kernel supports the
4654          * expected_attach_type field for the BPF_PROG_LOAD command
4655          */
4656         fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
4657         return probe_fd(fd);
4658 }
4659
4660 static int probe_kern_probe_read_kernel(void)
4661 {
4662         struct bpf_insn insns[] = {
4663                 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),   /* r1 = r10 (fp) */
4664                 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),  /* r1 += -8 */
4665                 BPF_MOV64_IMM(BPF_REG_2, 8),            /* r2 = 8 */
4666                 BPF_MOV64_IMM(BPF_REG_3, 0),            /* r3 = 0 */
4667                 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
4668                 BPF_EXIT_INSN(),
4669         };
4670         int fd, insn_cnt = ARRAY_SIZE(insns);
4671
4672         fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4673         return probe_fd(fd);
4674 }
4675
4676 static int probe_prog_bind_map(void)
4677 {
4678         char *cp, errmsg[STRERR_BUFSIZE];
4679         struct bpf_insn insns[] = {
4680                 BPF_MOV64_IMM(BPF_REG_0, 0),
4681                 BPF_EXIT_INSN(),
4682         };
4683         int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
4684
4685         map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL);
4686         if (map < 0) {
4687                 ret = -errno;
4688                 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4689                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4690                         __func__, cp, -ret);
4691                 return ret;
4692         }
4693
4694         prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4695         if (prog < 0) {
4696                 close(map);
4697                 return 0;
4698         }
4699
4700         ret = bpf_prog_bind_map(prog, map, NULL);
4701
4702         close(map);
4703         close(prog);
4704
4705         return ret >= 0;
4706 }
4707
4708 static int probe_module_btf(void)
4709 {
4710         static const char strs[] = "\0int";
4711         __u32 types[] = {
4712                 /* int */
4713                 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4714         };
4715         struct bpf_btf_info info;
4716         __u32 len = sizeof(info);
4717         char name[16];
4718         int fd, err;
4719
4720         fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4721         if (fd < 0)
4722                 return 0; /* BTF not supported at all */
4723
4724         memset(&info, 0, sizeof(info));
4725         info.name = ptr_to_u64(name);
4726         info.name_len = sizeof(name);
4727
4728         /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
4729          * kernel's module BTF support coincides with support for
4730          * name/name_len fields in struct bpf_btf_info.
4731          */
4732         err = bpf_btf_get_info_by_fd(fd, &info, &len);
4733         close(fd);
4734         return !err;
4735 }
4736
4737 static int probe_perf_link(void)
4738 {
4739         struct bpf_insn insns[] = {
4740                 BPF_MOV64_IMM(BPF_REG_0, 0),
4741                 BPF_EXIT_INSN(),
4742         };
4743         int prog_fd, link_fd, err;
4744
4745         prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
4746                                 insns, ARRAY_SIZE(insns), NULL);
4747         if (prog_fd < 0)
4748                 return -errno;
4749
4750         /* use an invalid perf_event FD: if BPF_PERF_EVENT links are
4751          * supported we get EBADF, otherwise EINVAL is returned
4752          */
4753         link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
4754         err = -errno; /* close() can clobber errno */
4755
4756         if (link_fd >= 0)
4757                 close(link_fd);
4758         close(prog_fd);
4759
4760         return link_fd < 0 && err == -EBADF;
4761 }
4762
4763 static int probe_kern_bpf_cookie(void)
4764 {
4765         struct bpf_insn insns[] = {
4766                 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
4767                 BPF_EXIT_INSN(),
4768         };
4769         int ret, insn_cnt = ARRAY_SIZE(insns);
4770
4771         ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
4772         return probe_fd(ret);
4773 }
4774
4775 static int probe_kern_btf_enum64(void)
4776 {
4777         static const char strs[] = "\0enum64";
4778         __u32 types[] = {
4779                 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
4780         };
4781
4782         return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4783                                              strs, sizeof(strs)));
4784 }
4785
4786 static int probe_kern_syscall_wrapper(void);
4787
4788 enum kern_feature_result {
4789         FEAT_UNKNOWN = 0,
4790         FEAT_SUPPORTED = 1,
4791         FEAT_MISSING = 2,
4792 };
4793
4794 typedef int (*feature_probe_fn)(void);
4795
4796 static struct kern_feature_desc {
4797         const char *desc;
4798         feature_probe_fn probe;
4799         enum kern_feature_result res;
4800 } feature_probes[__FEAT_CNT] = {
4801         [FEAT_PROG_NAME] = {
4802                 "BPF program name", probe_kern_prog_name,
4803         },
4804         [FEAT_GLOBAL_DATA] = {
4805                 "global variables", probe_kern_global_data,
4806         },
4807         [FEAT_BTF] = {
4808                 "minimal BTF", probe_kern_btf,
4809         },
4810         [FEAT_BTF_FUNC] = {
4811                 "BTF functions", probe_kern_btf_func,
4812         },
4813         [FEAT_BTF_GLOBAL_FUNC] = {
4814                 "BTF global function", probe_kern_btf_func_global,
4815         },
4816         [FEAT_BTF_DATASEC] = {
4817                 "BTF data section and variable", probe_kern_btf_datasec,
4818         },
4819         [FEAT_ARRAY_MMAP] = {
4820                 "ARRAY map mmap()", probe_kern_array_mmap,
4821         },
4822         [FEAT_EXP_ATTACH_TYPE] = {
4823                 "BPF_PROG_LOAD expected_attach_type attribute",
4824                 probe_kern_exp_attach_type,
4825         },
4826         [FEAT_PROBE_READ_KERN] = {
4827                 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4828         },
4829         [FEAT_PROG_BIND_MAP] = {
4830                 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4831         },
4832         [FEAT_MODULE_BTF] = {
4833                 "module BTF support", probe_module_btf,
4834         },
4835         [FEAT_BTF_FLOAT] = {
4836                 "BTF_KIND_FLOAT support", probe_kern_btf_float,
4837         },
4838         [FEAT_PERF_LINK] = {
4839                 "BPF perf link support", probe_perf_link,
4840         },
4841         [FEAT_BTF_DECL_TAG] = {
4842                 "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
4843         },
4844         [FEAT_BTF_TYPE_TAG] = {
4845                 "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
4846         },
4847         [FEAT_MEMCG_ACCOUNT] = {
4848                 "memcg-based memory accounting", probe_memcg_account,
4849         },
4850         [FEAT_BPF_COOKIE] = {
4851                 "BPF cookie support", probe_kern_bpf_cookie,
4852         },
4853         [FEAT_BTF_ENUM64] = {
4854                 "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
4855         },
4856         [FEAT_SYSCALL_WRAPPER] = {
4857                 "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
4858         },
4859 };
4860
4861 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4862 {
4863         struct kern_feature_desc *feat = &feature_probes[feat_id];
4864         int ret;
4865
4866         if (obj && obj->gen_loader)
4867                 /* To generate a loader program, assume the latest kernel
4868                  * to avoid doing extra prog_load and map_create syscalls.
4869                  */
4870                 return true;
4871
4872         if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4873                 ret = feat->probe();
4874                 if (ret > 0) {
4875                         WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4876                 } else if (ret == 0) {
4877                         WRITE_ONCE(feat->res, FEAT_MISSING);
4878                 } else {
4879                         pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4880                         WRITE_ONCE(feat->res, FEAT_MISSING);
4881                 }
4882         }
4883
4884         return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4885 }
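/* kernel_supports() and the probe table above are libbpf-internal.
 * Applications needing similar detection can use libbpf's public probing
 * API, which performs comparable load-and-discard probes (sketch):
 *
 *	if (libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_KPROBE, NULL) == 1)
 *		;	// kprobe programs are supported
 *	if (libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL) == 1)
 *		;	// BPF ringbuf maps are supported
 */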
4886
4887 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4888 {
4889         struct bpf_map_info map_info;
4890         char msg[STRERR_BUFSIZE];
4891         __u32 map_info_len = sizeof(map_info);
4892         int err;
4893
4894         memset(&map_info, 0, map_info_len);
4895         err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
4896         if (err && errno == EINVAL)
4897                 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4898         if (err) {
4899                 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4900                         libbpf_strerror_r(errno, msg, sizeof(msg)));
4901                 return false;
4902         }
4903
4904         return (map_info.type == map->def.type &&
4905                 map_info.key_size == map->def.key_size &&
4906                 map_info.value_size == map->def.value_size &&
4907                 map_info.max_entries == map->def.max_entries &&
4908                 map_info.map_flags == map->def.map_flags &&
4909                 map_info.map_extra == map->map_extra);
4910 }
4911
4912 static int
4913 bpf_object__reuse_map(struct bpf_map *map)
4914 {
4915         char *cp, errmsg[STRERR_BUFSIZE];
4916         int err, pin_fd;
4917
4918         pin_fd = bpf_obj_get(map->pin_path);
4919         if (pin_fd < 0) {
4920                 err = -errno;
4921                 if (err == -ENOENT) {
4922                         pr_debug("found no pinned map to reuse at '%s'\n",
4923                                  map->pin_path);
4924                         return 0;
4925                 }
4926
4927                 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4928                 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4929                         map->pin_path, cp);
4930                 return err;
4931         }
4932
4933         if (!map_is_reuse_compat(map, pin_fd)) {
4934                 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4935                         map->pin_path);
4936                 close(pin_fd);
4937                 return -EINVAL;
4938         }
4939
4940         err = bpf_map__reuse_fd(map, pin_fd);
4941         close(pin_fd);
4942         if (err)
4943                 return err;
4944
4945         map->pinned = true;
4946         pr_debug("reused pinned map at '%s'\n", map->pin_path);
4947
4948         return 0;
4949 }
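/* Usage sketch (pin path is illustrative; a mounted bpffs is assumed):
 * setting a pin path before load opts the map into the reuse logic above,
 * so a second process loading the same object shares the pinned map.
 *
 *	bpf_map__set_pin_path(m, "/sys/fs/bpf/shared_counters");
 *	bpf_object__load(obj);
 */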
4950
4951 static int
4952 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4953 {
4954         enum libbpf_map_type map_type = map->libbpf_type;
4955         char *cp, errmsg[STRERR_BUFSIZE];
4956         int err, zero = 0;
4957
4958         if (obj->gen_loader) {
4959                 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
4960                                          map->mmaped, map->def.value_size);
4961                 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
4962                         bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
4963                 return 0;
4964         }
4965         err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4966         if (err) {
4967                 err = -errno;
4968                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4969                 pr_warn("Error setting initial map(%s) contents: %s\n",
4970                         map->name, cp);
4971                 return err;
4972         }
4973
4974         /* Freeze .rodata and .kconfig map as read-only from syscall side. */
4975         if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4976                 err = bpf_map_freeze(map->fd);
4977                 if (err) {
4978                         err = -errno;
4979                         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4980                         pr_warn("Error freezing map(%s) as read-only: %s\n",
4981                                 map->name, cp);
4982                         return err;
4983                 }
4984         }
4985         return 0;
4986 }
4987
4988 static void bpf_map__destroy(struct bpf_map *map);
4989
4990 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
4991 {
4992         LIBBPF_OPTS(bpf_map_create_opts, create_attr);
4993         struct bpf_map_def *def = &map->def;
4994         const char *map_name = NULL;
4995         int err = 0;
4996
4997         if (kernel_supports(obj, FEAT_PROG_NAME))
4998                 map_name = map->name;
4999         create_attr.map_ifindex = map->map_ifindex;
5000         create_attr.map_flags = def->map_flags;
5001         create_attr.numa_node = map->numa_node;
5002         create_attr.map_extra = map->map_extra;
5003
5004         if (bpf_map__is_struct_ops(map))
5005                 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5006
5007         if (obj->btf && btf__fd(obj->btf) >= 0) {
5008                 create_attr.btf_fd = btf__fd(obj->btf);
5009                 create_attr.btf_key_type_id = map->btf_key_type_id;
5010                 create_attr.btf_value_type_id = map->btf_value_type_id;
5011         }
5012
5013         if (bpf_map_type__is_map_in_map(def->type)) {
5014                 if (map->inner_map) {
5015                         err = bpf_object__create_map(obj, map->inner_map, true);
5016                         if (err) {
5017                                 pr_warn("map '%s': failed to create inner map: %d\n",
5018                                         map->name, err);
5019                                 return err;
5020                         }
5021                         map->inner_map_fd = bpf_map__fd(map->inner_map);
5022                 }
5023                 if (map->inner_map_fd >= 0)
5024                         create_attr.inner_map_fd = map->inner_map_fd;
5025         }
5026
5027         switch (def->type) {
5028         case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5029         case BPF_MAP_TYPE_CGROUP_ARRAY:
5030         case BPF_MAP_TYPE_STACK_TRACE:
5031         case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5032         case BPF_MAP_TYPE_HASH_OF_MAPS:
5033         case BPF_MAP_TYPE_DEVMAP:
5034         case BPF_MAP_TYPE_DEVMAP_HASH:
5035         case BPF_MAP_TYPE_CPUMAP:
5036         case BPF_MAP_TYPE_XSKMAP:
5037         case BPF_MAP_TYPE_SOCKMAP:
5038         case BPF_MAP_TYPE_SOCKHASH:
5039         case BPF_MAP_TYPE_QUEUE:
5040         case BPF_MAP_TYPE_STACK:
5041                 create_attr.btf_fd = 0;
5042                 create_attr.btf_key_type_id = 0;
5043                 create_attr.btf_value_type_id = 0;
5044                 map->btf_key_type_id = 0;
5045                 map->btf_value_type_id = 0;
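                /* intentional fall-through: BTF info was cleared above */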
5046         default:
5047                 break;
5048         }
5049
5050         if (obj->gen_loader) {
5051                 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5052                                     def->key_size, def->value_size, def->max_entries,
5053                                     &create_attr, is_inner ? -1 : map - obj->maps);
5054                 /* Pretend to have a valid FD to pass various fd >= 0 checks.
5055                  * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
5056                  */
5057                 map->fd = 0;
5058         } else {
5059                 map->fd = bpf_map_create(def->type, map_name,
5060                                          def->key_size, def->value_size,
5061                                          def->max_entries, &create_attr);
5062         }
5063         if (map->fd < 0 && (create_attr.btf_key_type_id ||
5064                             create_attr.btf_value_type_id)) {
5065                 char *cp, errmsg[STRERR_BUFSIZE];
5066
5067                 err = -errno;
5068                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5069                 pr_warn("Error in bpf_map_create(%s):%s(%d). Retrying without BTF.\n",
5070                         map->name, cp, err);
5071                 create_attr.btf_fd = 0;
5072                 create_attr.btf_key_type_id = 0;
5073                 create_attr.btf_value_type_id = 0;
5074                 map->btf_key_type_id = 0;
5075                 map->btf_value_type_id = 0;
5076                 map->fd = bpf_map_create(def->type, map_name,
5077                                          def->key_size, def->value_size,
5078                                          def->max_entries, &create_attr);
5079         }
5080
5081         err = map->fd < 0 ? -errno : 0;
5082
5083         if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5084                 if (obj->gen_loader)
5085                         map->inner_map->fd = -1;
5086                 bpf_map__destroy(map->inner_map);
5087                 zfree(&map->inner_map);
5088         }
5089
5090         return err;
5091 }
5092
5093 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5094 {
5095         const struct bpf_map *targ_map;
5096         unsigned int i;
5097         int fd, err = 0;
5098
5099         for (i = 0; i < map->init_slots_sz; i++) {
5100                 if (!map->init_slots[i])
5101                         continue;
5102
5103                 targ_map = map->init_slots[i];
5104                 fd = bpf_map__fd(targ_map);
5105
5106                 if (obj->gen_loader) {
5107                         bpf_gen__populate_outer_map(obj->gen_loader,
5108                                                     map - obj->maps, i,
5109                                                     targ_map - obj->maps);
5110                 } else {
5111                         err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5112                 }
5113                 if (err) {
5114                         err = -errno;
5115                         pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5116                                 map->name, i, targ_map->name, fd, err);
5117                         return err;
5118                 }
5119                 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5120                          map->name, i, targ_map->name, fd);
5121         }
5122
5123         zfree(&map->init_slots);
5124         map->init_slots_sz = 0;
5125
5126         return 0;
5127 }
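/* The init_slots[] populated above originate from declarative map-in-map
 * definitions on the BPF side; a sketch of that BPF C code (names are
 * illustrative, not from this file):
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner_a SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 4);
 *		__type(key, int);
 *		__array(values, struct inner);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner_a },
 *	};
 */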
5128
5129 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5130 {
5131         const struct bpf_program *targ_prog;
5132         unsigned int i;
5133         int fd, err;
5134
5135         if (obj->gen_loader)
5136                 return -ENOTSUP;
5137
5138         for (i = 0; i < map->init_slots_sz; i++) {
5139                 if (!map->init_slots[i])
5140                         continue;
5141
5142                 targ_prog = map->init_slots[i];
5143                 fd = bpf_program__fd(targ_prog);
5144
5145                 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5146                 if (err) {
5147                         err = -errno;
5148                         pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5149                                 map->name, i, targ_prog->name, fd, err);
5150                         return err;
5151                 }
5152                 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5153                          map->name, i, targ_prog->name, fd);
5154         }
5155
5156         zfree(&map->init_slots);
5157         map->init_slots_sz = 0;
5158
5159         return 0;
5160 }
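/* PROG_ARRAY slots are declared analogously on the BPF side, with programs
 * instead of maps as slot values (sketch; 'tail_callee' is illustrative):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 2);
 *		__uint(key_size, sizeof(__u32));
 *		__array(values, int (void *));
 *	} jmp_table SEC(".maps") = {
 *		.values = { [0] = &tail_callee },
 *	};
 */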
5161
5162 static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5163 {
5164         struct bpf_map *map;
5165         int i, err;
5166
5167         for (i = 0; i < obj->nr_maps; i++) {
5168                 map = &obj->maps[i];
5169
5170                 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5171                         continue;
5172
5173                 err = init_prog_array_slots(obj, map);
5174                 if (err < 0) {
5175                         zclose(map->fd);
5176                         return err;
5177                 }
5178         }
5179         return 0;
5180 }
5181
5182 static int map_set_def_max_entries(struct bpf_map *map)
5183 {
5184         if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5185                 int nr_cpus;
5186
5187                 nr_cpus = libbpf_num_possible_cpus();
5188                 if (nr_cpus < 0) {
5189                         pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5190                                 map->name, nr_cpus);
5191                         return nr_cpus;
5192                 }
5193                 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5194                 map->def.max_entries = nr_cpus;
5195         }
5196
5197         return 0;
5198 }
5199
5200 static int
5201 bpf_object__create_maps(struct bpf_object *obj)
5202 {
5203         struct bpf_map *map;
5204         char *cp, errmsg[STRERR_BUFSIZE];
5205         unsigned int i, j;
5206         int err;
5207         bool retried;
5208
5209         for (i = 0; i < obj->nr_maps; i++) {
5210                 map = &obj->maps[i];
5211
5212                 /* To support old kernels, we skip creating global data maps
5213                  * (.rodata, .data, .kconfig, etc); later on, during program
5214                  * loading, if we detect that at least one of the to-be-loaded
5215                  * programs is referencing any global data map, we'll error
5216                  * out with the program name and relocation index logged.
5217                  * This approach allows us to accommodate Clang emitting
5218                  * unnecessary .rodata.str1.1 sections for string literals,
5219                  * and it also allows CO-RE applications that use global
5220                  * variables in some of their BPF programs, but not in others.
5221                  * If those global-variable-using programs are not loaded at
5222                  * runtime due to bpf_program__set_autoload(prog, false),
5223                  * bpf_object loading will succeed just fine even on old
5224                  * kernels.
5225                  */
5226                 if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5227                         map->autocreate = false;
5228
5229                 if (!map->autocreate) {
5230                         pr_debug("map '%s': skipped auto-creating...\n", map->name);
5231                         continue;
5232                 }
5233
5234                 err = map_set_def_max_entries(map);
5235                 if (err)
5236                         goto err_out;
5237
5238                 retried = false;
5239 retry:
5240                 if (map->pin_path) {
5241                         err = bpf_object__reuse_map(map);
5242                         if (err) {
5243                                 pr_warn("map '%s': error reusing pinned map\n",
5244                                         map->name);
5245                                 goto err_out;
5246                         }
5247                         if (retried && map->fd < 0) {
5248                                 pr_warn("map '%s': cannot find pinned map\n",
5249                                         map->name);
5250                                 err = -ENOENT;
5251                                 goto err_out;
5252                         }
5253                 }
5254
5255                 if (map->fd >= 0) {
5256                         pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5257                                  map->name, map->fd);
5258                 } else {
5259                         err = bpf_object__create_map(obj, map, false);
5260                         if (err)
5261                                 goto err_out;
5262
5263                         pr_debug("map '%s': created successfully, fd=%d\n",
5264                                  map->name, map->fd);
5265
5266                         if (bpf_map__is_internal(map)) {
5267                                 err = bpf_object__populate_internal_map(obj, map);
5268                                 if (err < 0) {
5269                                         zclose(map->fd);
5270                                         goto err_out;
5271                                 }
5272                         }
5273
5274                         if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5275                                 err = init_map_in_map_slots(obj, map);
5276                                 if (err < 0) {
5277                                         zclose(map->fd);
5278                                         goto err_out;
5279                                 }
5280                         }
5281                 }
5282
5283                 if (map->pin_path && !map->pinned) {
5284                         err = bpf_map__pin(map, NULL);
5285                         if (err) {
5286                                 zclose(map->fd);
5287                                 if (!retried && err == -EEXIST) {
5288                                         retried = true;
5289                                         goto retry;
5290                                 }
5291                                 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5292                                         map->name, map->pin_path, err);
5293                                 goto err_out;
5294                         }
5295                 }
5296         }
5297
5298         return 0;
5299
5300 err_out:
5301         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5302         pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5303         pr_perm_msg(err);
5304         for (j = 0; j < i; j++)
5305                 zclose(obj->maps[j].fd);
5306         return err;
5307 }
5308
5309 static bool bpf_core_is_flavor_sep(const char *s)
5310 {
5311         /* check X___Y name pattern, where X and Y are not underscores */
5312         return s[0] != '_' &&                                 /* X */
5313                s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
5314                s[4] != '_';                                   /* Y */
5315 }
5316
5317 /* Given 'some_struct_name___with_flavor', return the length of the name
5318  * prefix before the last triple underscore. The struct name part after the
5319  * last triple underscore is ignored by BPF CO-RE during relocation matching.
5320  */
5321 size_t bpf_core_essential_name_len(const char *name)
5322 {
5323         size_t n = strlen(name);
5324         int i;
5325
5326         for (i = n - 5; i >= 0; i--) {
5327                 if (bpf_core_is_flavor_sep(name + i))
5328                         return i + 1;
5329         }
5330         return n;
5331 }
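/* Example: for "task_struct___v510" the essential length covers just
 * "task_struct"; the "___v510" flavor suffix only disambiguates alternative
 * local definitions and is ignored when matching against target BTF.
 */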
5332
5333 void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5334 {
5335         if (!cands)
5336                 return;
5337
5338         free(cands->cands);
5339         free(cands);
5340 }
5341
5342 int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5343                        size_t local_essent_len,
5344                        const struct btf *targ_btf,
5345                        const char *targ_btf_name,
5346                        int targ_start_id,
5347                        struct bpf_core_cand_list *cands)
5348 {
5349         struct bpf_core_cand *new_cands, *cand;
5350         const struct btf_type *t, *local_t;
5351         const char *targ_name, *local_name;
5352         size_t targ_essent_len;
5353         int n, i;
5354
5355         local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5356         local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5357
5358         n = btf__type_cnt(targ_btf);
5359         for (i = targ_start_id; i < n; i++) {
5360                 t = btf__type_by_id(targ_btf, i);
5361                 if (!btf_kind_core_compat(t, local_t))
5362                         continue;
5363
5364                 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5365                 if (str_is_empty(targ_name))
5366                         continue;
5367
5368                 targ_essent_len = bpf_core_essential_name_len(targ_name);
5369                 if (targ_essent_len != local_essent_len)
5370                         continue;
5371
5372                 if (strncmp(local_name, targ_name, local_essent_len) != 0)
5373                         continue;
5374
5375                 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5376                          local_cand->id, btf_kind_str(local_t),
5377                          local_name, i, btf_kind_str(t), targ_name,
5378                          targ_btf_name);
5379                 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5380                                               sizeof(*cands->cands));
5381                 if (!new_cands)
5382                         return -ENOMEM;
5383
5384                 cand = &new_cands[cands->len];
5385                 cand->btf = targ_btf;
5386                 cand->id = i;
5387
5388                 cands->cands = new_cands;
5389                 cands->len++;
5390         }
5391         return 0;
5392 }
5393
5394 static int load_module_btfs(struct bpf_object *obj)
5395 {
5396         struct bpf_btf_info info;
5397         struct module_btf *mod_btf;
5398         struct btf *btf;
5399         char name[64];
5400         __u32 id = 0, len;
5401         int err, fd;
5402
5403         if (obj->btf_modules_loaded)
5404                 return 0;
5405
5406         if (obj->gen_loader)
5407                 return 0;
5408
5409         /* don't do this again, even if we find no module BTFs */
5410         obj->btf_modules_loaded = true;
5411
5412         /* kernel too old to support module BTFs */
5413         if (!kernel_supports(obj, FEAT_MODULE_BTF))
5414                 return 0;
5415
5416         while (true) {
5417                 err = bpf_btf_get_next_id(id, &id);
5418                 if (err && errno == ENOENT)
5419                         return 0;
5420                 if (err) {
5421                         err = -errno;
5422                         pr_warn("failed to iterate BTF objects: %d\n", err);
5423                         return err;
5424                 }
5425
5426                 fd = bpf_btf_get_fd_by_id(id);
5427                 if (fd < 0) {
5428                         if (errno == ENOENT)
5429                                 continue; /* expected race: BTF was unloaded */
5430                         err = -errno;
5431                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5432                         return err;
5433                 }
5434
5435                 len = sizeof(info);
5436                 memset(&info, 0, sizeof(info));
5437                 info.name = ptr_to_u64(name);
5438                 info.name_len = sizeof(name);
5439
5440                 err = bpf_btf_get_info_by_fd(fd, &info, &len);
5441                 if (err) {
5442                         err = -errno;
5443                         pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5444                         goto err_out;
5445                 }
5446
5447                 /* ignore non-module BTFs */
5448                 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5449                         close(fd);
5450                         continue;
5451                 }
5452
5453                 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5454                 err = libbpf_get_error(btf);
5455                 if (err) {
5456                         pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5457                                 name, id, err);
5458                         goto err_out;
5459                 }
5460
5461                 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5462                                         sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5463                 if (err)
5464                         goto err_out;
5465
5466                 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5467
5468                 mod_btf->btf = btf;
5469                 mod_btf->id = id;
5470                 mod_btf->fd = fd;
5471                 mod_btf->name = strdup(name);
5472                 if (!mod_btf->name) {
5473                         err = -ENOMEM;
5474                         goto err_out;
5475                 }
5476                 continue;
5477
5478 err_out:
5479                 close(fd);
5480                 return err;
5481         }
5482
5483         return 0;
5484 }
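
/* A minimal sketch (hypothetical, not part of libbpf) showing how the same
 * BTF-iteration UAPI used by load_module_btfs() above can be driven
 * standalone; error handling is trimmed for brevity:
 *
 *	__u32 id = 0;
 *	int fd;
 *
 *	while (!bpf_btf_get_next_id(id, &id)) {
 *		fd = bpf_btf_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue; // expected race: BTF object unloaded
 *		// ... query it with bpf_btf_get_info_by_fd() ...
 *		close(fd);
 *	}
 */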
5485
5486 static struct bpf_core_cand_list *
5487 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5488 {
5489         struct bpf_core_cand local_cand = {};
5490         struct bpf_core_cand_list *cands;
5491         const struct btf *main_btf;
5492         const struct btf_type *local_t;
5493         const char *local_name;
5494         size_t local_essent_len;
5495         int err, i;
5496
5497         local_cand.btf = local_btf;
5498         local_cand.id = local_type_id;
5499         local_t = btf__type_by_id(local_btf, local_type_id);
5500         if (!local_t)
5501                 return ERR_PTR(-EINVAL);
5502
5503         local_name = btf__name_by_offset(local_btf, local_t->name_off);
5504         if (str_is_empty(local_name))
5505                 return ERR_PTR(-EINVAL);
5506         local_essent_len = bpf_core_essential_name_len(local_name);
5507
5508         cands = calloc(1, sizeof(*cands));
5509         if (!cands)
5510                 return ERR_PTR(-ENOMEM);
5511
5512         /* Attempt to find target candidates in vmlinux BTF first */
5513         main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5514         err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5515         if (err)
5516                 goto err_out;
5517
5518         /* if vmlinux BTF has any candidate, don't go for module BTFs */
5519         if (cands->len)
5520                 return cands;
5521
5522         /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5523         if (obj->btf_vmlinux_override)
5524                 return cands;
5525
5526         /* now look through module BTFs, still trying to find candidates */
5527         err = load_module_btfs(obj);
5528         if (err)
5529                 goto err_out;
5530
5531         for (i = 0; i < obj->btf_module_cnt; i++) {
5532                 err = bpf_core_add_cands(&local_cand, local_essent_len,
5533                                          obj->btf_modules[i].btf,
5534                                          obj->btf_modules[i].name,
5535                                          btf__type_cnt(obj->btf_vmlinux),
5536                                          cands);
5537                 if (err)
5538                         goto err_out;
5539         }
5540
5541         return cands;
5542 err_out:
5543         bpf_core_free_cands(cands);
5544         return ERR_PTR(err);
5545 }
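
/* Candidate matching above is by "essential" type name: anything after a
 * "___" suffix in the local name is ignored by bpf_core_essential_name_len().
 * A hedged BPF-side sketch of that "flavor" convention, with a hypothetical
 * type name:
 *
 *	// local "flavor" definition in the BPF program...
 *	struct pt_regs___arm64 {
 *		unsigned long sp;
 *	};
 *
 *	// ...is matched against kernel candidates named "pt_regs",
 *	// in vmlinux BTF first and then in module BTFs.
 */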
5546
5547 /* Check local and target types for compatibility. This check is used for
5548  * type-based CO-RE relocations and follows slightly different rules than
5549  * field-based relocations. This function assumes that root types were already
5550  * checked for name match. Beyond that initial root-level name check, names
5551  * are completely ignored. Compatibility rules are as follows:
5552  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5553  *     kind should match for local and target types (i.e., STRUCT is not
5554  *     compatible with UNION);
5555  *   - for ENUMs, the size is ignored;
5556  *   - for INT, size and signedness are ignored;
5557  *   - for ARRAY, dimensionality is ignored, element types are checked for
5558  *     compatibility recursively;
5559  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
5560  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5561  *   - FUNC_PROTOs are compatible if they have a compatible signature: same
5562  *     number of input args and compatible return and argument types.
5563  * These rules are not set in stone and probably will be adjusted as we get
5564  * more experience with using BPF CO-RE relocations.
5565  */
5566 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5567                               const struct btf *targ_btf, __u32 targ_id)
5568 {
5569         return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
5570 }
5571
5572 int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
5573                          const struct btf *targ_btf, __u32 targ_id)
5574 {
5575         return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
5576 }
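
/* To illustrate the rules above (a hypothetical example, not libbpf code):
 * for type-based relocations the following local and target types are
 * compatible, because both roots are STRUCTs and struct members are not
 * compared at all:
 *
 *	struct s { int a; };		// local
 *	struct s { long b[8]; };	// target
 *
 * whereas "struct s" vs "union s" would not be, since the kinds differ.
 */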
5577
5578 static size_t bpf_core_hash_fn(const long key, void *ctx)
5579 {
5580         return key;
5581 }
5582
5583 static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
5584 {
5585         return k1 == k2;
5586 }
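
/* These trivial callbacks make the candidate cache an identity-hashed map
 * keyed by local BTF type ID. A hedged sketch of the internal hashmap API
 * usage (mirroring the cand_cache handling below):
 *
 *	struct hashmap *cache;
 *
 *	cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
 *	if (IS_ERR(cache))
 *		return PTR_ERR(cache);
 *	if (!hashmap__find(cache, local_id, &cands))	// not cached yet
 *		err = hashmap__set(cache, local_id, cands, NULL, NULL);
 */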
5587
5588 static int record_relo_core(struct bpf_program *prog,
5589                             const struct bpf_core_relo *core_relo, int insn_idx)
5590 {
5591         struct reloc_desc *relos, *relo;
5592
5593         relos = libbpf_reallocarray(prog->reloc_desc,
5594                                     prog->nr_reloc + 1, sizeof(*relos));
5595         if (!relos)
5596                 return -ENOMEM;
5597         relo = &relos[prog->nr_reloc];
5598         relo->type = RELO_CORE;
5599         relo->insn_idx = insn_idx;
5600         relo->core_relo = core_relo;
5601         prog->reloc_desc = relos;
5602         prog->nr_reloc++;
5603         return 0;
5604 }
5605
5606 static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5607 {
5608         struct reloc_desc *relo;
5609         int i;
5610
5611         for (i = 0; i < prog->nr_reloc; i++) {
5612                 relo = &prog->reloc_desc[i];
5613                 if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5614                         continue;
5615
5616                 return relo->core_relo;
5617         }
5618
5619         return NULL;
5620 }
5621
5622 static int bpf_core_resolve_relo(struct bpf_program *prog,
5623                                  const struct bpf_core_relo *relo,
5624                                  int relo_idx,
5625                                  const struct btf *local_btf,
5626                                  struct hashmap *cand_cache,
5627                                  struct bpf_core_relo_res *targ_res)
5628 {
5629         struct bpf_core_spec specs_scratch[3] = {};
5630         struct bpf_core_cand_list *cands = NULL;
5631         const char *prog_name = prog->name;
5632         const struct btf_type *local_type;
5633         const char *local_name;
5634         __u32 local_id = relo->type_id;
5635         int err;
5636
5637         local_type = btf__type_by_id(local_btf, local_id);
5638         if (!local_type)
5639                 return -EINVAL;
5640
5641         local_name = btf__name_by_offset(local_btf, local_type->name_off);
5642         if (!local_name)
5643                 return -EINVAL;
5644
5645         if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5646             !hashmap__find(cand_cache, local_id, &cands)) {
5647                 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5648                 if (IS_ERR(cands)) {
5649                         pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5650                                 prog_name, relo_idx, local_id, btf_kind_str(local_type),
5651                                 local_name, PTR_ERR(cands));
5652                         return PTR_ERR(cands);
5653                 }
5654                 err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
5655                 if (err) {
5656                         bpf_core_free_cands(cands);
5657                         return err;
5658                 }
5659         }
5660
5661         return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5662                                        targ_res);
5663 }
5664
5665 static int
5666 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5667 {
5668         const struct btf_ext_info_sec *sec;
5669         struct bpf_core_relo_res targ_res;
5670         const struct bpf_core_relo *rec;
5671         const struct btf_ext_info *seg;
5672         struct hashmap_entry *entry;
5673         struct hashmap *cand_cache = NULL;
5674         struct bpf_program *prog;
5675         struct bpf_insn *insn;
5676         const char *sec_name;
5677         int i, err = 0, insn_idx, sec_idx, sec_num;
5678
5679         if (obj->btf_ext->core_relo_info.len == 0)
5680                 return 0;
5681
5682         if (targ_btf_path) {
5683                 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5684                 err = libbpf_get_error(obj->btf_vmlinux_override);
5685                 if (err) {
5686                         pr_warn("failed to parse target BTF: %d\n", err);
5687                         return err;
5688                 }
5689         }
5690
5691         cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5692         if (IS_ERR(cand_cache)) {
5693                 err = PTR_ERR(cand_cache);
5694                 goto out;
5695         }
5696
5697         seg = &obj->btf_ext->core_relo_info;
5698         sec_num = 0;
5699         for_each_btf_ext_sec(seg, sec) {
5700                 sec_idx = seg->sec_idxs[sec_num];
5701                 sec_num++;
5702
5703                 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5704                 if (str_is_empty(sec_name)) {
5705                         err = -EINVAL;
5706                         goto out;
5707                 }
5708
5709                 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
5710
5711                 for_each_btf_ext_rec(seg, sec, i, rec) {
5712                         if (rec->insn_off % BPF_INSN_SZ)
5713                                 return -EINVAL;
5714                         insn_idx = rec->insn_off / BPF_INSN_SZ;
5715                         prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5716                         if (!prog) {
5717                                 /* When __weak subprog is "overridden" by another instance
5718                                  * of the subprog from a different object file, linker still
5719                                  * appends all the .BTF.ext info that used to belong to that
5720                                  * eliminated subprogram.
5721                                  * This is similar to what x86-64 linker does for relocations.
5722                          * So ignore such relocations, just like we ignore
5723                                  * subprog instructions when discovering subprograms.
5724                                  */
5725                                 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
5726                                          sec_name, i, insn_idx);
5727                                 continue;
5728                         }
5729                         /* no need to apply CO-RE relocation if the program is
5730                          * not going to be loaded
5731                          */
5732                         if (!prog->autoload)
5733                                 continue;
5734
5735                         /* adjust insn_idx from section frame of reference to the local
5736                          * program's frame of reference; (sub-)program code is not yet
5737                          * relocated, so it's enough to just subtract in-section offset
5738                          */
5739                         insn_idx = insn_idx - prog->sec_insn_off;
5740                         if (insn_idx >= prog->insns_cnt)
5741                                 return -EINVAL;
5742                         insn = &prog->insns[insn_idx];
5743
5744                         err = record_relo_core(prog, rec, insn_idx);
5745                         if (err) {
5746                                 pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5747                                         prog->name, i, err);
5748                                 goto out;
5749                         }
5750
5751                         if (prog->obj->gen_loader)
5752                                 continue;
5753
5754                         err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
5755                         if (err) {
5756                                 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5757                                         prog->name, i, err);
5758                                 goto out;
5759                         }
5760
5761                         err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5762                         if (err) {
5763                                 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5764                                         prog->name, i, insn_idx, err);
5765                                 goto out;
5766                         }
5767                 }
5768         }
5769
5770 out:
5771         /* obj->btf_vmlinux and module BTFs are freed after object load */
5772         btf__free(obj->btf_vmlinux_override);
5773         obj->btf_vmlinux_override = NULL;
5774
5775         if (!IS_ERR_OR_NULL(cand_cache)) {
5776                 hashmap__for_each_entry(cand_cache, entry, i) {
5777                         bpf_core_free_cands(entry->pvalue);
5778                 }
5779                 hashmap__free(cand_cache);
5780         }
5781         return err;
5782 }
5783
5784 /* base map load ldimm64 special constant, used also for log fixup logic */
5785 #define MAP_LDIMM64_POISON_BASE 2001000000
5786 #define MAP_LDIMM64_POISON_PFX "200100"
5787
5788 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
5789                                int insn_idx, struct bpf_insn *insn,
5790                                int map_idx, const struct bpf_map *map)
5791 {
5792         int i;
5793
5794         pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
5795                  prog->name, relo_idx, insn_idx, map_idx, map->name);
5796
5797         /* we turn a single ldimm64 into two identical invalid calls */
5798         for (i = 0; i < 2; i++) {
5799                 insn->code = BPF_JMP | BPF_CALL;
5800                 insn->dst_reg = 0;
5801                 insn->src_reg = 0;
5802                 insn->off = 0;
5803                 /* if this instruction is reachable (not dead code), the
5804                  * verifier will complain with something like:
5805                  * invalid func unknown#2001000123
5806                  * where the trailing 123 is the map index into the obj->maps[] array
5807                  */
5808                 insn->imm = MAP_LDIMM64_POISON_BASE + map_idx;
5809
5810                 insn++;
5811         }
5812 }
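
/* Concretely (a worked example, not code from this file): a single ldimm64
 * loading map #5, i.e. two struct bpf_insn slots
 *
 *	BPF_LD | BPF_IMM | BPF_DW, imm = <map fd>	(2 insns)
 *
 * is rewritten into two identical
 *
 *	BPF_JMP | BPF_CALL, imm = 2001000005
 *
 * so a verifier that reaches it reports "invalid func unknown#2001000005",
 * and the log fixup logic can map the "200100" prefix back to obj->maps[5].
 */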
5813
5814 /* Relocate data references within program code:
5815  *  - map references;
5816  *  - global variable references;
5817  *  - extern references.
5818  */
5819 static int
5820 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5821 {
5822         int i;
5823
5824         for (i = 0; i < prog->nr_reloc; i++) {
5825                 struct reloc_desc *relo = &prog->reloc_desc[i];
5826                 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5827                 const struct bpf_map *map;
5828                 struct extern_desc *ext;
5829
5830                 switch (relo->type) {
5831                 case RELO_LD64:
5832                         map = &obj->maps[relo->map_idx];
5833                         if (obj->gen_loader) {
5834                                 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5835                                 insn[0].imm = relo->map_idx;
5836                         } else if (map->autocreate) {
5837                                 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5838                                 insn[0].imm = map->fd;
5839                         } else {
5840                                 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5841                                                    relo->map_idx, map);
5842                         }
5843                         break;
5844                 case RELO_DATA:
5845                         map = &obj->maps[relo->map_idx];
5846                         insn[1].imm = insn[0].imm + relo->sym_off;
5847                         if (obj->gen_loader) {
5848                                 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5849                                 insn[0].imm = relo->map_idx;
5850                         } else if (map->autocreate) {
5851                                 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5852                                 insn[0].imm = map->fd;
5853                         } else {
5854                                 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5855                                                    relo->map_idx, map);
5856                         }
5857                         break;
5858                 case RELO_EXTERN_VAR:
5859                         ext = &obj->externs[relo->sym_off];
5860                         if (ext->type == EXT_KCFG) {
5861                                 if (obj->gen_loader) {
5862                                         insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5863                                         insn[0].imm = obj->kconfig_map_idx;
5864                                 } else {
5865                                         insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5866                                         insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5867                                 }
5868                                 insn[1].imm = ext->kcfg.data_off;
5869                         } else /* EXT_KSYM */ {
5870                                 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
5871                                         insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5872                                         insn[0].imm = ext->ksym.kernel_btf_id;
5873                                         insn[1].imm = ext->ksym.kernel_btf_obj_fd;
5874                                 } else { /* typeless ksyms or unresolved typed ksyms */
5875                                         insn[0].imm = (__u32)ext->ksym.addr;
5876                                         insn[1].imm = ext->ksym.addr >> 32;
5877                                 }
5878                         }
5879                         break;
5880                 case RELO_EXTERN_FUNC:
5881                         ext = &obj->externs[relo->sym_off];
5882                         insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
5883                         if (ext->is_set) {
5884                                 insn[0].imm = ext->ksym.kernel_btf_id;
5885                                 insn[0].off = ext->ksym.btf_fd_idx;
5886                         } else { /* unresolved weak kfunc */
5887                                 insn[0].imm = 0;
5888                                 insn[0].off = 0;
5889                         }
5890                         break;
5891                 case RELO_SUBPROG_ADDR:
5892                         if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
5893                                 pr_warn("prog '%s': relo #%d: bad insn\n",
5894                                         prog->name, i);
5895                                 return -EINVAL;
5896                         }
5897                         /* handled already */
5898                         break;
5899                 case RELO_CALL:
5900                         /* handled already */
5901                         break;
5902                 case RELO_CORE:
5903                         /* will be handled by bpf_program_record_relos() */
5904                         break;
5905                 default:
5906                         pr_warn("prog '%s': relo #%d: bad relo type %d\n",
5907                                 prog->name, i, relo->type);
5908                         return -EINVAL;
5909                 }
5910         }
5911
5912         return 0;
5913 }
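
/* For reference, the RELO_LD64/RELO_DATA cases above are what resolve a
 * plain map access on the BPF side. A hedged sketch (hypothetical map and
 * program names):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, long);
 *	} counters SEC(".maps");
 *
 *	SEC("tp/sched/sched_switch")
 *	int prog(void *ctx)
 *	{
 *		int key = 0;
 *		// compiles to an ldimm64 of &counters; libbpf patches in
 *		// BPF_PSEUDO_MAP_FD + the created map's FD at load time
 *		long *val = bpf_map_lookup_elem(&counters, &key);
 *
 *		return val ? 0 : 1;
 *	}
 */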
5914
5915 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
5916                                     const struct bpf_program *prog,
5917                                     const struct btf_ext_info *ext_info,
5918                                     void **prog_info, __u32 *prog_rec_cnt,
5919                                     __u32 *prog_rec_sz)
5920 {
5921         void *copy_start = NULL, *copy_end = NULL;
5922         void *rec, *rec_end, *new_prog_info;
5923         const struct btf_ext_info_sec *sec;
5924         size_t old_sz, new_sz;
5925         int i, sec_num, sec_idx, off_adj;
5926
5927         sec_num = 0;
5928         for_each_btf_ext_sec(ext_info, sec) {
5929                 sec_idx = ext_info->sec_idxs[sec_num];
5930                 sec_num++;
5931                 if (prog->sec_idx != sec_idx)
5932                         continue;
5933
5934                 for_each_btf_ext_rec(ext_info, sec, i, rec) {
5935                         __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
5936
5937                         if (insn_off < prog->sec_insn_off)
5938                                 continue;
5939                         if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
5940                                 break;
5941
5942                         if (!copy_start)
5943                                 copy_start = rec;
5944                         copy_end = rec + ext_info->rec_size;
5945                 }
5946
5947                 if (!copy_start)
5948                         return -ENOENT;
5949
5950                 /* append func/line info of a given (sub-)program to the main
5951                  * program's func/line info
5952                  */
5953                 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
5954                 new_sz = old_sz + (copy_end - copy_start);
5955                 new_prog_info = realloc(*prog_info, new_sz);
5956                 if (!new_prog_info)
5957                         return -ENOMEM;
5958                 *prog_info = new_prog_info;
5959                 *prog_rec_cnt = new_sz / ext_info->rec_size;
5960                 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
5961
5962                 /* Kernel instruction offsets are in units of 8-byte
5963                  * instructions, while .BTF.ext instruction offsets generated
5964                  * by Clang are in units of bytes. So convert Clang offsets
5965                  * into kernel offsets and adjust offset according to program
5966                  * relocated position.
5967                  */
5968                 off_adj = prog->sub_insn_off - prog->sec_insn_off;
5969                 rec = new_prog_info + old_sz;
5970                 rec_end = new_prog_info + new_sz;
5971                 for (; rec < rec_end; rec += ext_info->rec_size) {
5972                         __u32 *insn_off = rec;
5973
5974                         *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
5975                 }
5976                 *prog_rec_sz = ext_info->rec_size;
5977                 return 0;
5978         }
5979
5980         return -ENOENT;
5981 }
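
/* A worked example of the offset conversion above (made-up numbers):
 * suppose a subprog sits at section insn offset sec_insn_off = 100 and was
 * appended into the main prog at sub_insn_off = 500, so
 * off_adj = 500 - 100 = 400. A .BTF.ext record with a Clang byte offset of
 * 832 covers section insn 832 / 8 = 104 and is rewritten to kernel insn
 * offset 104 + 400 = 504 within the final program.
 */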
5982
5983 static int
5984 reloc_prog_func_and_line_info(const struct bpf_object *obj,
5985                               struct bpf_program *main_prog,
5986                               const struct bpf_program *prog)
5987 {
5988         int err;
5989
5990         /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
5991          * support func/line info
5992          */
5993         if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
5994                 return 0;
5995
5996         /* only attempt func info relocation if main program's func_info
5997          * relocation was successful
5998          */
5999         if (main_prog != prog && !main_prog->func_info)
6000                 goto line_info;
6001
6002         err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6003                                        &main_prog->func_info,
6004                                        &main_prog->func_info_cnt,
6005                                        &main_prog->func_info_rec_size);
6006         if (err) {
6007                 if (err != -ENOENT) {
6008                         pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6009                                 prog->name, err);
6010                         return err;
6011                 }
6012                 if (main_prog->func_info) {
6013                         /*
6014                          * Some info has already been found, but the last
6015                          * btf_ext reloc failed, so we must error out.
6016                          */
6017                         pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6018                         return err;
6019                 }
6020                 /* Failed to load the very first info; ignore the rest. */
6021                 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6022                         prog->name);
6023         }
6024
6025 line_info:
6026         /* don't relocate line info if main program's relocation failed */
6027         if (main_prog != prog && !main_prog->line_info)
6028                 return 0;
6029
6030         err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6031                                        &main_prog->line_info,
6032                                        &main_prog->line_info_cnt,
6033                                        &main_prog->line_info_rec_size);
6034         if (err) {
6035                 if (err != -ENOENT) {
6036                         pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6037                                 prog->name, err);
6038                         return err;
6039                 }
6040                 if (main_prog->line_info) {
6041                         /*
6042                          * Some info has already been found, but the last
6043                          * btf_ext reloc failed, so we must error out.
6044                          */
6045                         pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6046                         return err;
6047                 }
6048                 /* Failed to load the very first info; ignore the rest. */
6049                 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6050                         prog->name);
6051         }
6052         return 0;
6053 }
6054
6055 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6056 {
6057         size_t insn_idx = *(const size_t *)key;
6058         const struct reloc_desc *relo = elem;
6059
6060         if (insn_idx == relo->insn_idx)
6061                 return 0;
6062         return insn_idx < relo->insn_idx ? -1 : 1;
6063 }
6064
6065 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6066 {
6067         if (!prog->nr_reloc)
6068                 return NULL;
6069         return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6070                        sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6071 }
6072
6073 static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6074 {
6075         int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6076         struct reloc_desc *relos;
6077         int i;
6078
6079         if (main_prog == subprog)
6080                 return 0;
6081         relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6082         if (!relos)
6083                 return -ENOMEM;
6084         if (subprog->nr_reloc)
6085                 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6086                        sizeof(*relos) * subprog->nr_reloc);
6087
6088         for (i = main_prog->nr_reloc; i < new_cnt; i++)
6089                 relos[i].insn_idx += subprog->sub_insn_off;
6090         /* After insn_idx adjustment the 'relos' array is still sorted
6091          * by insn_idx and doesn't break bsearch.
6092          */
6093         main_prog->reloc_desc = relos;
6094         main_prog->nr_reloc = new_cnt;
6095         return 0;
6096 }
6097
6098 static int
6099 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6100                        struct bpf_program *prog)
6101 {
6102         size_t sub_insn_idx, insn_idx, new_cnt;
6103         struct bpf_program *subprog;
6104         struct bpf_insn *insns, *insn;
6105         struct reloc_desc *relo;
6106         int err;
6107
6108         err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6109         if (err)
6110                 return err;
6111
6112         for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6113                 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6114                 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6115                         continue;
6116
6117                 relo = find_prog_insn_relo(prog, insn_idx);
6118                 if (relo && relo->type == RELO_EXTERN_FUNC)
6119                         /* kfunc relocations will be handled later
6120                          * in bpf_object__relocate_data()
6121                          */
6122                         continue;
6123                 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6124                         pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6125                                 prog->name, insn_idx, relo->type);
6126                         return -LIBBPF_ERRNO__RELOC;
6127                 }
6128                 if (relo) {
6129                         /* sub-program instruction index is a combination of
6130                          * an offset of a symbol pointed to by relocation and
6131                          * call instruction's imm field; for global functions,
6132                          * call always has imm = -1, but for static functions
6133                          * relocation is against STT_SECTION and insn->imm
6134                          * points to a start of a static function
6135                          *
6136                          * for subprog addr relocation, the relo->sym_off + insn->imm is
6137                          * the byte offset in the corresponding section.
6138                          */
6139                         if (relo->type == RELO_CALL)
6140                                 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6141                         else
6142                                 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6143                 } else if (insn_is_pseudo_func(insn)) {
6144                         /*
6145                          * RELO_SUBPROG_ADDR relo is always emitted even if both
6146                          * functions are in the same section, so it shouldn't reach here.
6147                          */
6148                         pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6149                                 prog->name, insn_idx);
6150                         return -LIBBPF_ERRNO__RELOC;
6151                 } else {
6152                         /* if subprogram call is to a static function within
6153                          * the same ELF section, there won't be any relocation
6154                          * emitted, but it also means there is no additional
6155                          * offset necessary: insn->imm is relative to the
6156                          * instruction's original position within the section
6157                          */
6158                         sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6159                 }
6160
6161                 /* we enforce that sub-programs are in the .text section */
6162                 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6163                 if (!subprog) {
6164                         pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6165                                 prog->name);
6166                         return -LIBBPF_ERRNO__RELOC;
6167                 }
6168
6169                 /* if it's the first call instruction calling into this
6170                  * subprogram (meaning this subprog hasn't been processed
6171                  * yet) within the context of current main program:
6172                  *   - append it at the end of main program's instruction block;
6173                  *   - process it recursively, while current program is put on hold;
6174                  *   - if that subprogram calls some other not-yet-processed
6175                  *   subprogram, the same thing happens recursively until
6176                  *   there are no more unprocessed subprograms left to append
6177                  *   and relocate.
6178                  */
6179                 if (subprog->sub_insn_off == 0) {
6180                         subprog->sub_insn_off = main_prog->insns_cnt;
6181
6182                         new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6183                         insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6184                         if (!insns) {
6185                                 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6186                                 return -ENOMEM;
6187                         }
6188                         main_prog->insns = insns;
6189                         main_prog->insns_cnt = new_cnt;
6190
6191                         memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6192                                subprog->insns_cnt * sizeof(*insns));
6193
6194                         pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6195                                  main_prog->name, subprog->insns_cnt, subprog->name);
6196
6197                         /* The subprog insns are now appended. Append its relos too. */
6198                         err = append_subprog_relos(main_prog, subprog);
6199                         if (err)
6200                                 return err;
6201                         err = bpf_object__reloc_code(obj, main_prog, subprog);
6202                         if (err)
6203                                 return err;
6204                 }
6205
6206                 /* main_prog->insns memory could have been re-allocated, so
6207                  * calculate pointer again
6208                  */
6209                 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6210                 /* calculate correct instruction position within current main
6211                  * prog; each main prog can have a different set of
6212                  * subprograms appended (potentially in a different order as
6213                  * well), so the position of any subprog can be different for
6214                  * different main programs
6215                  */
6216                 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6217
6218                 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6219                          prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6220         }
6221
6222         return 0;
6223 }
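
/* A worked example of the imm computation above (made-up numbers): if the
 * call insn ends up at index 10 of the main prog (prog->sub_insn_off +
 * insn_idx == 10) and the callee was appended at subprog->sub_insn_off == 20,
 * then imm = 20 - 10 - 1 = 9; at runtime the call jumps to
 * pc + imm + 1 == 10 + 9 + 1 == 20, the subprog's first instruction.
 */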
6224
6225 /*
6226  * Relocate sub-program calls.
6227  *
6228  * The algorithm operates as follows. Each entry-point BPF program (referred to
6229  * as main prog) is processed separately. Each subprog (a non-entry function
6230  * that can be called from either entry progs or other subprogs) gets its
6231  * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6232  * hasn't yet been appended and relocated within the current main prog. Once it's
6233  * relocated, sub_insn_off will point at the position within the current main prog
6234  * where the given subprog was appended. This is later used to relocate all
6235  * the call instructions jumping into this subprog.
6236  *
6237  * We start with main program and process all call instructions. If the call
6238  * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6239  * is zero), subprog instructions are appended at the end of main program's
6240  * instruction array. Then the main program is "put on hold" while we recursively
6241  * process the newly appended subprogram. If that subprogram calls into another
6242  * subprogram that hasn't been appended, new subprogram is appended again to
6243  * the *main* prog's instructions (subprog's instructions are always left
6244  * untouched, as they need to be in unmodified state for subsequent main progs
6245  * and subprog instructions are always sent only as part of a main prog) and
6246  * the process continues recursively. Once all the subprogs called from a main
6247  * prog or any of its subprogs are appended (and relocated), all their
6248  * positions within finalized instructions array are known, so it's easy to
6249  * rewrite call instructions with correct relative offsets, corresponding to
6250  * desired target subprog.
6251  *
6252  * It's important to realize that some subprogs might not be called from a given
6253  * main prog or any of its called/used subprogs. Those will keep their
6254  * subprog->sub_insn_off as zero at all times and won't be appended to current
6255  * main prog and won't be relocated within the context of current main prog.
6256  * They might still be used from other main progs later.
6257  *
6258  * Visually this process can be shown as below. Suppose we have two main
6259  * programs mainA and mainB and BPF object contains three subprogs: subA,
6260  * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6261  * subC both call subB:
6262  *
6263  *        +--------+ +-------+
6264  *        |        v v       |
6265  *     +--+---+ +--+-+-+ +---+--+
6266  *     | subA | | subB | | subC |
6267  *     +--+---+ +------+ +---+--+
6268  *        ^                  ^
6269  *        |                  |
6270  *    +---+-------+   +------+----+
6271  *    |   mainA   |   |   mainB   |
6272  *    +-----------+   +-----------+
6273  *
6274  * We'll start relocating mainA, will find subA, append it and start
6275  * processing subA recursively:
6276  *
6277  *    +-----------+------+
6278  *    |   mainA   | subA |
6279  *    +-----------+------+
6280  *
6281  * At this point we notice that subB is used from subA, so we append it and
6282  * relocate (there are no further subcalls from subB):
6283  *
6284  *    +-----------+------+------+
6285  *    |   mainA   | subA | subB |
6286  *    +-----------+------+------+
6287  *
6288  * At this point, we relocate subA calls, then go one level up and finish with
6289  * relocating mainA calls. mainA is done.
6290  *
6291  * For mainB the process is similar but results in a different order. We start with
6292  * mainB and skip subA and subB, as mainB never calls them (at least
6293  * directly), but we see subC is needed, so we append and start processing it:
6294  *
6295  *    +-----------+------+
6296  *    |   mainB   | subC |
6297  *    +-----------+------+
6298  * Now we see subC needs subB, so we go back to it, append and relocate it:
6299  *
6300  *    +-----------+------+------+
6301  *    |   mainB   | subC | subB |
6302  *    +-----------+------+------+
6303  *
6304  * At this point we unwind recursion, relocate calls in subC, then in mainB.
6305  */
6306 static int
6307 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6308 {
6309         struct bpf_program *subprog;
6310         int i, err;
6311
6312         /* mark all subprogs as not relocated (yet) within the context of
6313          * current main program
6314          */
6315         for (i = 0; i < obj->nr_programs; i++) {
6316                 subprog = &obj->programs[i];
6317                 if (!prog_is_subprog(obj, subprog))
6318                         continue;
6319
6320                 subprog->sub_insn_off = 0;
6321         }
6322
6323         err = bpf_object__reloc_code(obj, prog, prog);
6324         if (err)
6325                 return err;
6326
6327         return 0;
6328 }
6329
6330 static void
6331 bpf_object__free_relocs(struct bpf_object *obj)
6332 {
6333         struct bpf_program *prog;
6334         int i;
6335
6336         /* free up relocation descriptors */
6337         for (i = 0; i < obj->nr_programs; i++) {
6338                 prog = &obj->programs[i];
6339                 zfree(&prog->reloc_desc);
6340                 prog->nr_reloc = 0;
6341         }
6342 }
6343
6344 static int cmp_relocs(const void *_a, const void *_b)
6345 {
6346         const struct reloc_desc *a = _a;
6347         const struct reloc_desc *b = _b;
6348
6349         if (a->insn_idx != b->insn_idx)
6350                 return a->insn_idx < b->insn_idx ? -1 : 1;
6351
6352         /* no two relocations should have the same insn_idx, but ... */
6353         if (a->type != b->type)
6354                 return a->type < b->type ? -1 : 1;
6355
6356         return 0;
6357 }
6358
6359 static void bpf_object__sort_relos(struct bpf_object *obj)
6360 {
6361         int i;
6362
6363         for (i = 0; i < obj->nr_programs; i++) {
6364                 struct bpf_program *p = &obj->programs[i];
6365
6366                 if (!p->nr_reloc)
6367                         continue;
6368
6369                 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6370         }
6371 }
6372
6373 static int
6374 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6375 {
6376         struct bpf_program *prog;
6377         size_t i, j;
6378         int err;
6379
6380         if (obj->btf_ext) {
6381                 err = bpf_object__relocate_core(obj, targ_btf_path);
6382                 if (err) {
6383                         pr_warn("failed to perform CO-RE relocations: %d\n",
6384                                 err);
6385                         return err;
6386                 }
6387                 bpf_object__sort_relos(obj);
6388         }
6389
6390         /* Before relocating calls, pre-process relocations and mark
6391          * the few ld_imm64 instructions that point to subprogs.
6392          * Otherwise bpf_object__reloc_code() later would have to consider
6393          * all ld_imm64 insns as relocation candidates. That would
6394          * reduce relocation speed, since the number of find_prog_insn_relo()
6395          * calls would increase and most of them would fail to find a relo.
6396          */
6397         for (i = 0; i < obj->nr_programs; i++) {
6398                 prog = &obj->programs[i];
6399                 for (j = 0; j < prog->nr_reloc; j++) {
6400                         struct reloc_desc *relo = &prog->reloc_desc[j];
6401                         struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6402
6403                         /* mark the insn, so it's recognized by insn_is_pseudo_func() */
6404                         if (relo->type == RELO_SUBPROG_ADDR)
6405                                 insn[0].src_reg = BPF_PSEUDO_FUNC;
6406                 }
6407         }
6408
6409         /* relocate subprogram calls and append used subprograms to main
6410          * programs; each copy of subprogram code needs to be relocated
6411          * differently for each main program, because its code location might
6412          * have changed.
6413          * Append subprog relos to main programs to allow data relos to be
6414          * processed after text is completely relocated.
6415          */
6416         for (i = 0; i < obj->nr_programs; i++) {
6417                 prog = &obj->programs[i];
6418                 /* sub-program's sub-calls are relocated within the context of
6419                  * its main program only
6420                  */
6421                 if (prog_is_subprog(obj, prog))
6422                         continue;
6423                 if (!prog->autoload)
6424                         continue;
6425
6426                 err = bpf_object__relocate_calls(obj, prog);
6427                 if (err) {
6428                         pr_warn("prog '%s': failed to relocate calls: %d\n",
6429                                 prog->name, err);
6430                         return err;
6431                 }
6432         }
6433         /* Process data relos for main programs */
6434         for (i = 0; i < obj->nr_programs; i++) {
6435                 prog = &obj->programs[i];
6436                 if (prog_is_subprog(obj, prog))
6437                         continue;
6438                 if (!prog->autoload)
6439                         continue;
6440                 err = bpf_object__relocate_data(obj, prog);
6441                 if (err) {
6442                         pr_warn("prog '%s': failed to relocate data references: %d\n",
6443                                 prog->name, err);
6444                         return err;
6445                 }
6446         }
6447
6448         return 0;
6449 }
6450
6451 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6452                                             Elf64_Shdr *shdr, Elf_Data *data);
6453
6454 static int bpf_object__collect_map_relos(struct bpf_object *obj,
6455                                          Elf64_Shdr *shdr, Elf_Data *data)
6456 {
6457         const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6458         int i, j, nrels, new_sz;
6459         const struct btf_var_secinfo *vi = NULL;
6460         const struct btf_type *sec, *var, *def;
6461         struct bpf_map *map = NULL, *targ_map = NULL;
6462         struct bpf_program *targ_prog = NULL;
6463         bool is_prog_array, is_map_in_map;
6464         const struct btf_member *member;
6465         const char *name, *mname, *type;
6466         unsigned int moff;
6467         Elf64_Sym *sym;
6468         Elf64_Rel *rel;
6469         void *tmp;
6470
6471         if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6472                 return -EINVAL;
6473         sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6474         if (!sec)
6475                 return -EINVAL;
6476
6477         nrels = shdr->sh_size / shdr->sh_entsize;
6478         for (i = 0; i < nrels; i++) {
6479                 rel = elf_rel_by_idx(data, i);
6480                 if (!rel) {
6481                         pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6482                         return -LIBBPF_ERRNO__FORMAT;
6483                 }
6484
6485                 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
6486                 if (!sym) {
6487                         pr_warn(".maps relo #%d: symbol %zx not found\n",
6488                                 i, (size_t)ELF64_R_SYM(rel->r_info));
6489                         return -LIBBPF_ERRNO__FORMAT;
6490                 }
6491                 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
6492
6493                 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
6494                          i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
6495                          (size_t)rel->r_offset, sym->st_name, name);
6496
6497                 for (j = 0; j < obj->nr_maps; j++) {
6498                         map = &obj->maps[j];
6499                         if (map->sec_idx != obj->efile.btf_maps_shndx)
6500                                 continue;
6501
6502                         vi = btf_var_secinfos(sec) + map->btf_var_idx;
6503                         if (vi->offset <= rel->r_offset &&
6504                             rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6505                                 break;
6506                 }
6507                 if (j == obj->nr_maps) {
6508                         pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
6509                                 i, name, (size_t)rel->r_offset);
6510                         return -EINVAL;
6511                 }
6512
6513                 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
6514                 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
6515                 type = is_map_in_map ? "map" : "prog";
6516                 if (is_map_in_map) {
6517                         if (sym->st_shndx != obj->efile.btf_maps_shndx) {
6518                                 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6519                                         i, name);
6520                                 return -LIBBPF_ERRNO__RELOC;
6521                         }
6522                         if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6523                             map->def.key_size != sizeof(int)) {
6524                                 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6525                                         i, map->name, sizeof(int));
6526                                 return -EINVAL;
6527                         }
6528                         targ_map = bpf_object__find_map_by_name(obj, name);
6529                         if (!targ_map) {
6530                                 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
6531                                         i, name);
6532                                 return -ESRCH;
6533                         }
6534                 } else if (is_prog_array) {
6535                         targ_prog = bpf_object__find_program_by_name(obj, name);
6536                         if (!targ_prog) {
6537                                 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
6538                                         i, name);
6539                                 return -ESRCH;
6540                         }
6541                         if (targ_prog->sec_idx != sym->st_shndx ||
6542                             targ_prog->sec_insn_off * 8 != sym->st_value ||
6543                             prog_is_subprog(obj, targ_prog)) {
6544                                 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
6545                                         i, name);
6546                                 return -LIBBPF_ERRNO__RELOC;
6547                         }
6548                 } else {
6549                         return -EINVAL;
6550                 }
6551
6552                 var = btf__type_by_id(obj->btf, vi->type);
6553                 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6554                 if (btf_vlen(def) == 0)
6555                         return -EINVAL;
6556                 member = btf_members(def) + btf_vlen(def) - 1;
6557                 mname = btf__name_by_offset(obj->btf, member->name_off);
6558                 if (strcmp(mname, "values"))
6559                         return -EINVAL;
6560
6561                 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6562                 if (rel->r_offset - vi->offset < moff)
6563                         return -EINVAL;
6564
6565                 moff = rel->r_offset - vi->offset - moff;
6566                 /* here we use BPF pointer size, which is always 64 bit, as we
6567                  * are parsing an ELF that was built for the BPF target
6568                  */
6569                 if (moff % bpf_ptr_sz)
6570                         return -EINVAL;
6571                 moff /= bpf_ptr_sz;
6572                 if (moff >= map->init_slots_sz) {
6573                         new_sz = moff + 1;
6574                         tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6575                         if (!tmp)
6576                                 return -ENOMEM;
6577                         map->init_slots = tmp;
6578                         memset(map->init_slots + map->init_slots_sz, 0,
6579                                (new_sz - map->init_slots_sz) * host_ptr_sz);
6580                         map->init_slots_sz = new_sz;
6581                 }
6582                 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
6583
6584                 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
6585                          i, map->name, moff, type, name);
6586         }
6587
6588         return 0;
6589 }
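
/* The relocations handled above come from BTF-defined map-in-map or
 * prog-array initializers on the BPF side. A hedged sketch (hypothetical
 * names) of the map-in-map form whose "values" entries this function wires
 * up into map->init_slots[]:
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner_map SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 2);
 *		__type(key, int);
 *		__array(values, struct inner);
 *	} outer_map SEC(".maps") = {
 *		.values = { [0] = &inner_map },	// emits the ELF relo
 *	};
 */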
6590
6591 static int bpf_object__collect_relos(struct bpf_object *obj)
6592 {
6593         int i, err;
6594
6595         for (i = 0; i < obj->efile.sec_cnt; i++) {
6596                 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
6597                 Elf64_Shdr *shdr;
6598                 Elf_Data *data;
6599                 int idx;
6600
6601                 if (sec_desc->sec_type != SEC_RELO)
6602                         continue;
6603
6604                 shdr = sec_desc->shdr;
6605                 data = sec_desc->data;
6606                 idx = shdr->sh_info;
6607
6608                 if (shdr->sh_type != SHT_REL) {
6609                         pr_warn("internal error at %d\n", __LINE__);
6610                         return -LIBBPF_ERRNO__INTERNAL;
6611                 }
6612
6613                 if (idx == obj->efile.st_ops_shndx)
6614                         err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6615                 else if (idx == obj->efile.btf_maps_shndx)
6616                         err = bpf_object__collect_map_relos(obj, shdr, data);
6617                 else
6618                         err = bpf_object__collect_prog_relos(obj, shdr, data);
6619                 if (err)
6620                         return err;
6621         }
6622
6623         bpf_object__sort_relos(obj);
6624         return 0;
6625 }
6626
6627 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6628 {
6629         if (BPF_CLASS(insn->code) == BPF_JMP &&
6630             BPF_OP(insn->code) == BPF_CALL &&
6631             BPF_SRC(insn->code) == BPF_K &&
6632             insn->src_reg == 0 &&
6633             insn->dst_reg == 0) {
6634                     *func_id = insn->imm;
6635                     return true;
6636         }
6637         return false;
6638 }
6639
6640 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
6641 {
6642         struct bpf_insn *insn = prog->insns;
6643         enum bpf_func_id func_id;
6644         int i;
6645
6646         if (obj->gen_loader)
6647                 return 0;
6648
6649         for (i = 0; i < prog->insns_cnt; i++, insn++) {
6650                 if (!insn_is_helper_call(insn, &func_id))
6651                         continue;
6652
6653                 /* on kernels that don't yet support
6654                  * bpf_probe_read_{kernel,user}[_str] helpers, fall back
6655                  * to bpf_probe_read() which works well for old kernels
6656                  */
6657                 switch (func_id) {
6658                 case BPF_FUNC_probe_read_kernel:
6659                 case BPF_FUNC_probe_read_user:
6660                         if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6661                                 insn->imm = BPF_FUNC_probe_read;
6662                         break;
6663                 case BPF_FUNC_probe_read_kernel_str:
6664                 case BPF_FUNC_probe_read_user_str:
6665                         if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6666                                 insn->imm = BPF_FUNC_probe_read_str;
6667                         break;
6668                 default:
6669                         break;
6670                 }
6671         }
6672         return 0;
6673 }
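
/* Editor's note (illustrative, helper IDs quoted from the UAPI helper list as
 * best recalled): on a pre-5.5 kernel this rewrites, e.g., a call with
 * imm == BPF_FUNC_probe_read_kernel (113) into imm == BPF_FUNC_probe_read (4),
 * and BPF_FUNC_probe_read_kernel_str (115) into BPF_FUNC_probe_read_str (45),
 * without touching any other instruction field.
 */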
6674
6675 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
6676                                      int *btf_obj_fd, int *btf_type_id);
6677
6678 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
6679 static int libbpf_prepare_prog_load(struct bpf_program *prog,
6680                                     struct bpf_prog_load_opts *opts, long cookie)
6681 {
6682         enum sec_def_flags def = cookie;
6683
6684         /* old kernels might not support specifying expected_attach_type */
6685         if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
6686                 opts->expected_attach_type = 0;
6687
6688         if (def & SEC_SLEEPABLE)
6689                 opts->prog_flags |= BPF_F_SLEEPABLE;
6690
6691         if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
6692                 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
6693
6694         if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
6695                 int btf_obj_fd = 0, btf_type_id = 0, err;
6696                 const char *attach_name;
6697
6698                 attach_name = strchr(prog->sec_name, '/');
6699                 if (!attach_name) {
6700                         /* if a BPF program is annotated with just
6701                          * SEC("fentry") (or similar) without declaratively
6702                          * specifying a target, then the target is expected
6703                          * to be set with bpf_program__set_attach_target()
6704                          * at runtime, before the BPF object load step. If
6705                          * it isn't, there is nothing to load into the
6706                          * kernel, as the BPF verifier won't be able to
6707                          * validate program correctness anyway.
6708                          */
6709                         pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
6710                                 prog->name);
6711                         return -EINVAL;
6712                 }
6713                 attach_name++; /* skip over / */
6714
6715                 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
6716                 if (err)
6717                         return err;
6718
6719                 /* cache resolved BTF FD and BTF type ID in the prog */
6720                 prog->attach_btf_obj_fd = btf_obj_fd;
6721                 prog->attach_btf_id = btf_type_id;
6722
6723                 /* by now libbpf's common logic no longer uses
6724                  * prog->attach_btf_obj_fd/prog->attach_btf_id; since this
6725                  * callback is invoked after opts were populated by libbpf,
6726                  * it has to update opts explicitly here
6727                  */
6728                 opts->attach_btf_obj_fd = btf_obj_fd;
6729                 opts->attach_btf_id = btf_type_id;
6730         }
6731         return 0;
6732 }
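
/* Editor's sketch (illustrative, not part of libbpf): a program annotated
 * with a bare SEC("fentry") has no declarative target, so a caller must
 * supply one before load; "handle_unlink" is a hypothetical program name.
 */
static int example_set_fentry_target(struct bpf_object *obj)
{
        struct bpf_program *prog;
        int err;

        prog = bpf_object__find_program_by_name(obj, "handle_unlink");
        if (!prog)
                return -ENOENT;

        /* attach_prog_fd == 0 means "resolve in kernel (vmlinux/module) BTF" */
        err = bpf_program__set_attach_target(prog, 0, "do_unlinkat");
        if (err)
                return err;

        return bpf_object__load(obj);
}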
6733
6734 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
6735
6736 static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
6737                                 struct bpf_insn *insns, int insns_cnt,
6738                                 const char *license, __u32 kern_version, int *prog_fd)
6739 {
6740         LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
6741         const char *prog_name = NULL;
6742         char *cp, errmsg[STRERR_BUFSIZE];
6743         size_t log_buf_size = 0;
6744         char *log_buf = NULL, *tmp;
6745         int btf_fd, ret, err;
6746         bool own_log_buf = true;
6747         __u32 log_level = prog->log_level;
6748
6749         if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6750                 /*
6751                  * The program type must be set.  Most likely we couldn't find a proper
6752                  * section definition at load time, and thus we didn't infer the type.
6753                  */
6754                 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6755                         prog->name, prog->sec_name);
6756                 return -EINVAL;
6757         }
6758
6759         if (!insns || !insns_cnt)
6760                 return -EINVAL;
6761
6762         load_attr.expected_attach_type = prog->expected_attach_type;
6763         if (kernel_supports(obj, FEAT_PROG_NAME))
6764                 prog_name = prog->name;
6765         load_attr.attach_prog_fd = prog->attach_prog_fd;
6766         load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6767         load_attr.attach_btf_id = prog->attach_btf_id;
6768         load_attr.kern_version = kern_version;
6769         load_attr.prog_ifindex = prog->prog_ifindex;
6770
6771         /* specify func_info/line_info only if kernel supports them */
6772         btf_fd = bpf_object__btf_fd(obj);
6773         if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
6774                 load_attr.prog_btf_fd = btf_fd;
6775                 load_attr.func_info = prog->func_info;
6776                 load_attr.func_info_rec_size = prog->func_info_rec_size;
6777                 load_attr.func_info_cnt = prog->func_info_cnt;
6778                 load_attr.line_info = prog->line_info;
6779                 load_attr.line_info_rec_size = prog->line_info_rec_size;
6780                 load_attr.line_info_cnt = prog->line_info_cnt;
6781         }
6782         load_attr.log_level = log_level;
6783         load_attr.prog_flags = prog->prog_flags;
6784         load_attr.fd_array = obj->fd_array;
6785
6786         /* adjust load_attr if sec_def provides custom preload callback */
6787         if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
6788                 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
6789                 if (err < 0) {
6790                         pr_warn("prog '%s': failed to prepare load attributes: %d\n",
6791                                 prog->name, err);
6792                         return err;
6793                 }
6794                 insns = prog->insns;
6795                 insns_cnt = prog->insns_cnt;
6796         }
6797
6798         if (obj->gen_loader) {
6799                 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
6800                                    license, insns, insns_cnt, &load_attr,
6801                                    prog - obj->programs);
6802                 *prog_fd = -1;
6803                 return 0;
6804         }
6805
6806 retry_load:
6807         /* if log_level is zero, we don't request logs initially even if
6808          * custom log_buf is specified; if the program load fails, then we'll
6809          * bump log_level to 1 and use either custom log_buf or we'll allocate
6810          * our own and retry the load to get details on what failed
6811          */
6812         if (log_level) {
6813                 if (prog->log_buf) {
6814                         log_buf = prog->log_buf;
6815                         log_buf_size = prog->log_size;
6816                         own_log_buf = false;
6817                 } else if (obj->log_buf) {
6818                         log_buf = obj->log_buf;
6819                         log_buf_size = obj->log_size;
6820                         own_log_buf = false;
6821                 } else {
6822                         log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
6823                         tmp = realloc(log_buf, log_buf_size);
6824                         if (!tmp) {
6825                                 ret = -ENOMEM;
6826                                 goto out;
6827                         }
6828                         log_buf = tmp;
6829                         log_buf[0] = '\0';
6830                         own_log_buf = true;
6831                 }
6832         }
6833
6834         load_attr.log_buf = log_buf;
6835         load_attr.log_size = log_buf_size;
6836         load_attr.log_level = log_level;
6837
6838         ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
6839         if (ret >= 0) {
6840                 if (log_level && own_log_buf) {
6841                         pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
6842                                  prog->name, log_buf);
6843                 }
6844
6845                 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
6846                         struct bpf_map *map;
6847                         int i;
6848
6849                         for (i = 0; i < obj->nr_maps; i++) {
6850                                 map = &obj->maps[i];
6851                                 if (map->libbpf_type != LIBBPF_MAP_RODATA)
6852                                         continue;
6853
6854                                 if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
6855                                         cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6856                                         pr_warn("prog '%s': failed to bind map '%s': %s\n",
6857                                                 prog->name, map->real_name, cp);
6858                                         /* Don't fail hard if can't bind rodata. */
6859                                 }
6860                         }
6861                 }
6862
6863                 *prog_fd = ret;
6864                 ret = 0;
6865                 goto out;
6866         }
6867
6868         if (log_level == 0) {
6869                 log_level = 1;
6870                 goto retry_load;
6871         }
6872         /* On ENOSPC, increase log buffer size and retry, unless custom
6873          * log_buf is specified.
6874          * Be careful to not overflow u32, though. Kernel's log buf size limit
6875          * isn't part of UAPI so it can always be bumped to full 4GB. So don't
6876          * multiply by 2 unless we are sure we'll fit within 32 bits.
6877          * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
6878          */
6879         if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
6880                 goto retry_load;
6881
6882         ret = -errno;
6883
6884         /* post-process verifier log to improve error descriptions */
6885         fixup_verifier_log(prog, log_buf, log_buf_size);
6886
6887         cp = libbpf_strerror_r(-ret, errmsg, sizeof(errmsg));
6888         pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
6889         pr_perm_msg(ret);
6890
6891         if (own_log_buf && log_buf && log_buf[0] != '\0') {
6892                 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
6893                         prog->name, log_buf);
6894         }
6895
6896 out:
6897         if (own_log_buf)
6898                 free(log_buf);
6899         return ret;
6900 }
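
/* Editor's sketch (illustrative, not part of libbpf): supplying an
 * object-level verifier log buffer, which the retry logic above then uses
 * instead of allocating its own ("prog.bpf.o" is a hypothetical path).
 */
static struct bpf_object *example_open_with_log(char *log_buf, size_t log_sz)
{
        LIBBPF_OPTS(bpf_object_open_opts, opts,
                .kernel_log_buf = log_buf,
                .kernel_log_size = log_sz,
                .kernel_log_level = 1, /* request logs even on success */
        );

        return bpf_object__open_file("prog.bpf.o", &opts);
}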
6901
6902 static char *find_prev_line(char *buf, char *cur)
6903 {
6904         char *p;
6905
6906         if (cur == buf) /* start of the log buf, no previous line */
6907                 return NULL;
6908
6909         p = cur - 1;
6910         while (p - 1 >= buf && *(p - 1) != '\n')
6911                 p--;
6912
6913         return p;
6914 }
6915
6916 static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
6917                       char *orig, size_t orig_sz, const char *patch)
6918 {
6919         /* size of the remaining log content to the right of the to-be-replaced part */
6920         size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
6921         size_t patch_sz = strlen(patch);
6922
6923         if (patch_sz != orig_sz) {
6924                 /* If patch line(s) are longer than original piece of verifier log,
6925                  * shift log contents by (patch_sz - orig_sz) bytes to the right
6926                  * starting from after to-be-replaced part of the log.
6927                  *
6928                  * If patch line(s) are shorter than original piece of verifier log,
6929                  * shift log contents by (orig_sz - patch_sz) bytes to the left
6930                  * starting from after to-be-replaced part of the log.
6931                  *
6932                  * We need to be careful not to overflow the available
6933                  * buf_sz capacity. If we would, we truncate the end of
6934                  * the original log as necessary.
6935                  */
6936                 if (patch_sz > orig_sz) {
6937                         if (orig + patch_sz >= buf + buf_sz) {
6938                                 /* patch is big enough to cover remaining space completely */
6939                                 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
6940                                 rem_sz = 0;
6941                         } else if (patch_sz - orig_sz > buf_sz - log_sz) {
6942                                 /* patch causes part of remaining log to be truncated */
6943                                 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
6944                         }
6945                 }
6946                 /* shift remaining log to the right by calculated amount */
6947                 memmove(orig + patch_sz, orig + orig_sz, rem_sz);
6948         }
6949
6950         memcpy(orig, patch, patch_sz);
6951 }
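
/* Editor's worked example (illustrative): with log contents "foo\nbar\nbaz\n",
 * orig pointing at "bar\n" (orig_sz = 4) and patch = "BARBAR\n" (patch_sz = 7),
 * patch_log() first memmove()s "baz\n" (plus NUL) right by 3 bytes and then
 * copies the patch in, yielding "foo\nBARBAR\nbaz\n"; if the shift would run
 * past buf_sz, the tail of the log is truncated instead.
 */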
6952
6953 static void fixup_log_failed_core_relo(struct bpf_program *prog,
6954                                        char *buf, size_t buf_sz, size_t log_sz,
6955                                        char *line1, char *line2, char *line3)
6956 {
6957         /* Expected log for failed and not properly guarded CO-RE relocation:
6958          * line1 -> 123: (85) call unknown#195896080
6959          * line2 -> invalid func unknown#195896080
6960          * line3 -> <anything else or end of buffer>
6961          *
6962          * "123" is the index of the instruction that was poisoned. We extract
6963          * instruction index to find corresponding CO-RE relocation and
6964          * replace this part of the log with more relevant information about
6965          * failed CO-RE relocation.
6966          */
6967         const struct bpf_core_relo *relo;
6968         struct bpf_core_spec spec;
6969         char patch[512], spec_buf[256];
6970         int insn_idx, err, spec_len;
6971
6972         if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
6973                 return;
6974
6975         relo = find_relo_core(prog, insn_idx);
6976         if (!relo)
6977                 return;
6978
6979         err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
6980         if (err)
6981                 return;
6982
6983         spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
6984         snprintf(patch, sizeof(patch),
6985                  "%d: <invalid CO-RE relocation>\n"
6986                  "failed to resolve CO-RE relocation %s%s\n",
6987                  insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
6988
6989         patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
6990 }
6991
6992 static void fixup_log_missing_map_load(struct bpf_program *prog,
6993                                        char *buf, size_t buf_sz, size_t log_sz,
6994                                        char *line1, char *line2, char *line3)
6995 {
6996         /* Expected log for failed and not properly guarded BPF map reference:
6997          * line1 -> 123: (85) call unknown#2001000345
6998          * line2 -> invalid func unknown#2001000345
6999          * line3 -> <anything else or end of buffer>
7000          *
7001          * "123" is the index of the instruction that was poisoned.
7002          * "345" in "2001000345" is the map index in obj->maps used to fetch the map name.
7003          */
7004         struct bpf_object *obj = prog->obj;
7005         const struct bpf_map *map;
7006         int insn_idx, map_idx;
7007         char patch[128];
7008
7009         if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7010                 return;
7011
7012         map_idx -= MAP_LDIMM64_POISON_BASE;
7013         if (map_idx < 0 || map_idx >= obj->nr_maps)
7014                 return;
7015         map = &obj->maps[map_idx];
7016
7017         snprintf(patch, sizeof(patch),
7018                  "%d: <invalid BPF map reference>\n"
7019                  "BPF map '%s' is referenced but wasn't created\n",
7020                  insn_idx, map->name);
7021
7022         patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7023 }
7024
7025 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7026 {
7027         /* look for familiar error patterns in last N lines of the log */
7028         const size_t max_last_line_cnt = 10;
7029         char *prev_line, *cur_line, *next_line;
7030         size_t log_sz;
7031         int i;
7032
7033         if (!buf)
7034                 return;
7035
7036         log_sz = strlen(buf) + 1;
7037         next_line = buf + log_sz - 1;
7038
7039         for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7040                 cur_line = find_prev_line(buf, next_line);
7041                 if (!cur_line)
7042                         return;
7043
7044                 /* failed CO-RE relocation case */
7045                 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7046                         prev_line = find_prev_line(buf, cur_line);
7047                         if (!prev_line)
7048                                 continue;
7049
7050                         fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7051                                                    prev_line, cur_line, next_line);
7052                         return;
7053                 } else if (str_has_pfx(cur_line, "invalid func unknown#"MAP_LDIMM64_POISON_PFX)) {
7054                         prev_line = find_prev_line(buf, cur_line);
7055                         if (!prev_line)
7056                                 continue;
7057
7058                         fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7059                                                    prev_line, cur_line, next_line);
7060                         return;
7061                 }
7062         }
7063 }
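
/* Editor's note (illustrative): the net effect of the fixups above is that a
 * raw verifier tail such as
 *
 *   123: (85) call unknown#195896080
 *   invalid func unknown#195896080
 *
 * is rewritten in place into
 *
 *   123: <invalid CO-RE relocation>
 *   failed to resolve CO-RE relocation <formatted spec from bpf_core_format_spec()>
 */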
7064
7065 static int bpf_program_record_relos(struct bpf_program *prog)
7066 {
7067         struct bpf_object *obj = prog->obj;
7068         int i;
7069
7070         for (i = 0; i < prog->nr_reloc; i++) {
7071                 struct reloc_desc *relo = &prog->reloc_desc[i];
7072                 struct extern_desc *ext = &obj->externs[relo->sym_off];
7073
7074                 switch (relo->type) {
7075                 case RELO_EXTERN_VAR:
7076                         if (ext->type != EXT_KSYM)
7077                                 continue;
7078                         bpf_gen__record_extern(obj->gen_loader, ext->name,
7079                                                ext->is_weak, !ext->ksym.type_id,
7080                                                BTF_KIND_VAR, relo->insn_idx);
7081                         break;
7082                 case RELO_EXTERN_FUNC:
7083                         bpf_gen__record_extern(obj->gen_loader, ext->name,
7084                                                ext->is_weak, false, BTF_KIND_FUNC,
7085                                                relo->insn_idx);
7086                         break;
7087                 case RELO_CORE: {
7088                         struct bpf_core_relo cr = {
7089                                 .insn_off = relo->insn_idx * 8,
7090                                 .type_id = relo->core_relo->type_id,
7091                                 .access_str_off = relo->core_relo->access_str_off,
7092                                 .kind = relo->core_relo->kind,
7093                         };
7094
7095                         bpf_gen__record_relo_core(obj->gen_loader, &cr);
7096                         break;
7097                 }
7098                 default:
7099                         continue;
7100                 }
7101         }
7102         return 0;
7103 }
7104
7105 static int
7106 bpf_object__load_progs(struct bpf_object *obj, int log_level)
7107 {
7108         struct bpf_program *prog;
7109         size_t i;
7110         int err;
7111
7112         for (i = 0; i < obj->nr_programs; i++) {
7113                 prog = &obj->programs[i];
7114                 err = bpf_object__sanitize_prog(obj, prog);
7115                 if (err)
7116                         return err;
7117         }
7118
7119         for (i = 0; i < obj->nr_programs; i++) {
7120                 prog = &obj->programs[i];
7121                 if (prog_is_subprog(obj, prog))
7122                         continue;
7123                 if (!prog->autoload) {
7124                         pr_debug("prog '%s': skipped loading\n", prog->name);
7125                         continue;
7126                 }
7127                 prog->log_level |= log_level;
7128
7129                 if (obj->gen_loader)
7130                         bpf_program_record_relos(prog);
7131
7132                 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7133                                            obj->license, obj->kern_version, &prog->fd);
7134                 if (err) {
7135                         pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
7136                         return err;
7137                 }
7138         }
7139
7140         bpf_object__free_relocs(obj);
7141         return 0;
7142 }
7143
7144 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7145
7146 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7147 {
7148         struct bpf_program *prog;
7149         int err;
7150
7151         bpf_object__for_each_program(prog, obj) {
7152                 prog->sec_def = find_sec_def(prog->sec_name);
7153                 if (!prog->sec_def) {
7154                         /* couldn't guess, but user might manually specify */
7155                         pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7156                                 prog->name, prog->sec_name);
7157                         continue;
7158                 }
7159
7160                 prog->type = prog->sec_def->prog_type;
7161                 prog->expected_attach_type = prog->sec_def->expected_attach_type;
7162
7163                 /* sec_def can have custom callback which should be called
7164                  * after bpf_program is initialized to adjust its properties
7165                  */
7166                 if (prog->sec_def->prog_setup_fn) {
7167                         err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
7168                         if (err < 0) {
7169                                 pr_warn("prog '%s': failed to initialize: %d\n",
7170                                         prog->name, err);
7171                                 return err;
7172                         }
7173                 }
7174         }
7175
7176         return 0;
7177 }
7178
7179 static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7180                                           const struct bpf_object_open_opts *opts)
7181 {
7182         const char *obj_name, *kconfig, *btf_tmp_path;
7183         struct bpf_object *obj;
7184         char tmp_name[64];
7185         int err;
7186         char *log_buf;
7187         size_t log_size;
7188         __u32 log_level;
7189
7190         if (elf_version(EV_CURRENT) == EV_NONE) {
7191                 pr_warn("failed to init libelf for %s\n",
7192                         path ? : "(mem buf)");
7193                 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7194         }
7195
7196         if (!OPTS_VALID(opts, bpf_object_open_opts))
7197                 return ERR_PTR(-EINVAL);
7198
7199         obj_name = OPTS_GET(opts, object_name, NULL);
7200         if (obj_buf) {
7201                 if (!obj_name) {
7202                         snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7203                                  (unsigned long)obj_buf,
7204                                  (unsigned long)obj_buf_sz);
7205                         obj_name = tmp_name;
7206                 }
7207                 path = obj_name;
7208                 pr_debug("loading object '%s' from buffer\n", obj_name);
7209         }
7210
7211         log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7212         log_size = OPTS_GET(opts, kernel_log_size, 0);
7213         log_level = OPTS_GET(opts, kernel_log_level, 0);
7214         if (log_size > UINT_MAX)
7215                 return ERR_PTR(-EINVAL);
7216         if (log_size && !log_buf)
7217                 return ERR_PTR(-EINVAL);
7218
7219         obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7220         if (IS_ERR(obj))
7221                 return obj;
7222
7223         obj->log_buf = log_buf;
7224         obj->log_size = log_size;
7225         obj->log_level = log_level;
7226
7227         btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7228         if (btf_tmp_path) {
7229                 if (strlen(btf_tmp_path) >= PATH_MAX) {
7230                         err = -ENAMETOOLONG;
7231                         goto out;
7232                 }
7233                 obj->btf_custom_path = strdup(btf_tmp_path);
7234                 if (!obj->btf_custom_path) {
7235                         err = -ENOMEM;
7236                         goto out;
7237                 }
7238         }
7239
7240         kconfig = OPTS_GET(opts, kconfig, NULL);
7241         if (kconfig) {
7242                 obj->kconfig = strdup(kconfig);
7243                 if (!obj->kconfig) {
7244                         err = -ENOMEM;
7245                         goto out;
7246                 }
7247         }
7248
7249         err = bpf_object__elf_init(obj);
7250         err = err ? : bpf_object__check_endianness(obj);
7251         err = err ? : bpf_object__elf_collect(obj);
7252         err = err ? : bpf_object__collect_externs(obj);
7253         err = err ? : bpf_object_fixup_btf(obj);
7254         err = err ? : bpf_object__init_maps(obj, opts);
7255         err = err ? : bpf_object_init_progs(obj, opts);
7256         err = err ? : bpf_object__collect_relos(obj);
7257         if (err)
7258                 goto out;
7259
7260         bpf_object__elf_finish(obj);
7261
7262         return obj;
7263 out:
7264         bpf_object__close(obj);
7265         return ERR_PTR(err);
7266 }
7267
7268 struct bpf_object *
7269 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7270 {
7271         if (!path)
7272                 return libbpf_err_ptr(-EINVAL);
7273
7274         pr_debug("loading %s\n", path);
7275
7276         return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
7277 }
7278
7279 struct bpf_object *bpf_object__open(const char *path)
7280 {
7281         return bpf_object__open_file(path, NULL);
7282 }
7283
7284 struct bpf_object *
7285 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7286                      const struct bpf_object_open_opts *opts)
7287 {
7288         if (!obj_buf || obj_buf_sz == 0)
7289                 return libbpf_err_ptr(-EINVAL);
7290
7291         return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
7292 }
7293
7294 static int bpf_object_unload(struct bpf_object *obj)
7295 {
7296         size_t i;
7297
7298         if (!obj)
7299                 return libbpf_err(-EINVAL);
7300
7301         for (i = 0; i < obj->nr_maps; i++) {
7302                 zclose(obj->maps[i].fd);
7303                 if (obj->maps[i].st_ops)
7304                         zfree(&obj->maps[i].st_ops->kern_vdata);
7305         }
7306
7307         for (i = 0; i < obj->nr_programs; i++)
7308                 bpf_program__unload(&obj->programs[i]);
7309
7310         return 0;
7311 }
7312
7313 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7314 {
7315         struct bpf_map *m;
7316
7317         bpf_object__for_each_map(m, obj) {
7318                 if (!bpf_map__is_internal(m))
7319                         continue;
7320                 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
7321                         m->def.map_flags &= ~BPF_F_MMAPABLE;
7322         }
7323
7324         return 0;
7325 }
7326
7327 int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
7328 {
7329         char sym_type, sym_name[500];
7330         unsigned long long sym_addr;
7331         int ret, err = 0;
7332         FILE *f;
7333
7334         f = fopen("/proc/kallsyms", "r");
7335         if (!f) {
7336                 err = -errno;
7337                 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7338                 return err;
7339         }
7340
7341         while (true) {
7342                 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7343                              &sym_addr, &sym_type, sym_name);
7344                 if (ret == EOF && feof(f))
7345                         break;
7346                 if (ret != 3) {
7347                         pr_warn("failed to read kallsyms entry: %d\n", ret);
7348                         err = -EINVAL;
7349                         break;
7350                 }
7351
7352                 err = cb(sym_addr, sym_type, sym_name, ctx);
7353                 if (err)
7354                         break;
7355         }
7356
7357         fclose(f);
7358         return err;
7359 }
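
/* Editor's sketch (illustrative, not part of libbpf): a callback for
 * libbpf_kallsyms_parse(); returning non-zero stops the walk and is
 * propagated back to the caller.
 */
static int example_find_sym_cb(unsigned long long sym_addr, char sym_type,
                               const char *sym_name, void *ctx)
{
        if (strcmp(sym_name, (const char *)ctx) != 0)
                return 0; /* keep scanning */

        pr_debug("found %s at 0x%llx (type '%c')\n", sym_name, sym_addr, sym_type);
        return 1; /* stop iteration */
}

/* usage: err = libbpf_kallsyms_parse(example_find_sym_cb, (void *)"bpf_get_current_task"); */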
7360
7361 static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
7362                        const char *sym_name, void *ctx)
7363 {
7364         struct bpf_object *obj = ctx;
7365         const struct btf_type *t;
7366         struct extern_desc *ext;
7367
7368         ext = find_extern_by_name(obj, sym_name);
7369         if (!ext || ext->type != EXT_KSYM)
7370                 return 0;
7371
7372         t = btf__type_by_id(obj->btf, ext->btf_id);
7373         if (!btf_is_var(t))
7374                 return 0;
7375
7376         if (ext->is_set && ext->ksym.addr != sym_addr) {
7377                 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
7378                         sym_name, ext->ksym.addr, sym_addr);
7379                 return -EINVAL;
7380         }
7381         if (!ext->is_set) {
7382                 ext->is_set = true;
7383                 ext->ksym.addr = sym_addr;
7384                 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
7385         }
7386         return 0;
7387 }
7388
7389 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7390 {
7391         return libbpf_kallsyms_parse(kallsyms_cb, obj);
7392 }
7393
7394 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
7395                             __u16 kind, struct btf **res_btf,
7396                             struct module_btf **res_mod_btf)
7397 {
7398         struct module_btf *mod_btf;
7399         struct btf *btf;
7400         int i, id, err;
7401
7402         btf = obj->btf_vmlinux;
7403         mod_btf = NULL;
7404         id = btf__find_by_name_kind(btf, ksym_name, kind);
7405
7406         if (id == -ENOENT) {
7407                 err = load_module_btfs(obj);
7408                 if (err)
7409                         return err;
7410
7411                 for (i = 0; i < obj->btf_module_cnt; i++) {
7412                         /* we assume module_btf's BTF FD is always >0 */
7413                         mod_btf = &obj->btf_modules[i];
7414                         btf = mod_btf->btf;
7415                         id = btf__find_by_name_kind_own(btf, ksym_name, kind);
7416                         if (id != -ENOENT)
7417                                 break;
7418                 }
7419         }
7420         if (id <= 0)
7421                 return -ESRCH;
7422
7423         *res_btf = btf;
7424         *res_mod_btf = mod_btf;
7425         return id;
7426 }
7427
7428 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
7429                                                struct extern_desc *ext)
7430 {
7431         const struct btf_type *targ_var, *targ_type;
7432         __u32 targ_type_id, local_type_id;
7433         struct module_btf *mod_btf = NULL;
7434         const char *targ_var_name;
7435         struct btf *btf = NULL;
7436         int id, err;
7437
7438         id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
7439         if (id < 0) {
7440                 if (id == -ESRCH && ext->is_weak)
7441                         return 0;
7442                 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
7443                         ext->name);
7444                 return id;
7445         }
7446
7447         /* find local type_id */
7448         local_type_id = ext->ksym.type_id;
7449
7450         /* find target type_id */
7451         targ_var = btf__type_by_id(btf, id);
7452         targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7453         targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7454
7455         err = bpf_core_types_are_compat(obj->btf, local_type_id,
7456                                         btf, targ_type_id);
7457         if (err <= 0) {
7458                 const struct btf_type *local_type;
7459                 const char *targ_name, *local_name;
7460
7461                 local_type = btf__type_by_id(obj->btf, local_type_id);
7462                 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7463                 targ_name = btf__name_by_offset(btf, targ_type->name_off);
7464
7465                 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7466                         ext->name, local_type_id,
7467                         btf_kind_str(local_type), local_name, targ_type_id,
7468                         btf_kind_str(targ_type), targ_name);
7469                 return -EINVAL;
7470         }
7471
7472         ext->is_set = true;
7473         ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
7474         ext->ksym.kernel_btf_id = id;
7475         pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
7476                  ext->name, id, btf_kind_str(targ_var), targ_var_name);
7477
7478         return 0;
7479 }
7480
7481 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
7482                                                 struct extern_desc *ext)
7483 {
7484         int local_func_proto_id, kfunc_proto_id, kfunc_id;
7485         struct module_btf *mod_btf = NULL;
7486         const struct btf_type *kern_func;
7487         struct btf *kern_btf = NULL;
7488         int ret;
7489
7490         local_func_proto_id = ext->ksym.type_id;
7491
7492         kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
7493         if (kfunc_id < 0) {
7494                 if (kfunc_id == -ESRCH && ext->is_weak)
7495                         return 0;
7496                 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
7497                         ext->name);
7498                 return kfunc_id;
7499         }
7500
7501         kern_func = btf__type_by_id(kern_btf, kfunc_id);
7502         kfunc_proto_id = kern_func->type;
7503
7504         ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
7505                                         kern_btf, kfunc_proto_id);
7506         if (ret <= 0) {
7507                 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
7508                         ext->name, local_func_proto_id, kfunc_proto_id);
7509                 return -EINVAL;
7510         }
7511
7512         /* set index for module BTF fd in fd_array, if unset */
7513         if (mod_btf && !mod_btf->fd_array_idx) {
7514                 /* insn->off is s16 */
7515                 if (obj->fd_array_cnt == INT16_MAX) {
7516                         pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
7517                                 ext->name, mod_btf->fd_array_idx);
7518                         return -E2BIG;
7519                 }
7520                 /* Cannot use index 0 for module BTF fd */
7521                 if (!obj->fd_array_cnt)
7522                         obj->fd_array_cnt = 1;
7523
7524                 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
7525                                         obj->fd_array_cnt + 1);
7526                 if (ret)
7527                         return ret;
7528                 mod_btf->fd_array_idx = obj->fd_array_cnt;
7529                 /* we assume module BTF FD is always >0 */
7530                 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
7531         }
7532
7533         ext->is_set = true;
7534         ext->ksym.kernel_btf_id = kfunc_id;
7535         ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
7536         /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
7537          * populates FD into ld_imm64 insn when it's used to point to kfunc.
7538          * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
7539          * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
7540          */
7541         ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
7542         pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
7543                  ext->name, kfunc_id);
7544
7545         return 0;
7546 }
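
/* Editor's note (illustrative): the externs resolved by the two helpers above
 * are declared on the BPF program side roughly as:
 *
 *   extern const struct rq runqueues __ksym;           (variable ksym)
 *   extern void bpf_rcu_read_lock(void) __ksym;        (function ksym, i.e. kfunc)
 *
 * __weak ksym externs are allowed to stay unresolved (the -ESRCH cases above).
 */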
7547
7548 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7549 {
7550         const struct btf_type *t;
7551         struct extern_desc *ext;
7552         int i, err;
7553
7554         for (i = 0; i < obj->nr_extern; i++) {
7555                 ext = &obj->externs[i];
7556                 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7557                         continue;
7558
7559                 if (obj->gen_loader) {
7560                         ext->is_set = true;
7561                         ext->ksym.kernel_btf_obj_fd = 0;
7562                         ext->ksym.kernel_btf_id = 0;
7563                         continue;
7564                 }
7565                 t = btf__type_by_id(obj->btf, ext->btf_id);
7566                 if (btf_is_var(t))
7567                         err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
7568                 else
7569                         err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
7570                 if (err)
7571                         return err;
7572         }
7573         return 0;
7574 }
7575
7576 static int bpf_object__resolve_externs(struct bpf_object *obj,
7577                                        const char *extra_kconfig)
7578 {
7579         bool need_config = false, need_kallsyms = false;
7580         bool need_vmlinux_btf = false;
7581         struct extern_desc *ext;
7582         void *kcfg_data = NULL;
7583         int err, i;
7584
7585         if (obj->nr_extern == 0)
7586                 return 0;
7587
7588         if (obj->kconfig_map_idx >= 0)
7589                 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7590
7591         for (i = 0; i < obj->nr_extern; i++) {
7592                 ext = &obj->externs[i];
7593
7594                 if (ext->type == EXT_KSYM) {
7595                         if (ext->ksym.type_id)
7596                                 need_vmlinux_btf = true;
7597                         else
7598                                 need_kallsyms = true;
7599                         continue;
7600                 } else if (ext->type == EXT_KCFG) {
7601                         void *ext_ptr = kcfg_data + ext->kcfg.data_off;
7602                         __u64 value = 0;
7603
7604                         /* Kconfig externs need actual /proc/config.gz */
7605                         if (str_has_pfx(ext->name, "CONFIG_")) {
7606                                 need_config = true;
7607                                 continue;
7608                         }
7609
7610                         /* Virtual kcfg externs are handled by libbpf itself */
7611                         if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7612                                 value = get_kernel_version();
7613                                 if (!value) {
7614                                         pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
7615                                         return -EINVAL;
7616                                 }
7617                         } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
7618                                 value = kernel_supports(obj, FEAT_BPF_COOKIE);
7619                         } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
7620                                 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
7621                         } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
7622                                 /* Currently libbpf supports only CONFIG_ and LINUX_ prefixed
7623                                  * __kconfig externs, where LINUX_ ones are virtual and filled
7624                                  * out by libbpf itself (their values don't come from Kconfig).
7625                                  * If a LINUX_xxx variable is not recognized by libbpf but is
7626                                  * marked __weak, it defaults to zero, just like CONFIG_xxx
7627                                  * externs.
7628                                  */
7629                                 pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
7630                                 return -EINVAL;
7631                         }
7632
7633                         err = set_kcfg_value_num(ext, ext_ptr, value);
7634                         if (err)
7635                                 return err;
7636                         pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
7637                                  ext->name, (long long)value);
7638                 } else {
7639                         pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
7640                         return -EINVAL;
7641                 }
7642         }
7643         if (need_config && extra_kconfig) {
7644                 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7645                 if (err)
7646                         return -EINVAL;
7647                 need_config = false;
7648                 for (i = 0; i < obj->nr_extern; i++) {
7649                         ext = &obj->externs[i];
7650                         if (ext->type == EXT_KCFG && !ext->is_set) {
7651                                 need_config = true;
7652                                 break;
7653                         }
7654                 }
7655         }
7656         if (need_config) {
7657                 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7658                 if (err)
7659                         return -EINVAL;
7660         }
7661         if (need_kallsyms) {
7662                 err = bpf_object__read_kallsyms_file(obj);
7663                 if (err)
7664                         return -EINVAL;
7665         }
7666         if (need_vmlinux_btf) {
7667                 err = bpf_object__resolve_ksyms_btf_id(obj);
7668                 if (err)
7669                         return -EINVAL;
7670         }
7671         for (i = 0; i < obj->nr_extern; i++) {
7672                 ext = &obj->externs[i];
7673
7674                 if (!ext->is_set && !ext->is_weak) {
7675                         pr_warn("extern '%s' (strong): not resolved\n", ext->name);
7676                         return -ESRCH;
7677                 } else if (!ext->is_set) {
7678                         pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
7679                                  ext->name);
7680                 }
7681         }
7682
7683         return 0;
7684 }
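
/* Editor's note (illustrative): the kcfg externs handled above are declared
 * on the BPF program side roughly as:
 *
 *   extern unsigned int LINUX_KERNEL_VERSION __kconfig;    (virtual, set by libbpf)
 *   extern _Bool CONFIG_BPF_JIT __kconfig;                 (from Kconfig data)
 *   extern unsigned int CONFIG_HZ __kconfig __weak;        (zero if not found)
 */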
7685
7686 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
7687 {
7688         int err, i;
7689
7690         if (!obj)
7691                 return libbpf_err(-EINVAL);
7692
7693         if (obj->loaded) {
7694                 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7695                 return libbpf_err(-EINVAL);
7696         }
7697
7698         if (obj->gen_loader)
7699                 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
7700
7701         err = bpf_object__probe_loading(obj);
7702         err = err ? : bpf_object__load_vmlinux_btf(obj, false);
7703         err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7704         err = err ? : bpf_object__sanitize_and_load_btf(obj);
7705         err = err ? : bpf_object__sanitize_maps(obj);
7706         err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7707         err = err ? : bpf_object__create_maps(obj);
7708         err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
7709         err = err ? : bpf_object__load_progs(obj, extra_log_level);
7710         err = err ? : bpf_object_init_prog_arrays(obj);
7711
7712         if (obj->gen_loader) {
7713                 /* reset FDs */
7714                 if (obj->btf)
7715                         btf__set_fd(obj->btf, -1);
7716                 for (i = 0; i < obj->nr_maps; i++)
7717                         obj->maps[i].fd = -1;
7718                 if (!err)
7719                         err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
7720         }
7721
7722         /* clean up fd_array */
7723         zfree(&obj->fd_array);
7724
7725         /* clean up module BTFs */
7726         for (i = 0; i < obj->btf_module_cnt; i++) {
7727                 close(obj->btf_modules[i].fd);
7728                 btf__free(obj->btf_modules[i].btf);
7729                 free(obj->btf_modules[i].name);
7730         }
7731         free(obj->btf_modules);
7732
7733         /* clean up vmlinux BTF */
7734         btf__free(obj->btf_vmlinux);
7735         obj->btf_vmlinux = NULL;
7736
7737         obj->loaded = true; /* set whether the load succeeded or not */
7738
7739         if (err)
7740                 goto out;
7741
7742         return 0;
7743 out:
7744         /* unpin any maps that were auto-pinned during load */
7745         for (i = 0; i < obj->nr_maps; i++)
7746                 if (obj->maps[i].pinned && !obj->maps[i].reused)
7747                         bpf_map__unpin(&obj->maps[i], NULL);
7748
7749         bpf_object_unload(obj);
7750         pr_warn("failed to load object '%s'\n", obj->path);
7751         return libbpf_err(err);
7752 }
7753
7754 int bpf_object__load(struct bpf_object *obj)
7755 {
7756         return bpf_object_load(obj, 0, NULL);
7757 }
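
/* Editor's sketch (illustrative, not part of libbpf): the minimal
 * open-then-load sequence built from the APIs above; "prog.bpf.o" is a
 * hypothetical object file path.
 */
static int example_open_and_load(struct bpf_object **res)
{
        struct bpf_object *obj;
        int err;

        obj = bpf_object__open("prog.bpf.o");
        err = libbpf_get_error(obj);
        if (err)
                return err;

        err = bpf_object__load(obj);
        if (err) {
                bpf_object__close(obj);
                return err;
        }

        *res = obj;
        return 0;
}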
7758
7759 static int make_parent_dir(const char *path)
7760 {
7761         char *cp, errmsg[STRERR_BUFSIZE];
7762         char *dname, *dir;
7763         int err = 0;
7764
7765         dname = strdup(path);
7766         if (dname == NULL)
7767                 return -ENOMEM;
7768
7769         dir = dirname(dname);
7770         if (mkdir(dir, 0700) && errno != EEXIST)
7771                 err = -errno;
7772
7773         free(dname);
7774         if (err) {
7775                 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7776                 pr_warn("failed to mkdir %s: %s\n", path, cp);
7777         }
7778         return err;
7779 }
7780
7781 static int check_path(const char *path)
7782 {
7783         char *cp, errmsg[STRERR_BUFSIZE];
7784         struct statfs st_fs;
7785         char *dname, *dir;
7786         int err = 0;
7787
7788         if (path == NULL)
7789                 return -EINVAL;
7790
7791         dname = strdup(path);
7792         if (dname == NULL)
7793                 return -ENOMEM;
7794
7795         dir = dirname(dname);
7796         if (statfs(dir, &st_fs)) {
7797                 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7798                 pr_warn("failed to statfs %s: %s\n", dir, cp);
7799                 err = -errno;
7800         }
7801         free(dname);
7802
7803         if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7804                 pr_warn("specified path %s is not on BPF FS\n", path);
7805                 err = -EINVAL;
7806         }
7807
7808         return err;
7809 }
7810
7811 int bpf_program__pin(struct bpf_program *prog, const char *path)
7812 {
7813         char *cp, errmsg[STRERR_BUFSIZE];
7814         int err;
7815
7816         if (prog->fd < 0) {
7817                 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
7818                 return libbpf_err(-EINVAL);
7819         }
7820
7821         err = make_parent_dir(path);
7822         if (err)
7823                 return libbpf_err(err);
7824
7825         err = check_path(path);
7826         if (err)
7827                 return libbpf_err(err);
7828
7829         if (bpf_obj_pin(prog->fd, path)) {
7830                 err = -errno;
7831                 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7832                 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
7833                 return libbpf_err(err);
7834         }
7835
7836         pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
7837         return 0;
7838 }
7839
7840 int bpf_program__unpin(struct bpf_program *prog, const char *path)
7841 {
7842         int err;
7843
7844         if (prog->fd < 0) {
7845                 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
7846                 return libbpf_err(-EINVAL);
7847         }
7848
7849         err = check_path(path);
7850         if (err)
7851                 return libbpf_err(err);
7852
7853         err = unlink(path);
7854         if (err)
7855                 return libbpf_err(-errno);
7856
7857         pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
7858         return 0;
7859 }
7860
7861 int bpf_map__pin(struct bpf_map *map, const char *path)
7862 {
7863         char *cp, errmsg[STRERR_BUFSIZE];
7864         int err;
7865
7866         if (map == NULL) {
7867                 pr_warn("invalid map pointer\n");
7868                 return libbpf_err(-EINVAL);
7869         }
7870
7871         if (map->pin_path) {
7872                 if (path && strcmp(path, map->pin_path)) {
7873                         pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7874                                 bpf_map__name(map), map->pin_path, path);
7875                         return libbpf_err(-EINVAL);
7876                 } else if (map->pinned) {
7877                         pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7878                                  bpf_map__name(map), map->pin_path);
7879                         return 0;
7880                 }
7881         } else {
7882                 if (!path) {
7883                         pr_warn("missing a path to pin map '%s' at\n",
7884                                 bpf_map__name(map));
7885                         return libbpf_err(-EINVAL);
7886                 } else if (map->pinned) {
7887                         pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7888                         return libbpf_err(-EEXIST);
7889                 }
7890
7891                 map->pin_path = strdup(path);
7892                 if (!map->pin_path) {
7893                         err = -errno;
7894                         goto out_err;
7895                 }
7896         }
7897
7898         err = make_parent_dir(map->pin_path);
7899         if (err)
7900                 return libbpf_err(err);
7901
7902         err = check_path(map->pin_path);
7903         if (err)
7904                 return libbpf_err(err);
7905
7906         if (bpf_obj_pin(map->fd, map->pin_path)) {
7907                 err = -errno;
7908                 goto out_err;
7909         }
7910
7911         map->pinned = true;
7912         pr_debug("pinned map '%s'\n", map->pin_path);
7913
7914         return 0;
7915
7916 out_err:
7917         cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7918         pr_warn("failed to pin map: %s\n", cp);
7919         return libbpf_err(err);
7920 }
7921
7922 int bpf_map__unpin(struct bpf_map *map, const char *path)
7923 {
7924         int err;
7925
7926         if (map == NULL) {
7927                 pr_warn("invalid map pointer\n");
7928                 return libbpf_err(-EINVAL);
7929         }
7930
7931         if (map->pin_path) {
7932                 if (path && strcmp(path, map->pin_path)) {
7933                         pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7934                                 bpf_map__name(map), map->pin_path, path);
7935                         return libbpf_err(-EINVAL);
7936                 }
7937                 path = map->pin_path;
7938         } else if (!path) {
7939                 pr_warn("no path to unpin map '%s' from\n",
7940                         bpf_map__name(map));
7941                 return libbpf_err(-EINVAL);
7942         }
7943
7944         err = check_path(path);
7945         if (err)
7946                 return libbpf_err(err);
7947
7948         err = unlink(path);
7949         if (err != 0)
7950                 return libbpf_err(-errno);
7951
7952         map->pinned = false;
7953         pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7954
7955         return 0;
7956 }
7957
7958 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7959 {
7960         char *new = NULL;
7961
7962         if (path) {
7963                 new = strdup(path);
7964                 if (!new)
7965                         return libbpf_err(-errno);
7966         }
7967
7968         free(map->pin_path);
7969         map->pin_path = new;
7970         return 0;
7971 }
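
/* Editor's sketch (illustrative, not part of libbpf): overriding a map's pin
 * path before load so that auto-pinning/bpf_object__pin_maps() uses it;
 * "stats_map" is a hypothetical map name.
 */
static int example_override_pin(struct bpf_object *obj)
{
        struct bpf_map *map = bpf_object__find_map_by_name(obj, "stats_map");

        if (!map)
                return -ENOENT;

        /* note: bpffs rejects '.' in names; see sanitize_pin_path() below */
        return bpf_map__set_pin_path(map, "/sys/fs/bpf/stats_map");
}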
7972
7973 __alias(bpf_map__pin_path)
7974 const char *bpf_map__get_pin_path(const struct bpf_map *map);
7975
7976 const char *bpf_map__pin_path(const struct bpf_map *map)
7977 {
7978         return map->pin_path;
7979 }
7980
7981 bool bpf_map__is_pinned(const struct bpf_map *map)
7982 {
7983         return map->pinned;
7984 }
7985
7986 static void sanitize_pin_path(char *s)
7987 {
7988         /* bpffs disallows periods in path names */
7989         while (*s) {
7990                 if (*s == '.')
7991                         *s = '_';
7992                 s++;
7993         }
7994 }
7995
7996 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7997 {
7998         struct bpf_map *map;
7999         int err;
8000
8001         if (!obj)
8002                 return libbpf_err(-ENOENT);
8003
8004         if (!obj->loaded) {
8005                 pr_warn("object not yet loaded; load it first\n");
8006                 return libbpf_err(-ENOENT);
8007         }
8008
8009         bpf_object__for_each_map(map, obj) {
8010                 char *pin_path = NULL;
8011                 char buf[PATH_MAX];
8012
8013                 if (!map->autocreate)
8014                         continue;
8015
8016                 if (path) {
8017                         err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8018                         if (err)
8019                                 goto err_unpin_maps;
8020                         sanitize_pin_path(buf);
8021                         pin_path = buf;
8022                 } else if (!map->pin_path) {
8023                         continue;
8024                 }
8025
8026                 err = bpf_map__pin(map, pin_path);
8027                 if (err)
8028                         goto err_unpin_maps;
8029         }
8030
8031         return 0;
8032
8033 err_unpin_maps:
8034         while ((map = bpf_object__prev_map(obj, map))) {
8035                 if (!map->pin_path)
8036                         continue;
8037
8038                 bpf_map__unpin(map, NULL);
8039         }
8040
8041         return libbpf_err(err);
8042 }
8043
8044 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8045 {
8046         struct bpf_map *map;
8047         int err;
8048
8049         if (!obj)
8050                 return libbpf_err(-ENOENT);
8051
8052         bpf_object__for_each_map(map, obj) {
8053                 char *pin_path = NULL;
8054                 char buf[PATH_MAX];
8055
8056                 if (path) {
8057                         err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8058                         if (err)
8059                                 return libbpf_err(err);
8060                         sanitize_pin_path(buf);
8061                         pin_path = buf;
8062                 } else if (!map->pin_path) {
8063                         continue;
8064                 }
8065
8066                 err = bpf_map__unpin(map, pin_path);
8067                 if (err)
8068                         return libbpf_err(err);
8069         }
8070
8071         return 0;
8072 }
8073
8074 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8075 {
8076         struct bpf_program *prog;
8077         char buf[PATH_MAX];
8078         int err;
8079
8080         if (!obj)
8081                 return libbpf_err(-ENOENT);
8082
8083         if (!obj->loaded) {
8084                 pr_warn("object not yet loaded; load it first\n");
8085                 return libbpf_err(-ENOENT);
8086         }
8087
8088         bpf_object__for_each_program(prog, obj) {
8089                 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8090                 if (err)
8091                         goto err_unpin_programs;
8092
8093                 err = bpf_program__pin(prog, buf);
8094                 if (err)
8095                         goto err_unpin_programs;
8096         }
8097
8098         return 0;
8099
8100 err_unpin_programs:
8101         while ((prog = bpf_object__prev_program(obj, prog))) {
8102                 if (pathname_concat(buf, sizeof(buf), path, prog->name))
8103                         continue;
8104
8105                 bpf_program__unpin(prog, buf);
8106         }
8107
8108         return libbpf_err(err);
8109 }
8110
8111 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8112 {
8113         struct bpf_program *prog;
8114         int err;
8115
8116         if (!obj)
8117                 return libbpf_err(-ENOENT);
8118
8119         bpf_object__for_each_program(prog, obj) {
8120                 char buf[PATH_MAX];
8121
8122                 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8123                 if (err)
8124                         return libbpf_err(err);
8125
8126                 err = bpf_program__unpin(prog, buf);
8127                 if (err)
8128                         return libbpf_err(err);
8129         }
8130
8131         return 0;
8132 }
8133
8134 int bpf_object__pin(struct bpf_object *obj, const char *path)
8135 {
8136         int err;
8137
8138         err = bpf_object__pin_maps(obj, path);
8139         if (err)
8140                 return libbpf_err(err);
8141
8142         err = bpf_object__pin_programs(obj, path);
8143         if (err) {
8144                 bpf_object__unpin_maps(obj, path);
8145                 return libbpf_err(err);
8146         }
8147
8148         return 0;
8149 }
8150
8151 static void bpf_map__destroy(struct bpf_map *map)
8152 {
8153         if (map->inner_map) {
8154                 bpf_map__destroy(map->inner_map);
8155                 zfree(&map->inner_map);
8156         }
8157
8158         zfree(&map->init_slots);
8159         map->init_slots_sz = 0;
8160
8161         if (map->mmaped) {
8162                 munmap(map->mmaped, bpf_map_mmap_sz(map));
8163                 map->mmaped = NULL;
8164         }
8165
8166         if (map->st_ops) {
8167                 zfree(&map->st_ops->data);
8168                 zfree(&map->st_ops->progs);
8169                 zfree(&map->st_ops->kern_func_off);
8170                 zfree(&map->st_ops);
8171         }
8172
8173         zfree(&map->name);
8174         zfree(&map->real_name);
8175         zfree(&map->pin_path);
8176
8177         if (map->fd >= 0)
8178                 zclose(map->fd);
8179 }
8180
8181 void bpf_object__close(struct bpf_object *obj)
8182 {
8183         size_t i;
8184
8185         if (IS_ERR_OR_NULL(obj))
8186                 return;
8187
8188         usdt_manager_free(obj->usdt_man);
8189         obj->usdt_man = NULL;
8190
8191         bpf_gen__free(obj->gen_loader);
8192         bpf_object__elf_finish(obj);
8193         bpf_object_unload(obj);
8194         btf__free(obj->btf);
8195         btf_ext__free(obj->btf_ext);
8196
8197         for (i = 0; i < obj->nr_maps; i++)
8198                 bpf_map__destroy(&obj->maps[i]);
8199
8200         zfree(&obj->btf_custom_path);
8201         zfree(&obj->kconfig);
8202         zfree(&obj->externs);
8203         obj->nr_extern = 0;
8204
8205         zfree(&obj->maps);
8206         obj->nr_maps = 0;
8207
8208         if (obj->programs && obj->nr_programs) {
8209                 for (i = 0; i < obj->nr_programs; i++)
8210                         bpf_program__exit(&obj->programs[i]);
8211         }
8212         zfree(&obj->programs);
8213
8214         free(obj);
8215 }
8216
8217 const char *bpf_object__name(const struct bpf_object *obj)
8218 {
8219         return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8220 }
8221
8222 unsigned int bpf_object__kversion(const struct bpf_object *obj)
8223 {
8224         return obj ? obj->kern_version : 0;
8225 }
8226
8227 struct btf *bpf_object__btf(const struct bpf_object *obj)
8228 {
8229         return obj ? obj->btf : NULL;
8230 }
8231
8232 int bpf_object__btf_fd(const struct bpf_object *obj)
8233 {
8234         return obj->btf ? btf__fd(obj->btf) : -1;
8235 }
8236
8237 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8238 {
8239         if (obj->loaded)
8240                 return libbpf_err(-EINVAL);
8241
8242         obj->kern_version = kern_version;
8243
8244         return 0;
8245 }
8246
8247 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8248 {
8249         struct bpf_gen *gen;
8250
8251         if (!opts)
8252                 return -EFAULT;
8253         if (!OPTS_VALID(opts, gen_loader_opts))
8254                 return -EINVAL;
8255         gen = calloc(1, sizeof(*gen));

8256         if (!gen)
8257                 return -ENOMEM;
8258         gen->opts = opts;
8259         obj->gen_loader = gen;
8260         return 0;
8261 }
8262
8263 static struct bpf_program *
8264 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8265                     bool forward)
8266 {
8267         size_t nr_programs = obj->nr_programs;
8268         ssize_t idx;
8269
8270         if (!nr_programs)
8271                 return NULL;
8272
8273         if (!p)
8274                 /* Iter from the beginning */
8275                 return forward ? &obj->programs[0] :
8276                         &obj->programs[nr_programs - 1];
8277
8278         if (p->obj != obj) {
8279                 pr_warn("error: program handler doesn't match object\n");
8280                 return errno = EINVAL, NULL;
8281         }
8282
8283         idx = (p - obj->programs) + (forward ? 1 : -1);
8284         if (idx >= obj->nr_programs || idx < 0)
8285                 return NULL;
8286         return &obj->programs[idx];
8287 }
8288
8289 struct bpf_program *
8290 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
8291 {
8292         struct bpf_program *prog = prev;
8293
8294         do {
8295                 prog = __bpf_program__iter(prog, obj, true);
8296         } while (prog && prog_is_subprog(obj, prog));
8297
8298         return prog;
8299 }
8300
8301 struct bpf_program *
8302 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
8303 {
8304         struct bpf_program *prog = next;
8305
8306         do {
8307                 prog = __bpf_program__iter(prog, obj, false);
8308         } while (prog && prog_is_subprog(obj, prog));
8309
8310         return prog;
8311 }
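/* A minimal iteration sketch using the wrappers above (obj is assumed to be
 * an open struct bpf_object *); the bpf_object__for_each_program() macro in
 * libbpf.h expands to exactly this next-based loop:
 *
 *	struct bpf_program *prog = NULL;
 *
 *	while ((prog = bpf_object__next_program(obj, prog)))
 *		printf("prog: %s\n", bpf_program__name(prog));
 */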
8312
8313 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8314 {
8315         prog->prog_ifindex = ifindex;
8316 }
8317
8318 const char *bpf_program__name(const struct bpf_program *prog)
8319 {
8320         return prog->name;
8321 }
8322
8323 const char *bpf_program__section_name(const struct bpf_program *prog)
8324 {
8325         return prog->sec_name;
8326 }
8327
8328 bool bpf_program__autoload(const struct bpf_program *prog)
8329 {
8330         return prog->autoload;
8331 }
8332
8333 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8334 {
8335         if (prog->obj->loaded)
8336                 return libbpf_err(-EINVAL);
8337
8338         prog->autoload = autoload;
8339         return 0;
8340 }
8341
8342 bool bpf_program__autoattach(const struct bpf_program *prog)
8343 {
8344         return prog->autoattach;
8345 }
8346
8347 void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
8348 {
8349         prog->autoattach = autoattach;
8350 }
8351
8352 const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
8353 {
8354         return prog->insns;
8355 }
8356
8357 size_t bpf_program__insn_cnt(const struct bpf_program *prog)
8358 {
8359         return prog->insns_cnt;
8360 }
8361
8362 int bpf_program__set_insns(struct bpf_program *prog,
8363                            struct bpf_insn *new_insns, size_t new_insn_cnt)
8364 {
8365         struct bpf_insn *insns;
8366
8367         if (prog->obj->loaded)
8368                 return -EBUSY;
8369
8370         insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
8371         if (!insns) {
8372                 pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
8373                 return -ENOMEM;
8374         }
8375         memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
8376
8377         prog->insns = insns;
8378         prog->insns_cnt = new_insn_cnt;
8379         return 0;
8380 }
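/* Illustrative sketch of replacing a program's instructions before load
 * (advanced use; this trivial body is just "r0 = 0; exit"):
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *
 *	err = bpf_program__set_insns(prog, insns, ARRAY_SIZE(insns));
 */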
8381
8382 int bpf_program__fd(const struct bpf_program *prog)
8383 {
8384         if (!prog)
8385                 return libbpf_err(-EINVAL);
8386
8387         if (prog->fd < 0)
8388                 return libbpf_err(-ENOENT);
8389
8390         return prog->fd;
8391 }
8392
8393 __alias(bpf_program__type)
8394 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
8395
8396 enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
8397 {
8398         return prog->type;
8399 }
8400
8401 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8402 {
8403         if (prog->obj->loaded)
8404                 return libbpf_err(-EBUSY);
8405
8406         prog->type = type;
8407         return 0;
8408 }
8409
8410 __alias(bpf_program__expected_attach_type)
8411 enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
8412
8413 enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
8414 {
8415         return prog->expected_attach_type;
8416 }
8417
8418 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
8419                                            enum bpf_attach_type type)
8420 {
8421         if (prog->obj->loaded)
8422                 return libbpf_err(-EBUSY);
8423
8424         prog->expected_attach_type = type;
8425         return 0;
8426 }
8427
8428 __u32 bpf_program__flags(const struct bpf_program *prog)
8429 {
8430         return prog->prog_flags;
8431 }
8432
8433 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
8434 {
8435         if (prog->obj->loaded)
8436                 return libbpf_err(-EBUSY);
8437
8438         prog->prog_flags = flags;
8439         return 0;
8440 }
8441
8442 __u32 bpf_program__log_level(const struct bpf_program *prog)
8443 {
8444         return prog->log_level;
8445 }
8446
8447 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
8448 {
8449         if (prog->obj->loaded)
8450                 return libbpf_err(-EBUSY);
8451
8452         prog->log_level = log_level;
8453         return 0;
8454 }
8455
8456 const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
8457 {
8458         *log_size = prog->log_size;
8459         return prog->log_buf;
8460 }
8461
8462 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
8463 {
8464         if (log_size && !log_buf)
8465                 return -EINVAL;
8466         if (log_size > UINT_MAX)
8467                 return -EINVAL;
8468         if (prog->obj->loaded)
8469                 return -EBUSY;
8470
8471         prog->log_buf = log_buf;
8472         prog->log_size = log_size;
8473         return 0;
8474 }
8475
8476 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {                        \
8477         .sec = (char *)sec_pfx,                                             \
8478         .prog_type = BPF_PROG_TYPE_##ptype,                                 \
8479         .expected_attach_type = atype,                                      \
8480         .cookie = (long)(flags),                                            \
8481         .prog_prepare_load_fn = libbpf_prepare_prog_load,                   \
8482         __VA_ARGS__                                                         \
8483 }
8484
8485 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8486 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8487 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8488 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8489 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8490 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8491 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8492 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8493 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8494 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8495
8496 static const struct bpf_sec_def section_defs[] = {
8497         SEC_DEF("socket",               SOCKET_FILTER, 0, SEC_NONE),
8498         SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
8499         SEC_DEF("sk_reuseport",         SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
8500         SEC_DEF("kprobe+",              KPROBE, 0, SEC_NONE, attach_kprobe),
8501         SEC_DEF("uprobe+",              KPROBE, 0, SEC_NONE, attach_uprobe),
8502         SEC_DEF("uprobe.s+",            KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
8503         SEC_DEF("kretprobe+",           KPROBE, 0, SEC_NONE, attach_kprobe),
8504         SEC_DEF("uretprobe+",           KPROBE, 0, SEC_NONE, attach_uprobe),
8505         SEC_DEF("uretprobe.s+",         KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
8506         SEC_DEF("kprobe.multi+",        KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
8507         SEC_DEF("kretprobe.multi+",     KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
8508         SEC_DEF("ksyscall+",            KPROBE, 0, SEC_NONE, attach_ksyscall),
8509         SEC_DEF("kretsyscall+",         KPROBE, 0, SEC_NONE, attach_ksyscall),
8510         SEC_DEF("usdt+",                KPROBE, 0, SEC_NONE, attach_usdt),
8511         SEC_DEF("tc",                   SCHED_CLS, 0, SEC_NONE),
8512         SEC_DEF("classifier",           SCHED_CLS, 0, SEC_NONE),
8513         SEC_DEF("action",               SCHED_ACT, 0, SEC_NONE),
8514         SEC_DEF("tracepoint+",          TRACEPOINT, 0, SEC_NONE, attach_tp),
8515         SEC_DEF("tp+",                  TRACEPOINT, 0, SEC_NONE, attach_tp),
8516         SEC_DEF("raw_tracepoint+",      RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
8517         SEC_DEF("raw_tp+",              RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
8518         SEC_DEF("raw_tracepoint.w+",    RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
8519         SEC_DEF("raw_tp.w+",            RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
8520         SEC_DEF("tp_btf+",              TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
8521         SEC_DEF("fentry+",              TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
8522         SEC_DEF("fmod_ret+",            TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
8523         SEC_DEF("fexit+",               TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
8524         SEC_DEF("fentry.s+",            TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
8525         SEC_DEF("fmod_ret.s+",          TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
8526         SEC_DEF("fexit.s+",             TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
8527         SEC_DEF("freplace+",            EXT, 0, SEC_ATTACH_BTF, attach_trace),
8528         SEC_DEF("lsm+",                 LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
8529         SEC_DEF("lsm.s+",               LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
8530         SEC_DEF("lsm_cgroup+",          LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
8531         SEC_DEF("iter+",                TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
8532         SEC_DEF("iter.s+",              TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
8533         SEC_DEF("syscall",              SYSCALL, 0, SEC_SLEEPABLE),
8534         SEC_DEF("xdp.frags/devmap",     XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
8535         SEC_DEF("xdp/devmap",           XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
8536         SEC_DEF("xdp.frags/cpumap",     XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
8537         SEC_DEF("xdp/cpumap",           XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
8538         SEC_DEF("xdp.frags",            XDP, BPF_XDP, SEC_XDP_FRAGS),
8539         SEC_DEF("xdp",                  XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
8540         SEC_DEF("perf_event",           PERF_EVENT, 0, SEC_NONE),
8541         SEC_DEF("lwt_in",               LWT_IN, 0, SEC_NONE),
8542         SEC_DEF("lwt_out",              LWT_OUT, 0, SEC_NONE),
8543         SEC_DEF("lwt_xmit",             LWT_XMIT, 0, SEC_NONE),
8544         SEC_DEF("lwt_seg6local",        LWT_SEG6LOCAL, 0, SEC_NONE),
8545         SEC_DEF("sockops",              SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
8546         SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
8547         SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
8548         SEC_DEF("sk_skb",               SK_SKB, 0, SEC_NONE),
8549         SEC_DEF("sk_msg",               SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
8550         SEC_DEF("lirc_mode2",           LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
8551         SEC_DEF("flow_dissector",       FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
8552         SEC_DEF("cgroup_skb/ingress",   CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
8553         SEC_DEF("cgroup_skb/egress",    CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
8554         SEC_DEF("cgroup/skb",           CGROUP_SKB, 0, SEC_NONE),
8555         SEC_DEF("cgroup/sock_create",   CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
8556         SEC_DEF("cgroup/sock_release",  CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
8557         SEC_DEF("cgroup/sock",          CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
8558         SEC_DEF("cgroup/post_bind4",    CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
8559         SEC_DEF("cgroup/post_bind6",    CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
8560         SEC_DEF("cgroup/bind4",         CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
8561         SEC_DEF("cgroup/bind6",         CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
8562         SEC_DEF("cgroup/connect4",      CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
8563         SEC_DEF("cgroup/connect6",      CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
8564         SEC_DEF("cgroup/sendmsg4",      CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
8565         SEC_DEF("cgroup/sendmsg6",      CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
8566         SEC_DEF("cgroup/recvmsg4",      CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
8567         SEC_DEF("cgroup/recvmsg6",      CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
8568         SEC_DEF("cgroup/getpeername4",  CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
8569         SEC_DEF("cgroup/getpeername6",  CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
8570         SEC_DEF("cgroup/getsockname4",  CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
8571         SEC_DEF("cgroup/getsockname6",  CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
8572         SEC_DEF("cgroup/sysctl",        CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
8573         SEC_DEF("cgroup/getsockopt",    CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
8574         SEC_DEF("cgroup/setsockopt",    CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
8575         SEC_DEF("cgroup/dev",           CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
8576         SEC_DEF("struct_ops+",          STRUCT_OPS, 0, SEC_NONE),
8577         SEC_DEF("struct_ops.s+",        STRUCT_OPS, 0, SEC_SLEEPABLE),
8578         SEC_DEF("sk_lookup",            SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
8579 };
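/* Naming conventions used in the table above: a trailing '+' means optional
 * "/extras" are accepted (e.g. SEC("kprobe") or SEC("kprobe/sys_open")), a
 * trailing '/' requires extras, a ".s" variant marks the program sleepable,
 * and SEC_ATTACHABLE* entries participate in attach-type guessing. See
 * sec_def_matches() below for the exact matching rules.
 */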
8580
8581 static size_t custom_sec_def_cnt;
8582 static struct bpf_sec_def *custom_sec_defs;
8583 static struct bpf_sec_def custom_fallback_def;
8584 static bool has_custom_fallback_def;
8585
8586 static int last_custom_sec_def_handler_id;
8587
8588 int libbpf_register_prog_handler(const char *sec,
8589                                  enum bpf_prog_type prog_type,
8590                                  enum bpf_attach_type exp_attach_type,
8591                                  const struct libbpf_prog_handler_opts *opts)
8592 {
8593         struct bpf_sec_def *sec_def;
8594
8595         if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
8596                 return libbpf_err(-EINVAL);
8597
8598         if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
8599                 return libbpf_err(-E2BIG);
8600
8601         if (sec) {
8602                 sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
8603                                               sizeof(*sec_def));
8604                 if (!sec_def)
8605                         return libbpf_err(-ENOMEM);
8606
8607                 custom_sec_defs = sec_def;
8608                 sec_def = &custom_sec_defs[custom_sec_def_cnt];
8609         } else {
8610                 if (has_custom_fallback_def)
8611                         return libbpf_err(-EBUSY);
8612
8613                 sec_def = &custom_fallback_def;
8614         }
8615
8616         sec_def->sec = sec ? strdup(sec) : NULL;
8617         if (sec && !sec_def->sec)
8618                 return libbpf_err(-ENOMEM);
8619
8620         sec_def->prog_type = prog_type;
8621         sec_def->expected_attach_type = exp_attach_type;
8622         sec_def->cookie = OPTS_GET(opts, cookie, 0);
8623
8624         sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
8625         sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
8626         sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
8627
8628         sec_def->handler_id = ++last_custom_sec_def_handler_id;
8629
8630         if (sec)
8631                 custom_sec_def_cnt++;
8632         else
8633                 has_custom_fallback_def = true;
8634
8635         return sec_def->handler_id;
8636 }
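/* Usage sketch (illustrative; the section prefix is hypothetical): register a
 * custom handler so SEC("mykprobe+") programs are treated as kprobes, then
 * unregister it by the returned id:
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts);
 *	int id;
 *
 *	id = libbpf_register_prog_handler("mykprobe+", BPF_PROG_TYPE_KPROBE, 0, &opts);
 *	...
 *	if (id > 0)
 *		libbpf_unregister_prog_handler(id);
 */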
8637
8638 int libbpf_unregister_prog_handler(int handler_id)
8639 {
8640         struct bpf_sec_def *sec_defs;
8641         int i;
8642
8643         if (handler_id <= 0)
8644                 return libbpf_err(-EINVAL);
8645
8646         if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
8647                 memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
8648                 has_custom_fallback_def = false;
8649                 return 0;
8650         }
8651
8652         for (i = 0; i < custom_sec_def_cnt; i++) {
8653                 if (custom_sec_defs[i].handler_id == handler_id)
8654                         break;
8655         }
8656
8657         if (i == custom_sec_def_cnt)
8658                 return libbpf_err(-ENOENT);
8659
8660         free(custom_sec_defs[i].sec);
8661         for (i = i + 1; i < custom_sec_def_cnt; i++)
8662                 custom_sec_defs[i - 1] = custom_sec_defs[i];
8663         custom_sec_def_cnt--;
8664
8665         /* try to shrink the array, but it's ok if we couldn't */
8666         sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
             /* if the new count is zero, reallocarray() may free the old array
              * and return a valid NULL result; accept it in that case so we
              * don't keep a dangling pointer around
              */
8667         if (sec_defs || custom_sec_def_cnt == 0)
8668                 custom_sec_defs = sec_defs;
8669
8670         return 0;
8671 }
8672
8673 static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
8674 {
8675         size_t len = strlen(sec_def->sec);
8676
8677         /* "type/" always has to have proper SEC("type/extras") form */
8678         if (sec_def->sec[len - 1] == '/') {
8679                 if (str_has_pfx(sec_name, sec_def->sec))
8680                         return true;
8681                 return false;
8682         }
8683
8684         /* "type+" means it can be either exact SEC("type") or
8685          * well-formed SEC("type/extras") with proper '/' separator
8686          */
8687         if (sec_def->sec[len - 1] == '+') {
8688                 len--;
8689                 /* not even a prefix */
8690                 if (strncmp(sec_name, sec_def->sec, len) != 0)
8691                         return false;
8692                 /* exact match or has '/' separator */
8693                 if (sec_name[len] == '\0' || sec_name[len] == '/')
8694                         return true;
8695                 return false;
8696         }
8697
8698         return strcmp(sec_name, sec_def->sec) == 0;
8699 }
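/* Examples of the rules above: a definition of "kprobe+" matches
 * SEC("kprobe") and SEC("kprobe/sys_nanosleep") but not SEC("kprobex"); a
 * definition ending in '/' (e.g. a hypothetical custom "tp/") matches only
 * names that keep the separator, like SEC("tp/sched/sched_switch"); anything
 * else must match exactly.
 */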
8700
8701 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8702 {
8703         const struct bpf_sec_def *sec_def;
8704         int i, n;
8705
8706         n = custom_sec_def_cnt;
8707         for (i = 0; i < n; i++) {
8708                 sec_def = &custom_sec_defs[i];
8709                 if (sec_def_matches(sec_def, sec_name))
8710                         return sec_def;
8711         }
8712
8713         n = ARRAY_SIZE(section_defs);
8714         for (i = 0; i < n; i++) {
8715                 sec_def = &section_defs[i];
8716                 if (sec_def_matches(sec_def, sec_name))
8717                         return sec_def;
8718         }
8719
8720         if (has_custom_fallback_def)
8721                 return &custom_fallback_def;
8722
8723         return NULL;
8724 }
8725
8726 #define MAX_TYPE_NAME_SIZE 32
8727
8728 static char *libbpf_get_type_names(bool attach_type)
8729 {
8730         int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
8731         char *buf;
8732
8733         buf = malloc(len);
8734         if (!buf)
8735                 return NULL;
8736
8737         buf[0] = '\0';
8738         /* Build up a string buf with all available names */
8739         for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8740                 const struct bpf_sec_def *sec_def = &section_defs[i];
8741
8742                 if (attach_type) {
8743                         if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
8744                                 continue;
8745
8746                         if (!(sec_def->cookie & SEC_ATTACHABLE))
8747                                 continue;
8748                 }
8749
8750                 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
8751                         free(buf);
8752                         return NULL;
8753                 }
8754                 strcat(buf, " ");
8755                 strcat(buf, section_defs[i].sec);
8756         }
8757
8758         return buf;
8759 }
8760
8761 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8762                              enum bpf_attach_type *expected_attach_type)
8763 {
8764         const struct bpf_sec_def *sec_def;
8765         char *type_names;
8766
8767         if (!name)
8768                 return libbpf_err(-EINVAL);
8769
8770         sec_def = find_sec_def(name);
8771         if (sec_def) {
8772                 *prog_type = sec_def->prog_type;
8773                 *expected_attach_type = sec_def->expected_attach_type;
8774                 return 0;
8775         }
8776
8777         pr_debug("failed to guess program type from ELF section '%s'\n", name);
8778         type_names = libbpf_get_type_names(false);
8779         if (type_names != NULL) {
8780                 pr_debug("supported section(type) names are:%s\n", type_names);
8781                 free(type_names);
8782         }
8783
8784         return libbpf_err(-ESRCH);
8785 }
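/* Usage sketch: guess the program type from a section name (the resulting
 * values follow from the section_defs[] table above):
 *
 *	enum bpf_prog_type ptype;
 *	enum bpf_attach_type atype;
 *
 *	if (!libbpf_prog_type_by_name("kprobe/do_unlinkat", &ptype, &atype)) {
 *		// ptype == BPF_PROG_TYPE_KPROBE, atype == 0
 *	}
 */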
8786
8787 const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
8788 {
8789         if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
8790                 return NULL;
8791
8792         return attach_type_name[t];
8793 }
8794
8795 const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
8796 {
8797         if (t < 0 || t >= ARRAY_SIZE(link_type_name))
8798                 return NULL;
8799
8800         return link_type_name[t];
8801 }
8802
8803 const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
8804 {
8805         if (t < 0 || t >= ARRAY_SIZE(map_type_name))
8806                 return NULL;
8807
8808         return map_type_name[t];
8809 }
8810
8811 const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
8812 {
8813         if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
8814                 return NULL;
8815
8816         return prog_type_name[t];
8817 }
8818
8819 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8820                                                      size_t offset)
8821 {
8822         struct bpf_map *map;
8823         size_t i;
8824
8825         for (i = 0; i < obj->nr_maps; i++) {
8826                 map = &obj->maps[i];
8827                 if (!bpf_map__is_struct_ops(map))
8828                         continue;
8829                 if (map->sec_offset <= offset &&
8830                     offset - map->sec_offset < map->def.value_size)
8831                         return map;
8832         }
8833
8834         return NULL;
8835 }
8836
8837 /* Collect the reloc from ELF and populate the st_ops->progs[] */
8838 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8839                                             Elf64_Shdr *shdr, Elf_Data *data)
8840 {
8841         const struct btf_member *member;
8842         struct bpf_struct_ops *st_ops;
8843         struct bpf_program *prog;
8844         unsigned int shdr_idx;
8845         const struct btf *btf;
8846         struct bpf_map *map;
8847         unsigned int moff, insn_idx;
8848         const char *name;
8849         __u32 member_idx;
8850         Elf64_Sym *sym;
8851         Elf64_Rel *rel;
8852         int i, nrels;
8853
8854         btf = obj->btf;
8855         nrels = shdr->sh_size / shdr->sh_entsize;
8856         for (i = 0; i < nrels; i++) {
8857                 rel = elf_rel_by_idx(data, i);
8858                 if (!rel) {
8859                         pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
8860                         return -LIBBPF_ERRNO__FORMAT;
8861                 }
8862
8863                 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
8864                 if (!sym) {
8865                         pr_warn("struct_ops reloc: symbol %zx not found\n",
8866                                 (size_t)ELF64_R_SYM(rel->r_info));
8867                         return -LIBBPF_ERRNO__FORMAT;
8868                 }
8869
8870                 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
8871                 map = find_struct_ops_map_by_offset(obj, rel->r_offset);
8872                 if (!map) {
8873                         pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
8874                                 (size_t)rel->r_offset);
8875                         return -EINVAL;
8876                 }
8877
8878                 moff = rel->r_offset - map->sec_offset;
8879                 shdr_idx = sym->st_shndx;
8880                 st_ops = map->st_ops;
8881                 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
8882                          map->name,
8883                          (long long)(rel->r_info >> 32),
8884                          (long long)sym->st_value,
8885                          shdr_idx, (size_t)rel->r_offset,
8886                          map->sec_offset, sym->st_name, name);
8887
8888                 if (shdr_idx >= SHN_LORESERVE) {
8889                         pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
8890                                 map->name, (size_t)rel->r_offset, shdr_idx);
8891                         return -LIBBPF_ERRNO__RELOC;
8892                 }
8893                 if (sym->st_value % BPF_INSN_SZ) {
8894                         pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
8895                                 map->name, (unsigned long long)sym->st_value);
8896                         return -LIBBPF_ERRNO__FORMAT;
8897                 }
8898                 insn_idx = sym->st_value / BPF_INSN_SZ;
8899
8900                 member = find_member_by_offset(st_ops->type, moff * 8);
8901                 if (!member) {
8902                         pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
8903                                 map->name, moff);
8904                         return -EINVAL;
8905                 }
8906                 member_idx = member - btf_members(st_ops->type);
8907                 name = btf__name_by_offset(btf, member->name_off);
8908
8909                 if (!resolve_func_ptr(btf, member->type, NULL)) {
8910                         pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
8911                                 map->name, name);
8912                         return -EINVAL;
8913                 }
8914
8915                 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
8916                 if (!prog) {
8917                         pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
8918                                 map->name, shdr_idx, name);
8919                         return -EINVAL;
8920                 }
8921
8922                 /* prevent the use of BPF prog with invalid type */
8923                 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
8924                         pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
8925                                 map->name, prog->name);
8926                         return -EINVAL;
8927                 }
8928
8929                 /* if we haven't yet processed this BPF program, record proper
8930                  * attach_btf_id and member_idx
8931                  */
8932                 if (!prog->attach_btf_id) {
8933                         prog->attach_btf_id = st_ops->type_id;
8934                         prog->expected_attach_type = member_idx;
8935                 }
8936
8937                 /* struct_ops BPF prog can be re-used between multiple
8938                  * .struct_ops as long as it's the same struct_ops struct
8939                  * definition and the same function pointer field
8940                  */
8941                 if (prog->attach_btf_id != st_ops->type_id ||
8942                     prog->expected_attach_type != member_idx) {
8943                         pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
8944                                 map->name, prog->name, prog->sec_name, prog->type,
8945                                 prog->attach_btf_id, prog->expected_attach_type, name);
8946                         return -EINVAL;
8947                 }
8948
8949                 st_ops->progs[member_idx] = prog;
8950         }
8951
8952         return 0;
8953 }
8954
8955 #define BTF_TRACE_PREFIX "btf_trace_"
8956 #define BTF_LSM_PREFIX "bpf_lsm_"
8957 #define BTF_ITER_PREFIX "bpf_iter_"
8958 #define BTF_MAX_NAME_SIZE 128
8959
8960 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
8961                                 const char **prefix, int *kind)
8962 {
8963         switch (attach_type) {
8964         case BPF_TRACE_RAW_TP:
8965                 *prefix = BTF_TRACE_PREFIX;
8966                 *kind = BTF_KIND_TYPEDEF;
8967                 break;
8968         case BPF_LSM_MAC:
8969         case BPF_LSM_CGROUP:
8970                 *prefix = BTF_LSM_PREFIX;
8971                 *kind = BTF_KIND_FUNC;
8972                 break;
8973         case BPF_TRACE_ITER:
8974                 *prefix = BTF_ITER_PREFIX;
8975                 *kind = BTF_KIND_FUNC;
8976                 break;
8977         default:
8978                 *prefix = "";
8979                 *kind = BTF_KIND_FUNC;
8980         }
8981 }
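/* For example, with attach_type BPF_TRACE_RAW_TP and target "sched_switch"
 * the lookup below searches for a typedef named "btf_trace_sched_switch";
 * with BPF_LSM_MAC and "file_open" it searches for a func named
 * "bpf_lsm_file_open".
 */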
8982
8983 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8984                                    const char *name, __u32 kind)
8985 {
8986         char btf_type_name[BTF_MAX_NAME_SIZE];
8987         int ret;
8988
8989         ret = snprintf(btf_type_name, sizeof(btf_type_name),
8990                        "%s%s", prefix, name);
8991         /* snprintf returns the number of characters written excluding the
8992          * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
8993          * indicates truncation.
8994          */
8995         if (ret < 0 || ret >= sizeof(btf_type_name))
8996                 return -ENAMETOOLONG;
8997         return btf__find_by_name_kind(btf, btf_type_name, kind);
8998 }
8999
9000 static inline int find_attach_btf_id(struct btf *btf, const char *name,
9001                                      enum bpf_attach_type attach_type)
9002 {
9003         const char *prefix;
9004         int kind;
9005
9006         btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9007         return find_btf_by_prefix_kind(btf, prefix, name, kind);
9008 }
9009
9010 int libbpf_find_vmlinux_btf_id(const char *name,
9011                                enum bpf_attach_type attach_type)
9012 {
9013         struct btf *btf;
9014         int err;
9015
9016         btf = btf__load_vmlinux_btf();
9017         err = libbpf_get_error(btf);
9018         if (err) {
9019                 pr_warn("vmlinux BTF is not found\n");
9020                 return libbpf_err(err);
9021         }
9022
9023         err = find_attach_btf_id(btf, name, attach_type);
9024         if (err <= 0)
9025                 pr_warn("%s is not found in vmlinux BTF\n", name);
9026
9027         btf__free(btf);
9028         return libbpf_err(err);
9029 }
9030
9031 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9032 {
9033         struct bpf_prog_info info;
9034         __u32 info_len = sizeof(info);
9035         struct btf *btf;
9036         int err;
9037
9038         memset(&info, 0, info_len);
9039         err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
9040         if (err) {
9041                 pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
9042                         attach_prog_fd, err);
9043                 return err;
9044         }
9045
9046         err = -EINVAL;
9047         if (!info.btf_id) {
9048                 pr_warn("The target program doesn't have BTF\n");
9049                 goto out;
9050         }
9051         btf = btf__load_from_kernel_by_id(info.btf_id);
9052         err = libbpf_get_error(btf);
9053         if (err) {
9054                 pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
9055                 goto out;
9056         }
9057         err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9058         btf__free(btf);
9059         if (err <= 0) {
9060                 pr_warn("%s is not found in prog's BTF\n", name);
9061                 goto out;
9062         }
9063 out:
9064         return err;
9065 }
9066
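/* Find the BTF type ID to attach to by name, searching vmlinux BTF first and
 * then each loaded kernel module's BTF; *btf_obj_fd is set to 0 for vmlinux
 * BTF or to the module's BTF FD otherwise.
 */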
9067 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9068                               enum bpf_attach_type attach_type,
9069                               int *btf_obj_fd, int *btf_type_id)
9070 {
9071         int ret, i;
9072
9073         ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9074         if (ret > 0) {
9075                 *btf_obj_fd = 0; /* vmlinux BTF */
9076                 *btf_type_id = ret;
9077                 return 0;
9078         }
9079         if (ret != -ENOENT)
9080                 return ret;
9081
9082         ret = load_module_btfs(obj);
9083         if (ret)
9084                 return ret;
9085
9086         for (i = 0; i < obj->btf_module_cnt; i++) {
9087                 const struct module_btf *mod = &obj->btf_modules[i];
9088
9089                 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9090                 if (ret > 0) {
9091                         *btf_obj_fd = mod->fd;
9092                         *btf_type_id = ret;
9093                         return 0;
9094                 }
9095                 if (ret == -ENOENT)
9096                         continue;
9097
9098                 return ret;
9099         }
9100
9101         return -ESRCH;
9102 }
9103
9104 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9105                                      int *btf_obj_fd, int *btf_type_id)
9106 {
9107         enum bpf_attach_type attach_type = prog->expected_attach_type;
9108         __u32 attach_prog_fd = prog->attach_prog_fd;
9109         int err = 0;
9110
9111         /* BPF program's BTF ID */
9112         if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
9113                 if (!attach_prog_fd) {
9114                         pr_warn("prog '%s': attach program FD is not set\n", prog->name);
9115                         return -EINVAL;
9116                 }
9117                 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9118                 if (err < 0) {
9119                         pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9120                                  prog->name, attach_prog_fd, attach_name, err);
9121                         return err;
9122                 }
9123                 *btf_obj_fd = 0;
9124                 *btf_type_id = err;
9125                 return 0;
9126         }
9127
9128         /* kernel/module BTF ID */
9129         if (prog->obj->gen_loader) {
9130                 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9131                 *btf_obj_fd = 0;
9132                 *btf_type_id = 1;
9133         } else {
9134                 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9135         }
9136         if (err) {
9137                 pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
9138                         prog->name, attach_name, err);
9139                 return err;
9140         }
9141         return 0;
9142 }
9143
9144 int libbpf_attach_type_by_name(const char *name,
9145                                enum bpf_attach_type *attach_type)
9146 {
9147         char *type_names;
9148         const struct bpf_sec_def *sec_def;
9149
9150         if (!name)
9151                 return libbpf_err(-EINVAL);
9152
9153         sec_def = find_sec_def(name);
9154         if (!sec_def) {
9155                 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9156                 type_names = libbpf_get_type_names(true);
9157                 if (type_names != NULL) {
9158                         pr_debug("attachable section(type) names are:%s\n", type_names);
9159                         free(type_names);
9160                 }
9161
9162                 return libbpf_err(-EINVAL);
9163         }
9164
9165         if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9166                 return libbpf_err(-EINVAL);
9167         if (!(sec_def->cookie & SEC_ATTACHABLE))
9168                 return libbpf_err(-EINVAL);
9169
9170         *attach_type = sec_def->expected_attach_type;
9171         return 0;
9172 }
9173
9174 int bpf_map__fd(const struct bpf_map *map)
9175 {
9176         return map ? map->fd : libbpf_err(-EINVAL);
9177 }
9178
9179 static bool map_uses_real_name(const struct bpf_map *map)
9180 {
9181         /* Since libbpf started to support custom .data.* and .rodata.* maps,
9182          * their user-visible name differs from kernel-visible name. Users see
9183          * such map's corresponding ELF section name as a map name.
9184          * This check distinguishes .data/.rodata from .data.* and .rodata.*
9185          * maps to know which name has to be returned to the user.
9186          */
9187         if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
9188                 return true;
9189         if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
9190                 return true;
9191         return false;
9192 }
9193
9194 const char *bpf_map__name(const struct bpf_map *map)
9195 {
9196         if (!map)
9197                 return NULL;
9198
9199         if (map_uses_real_name(map))
9200                 return map->real_name;
9201
9202         return map->name;
9203 }
9204
9205 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9206 {
9207         return map->def.type;
9208 }
9209
9210 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9211 {
9212         if (map->fd >= 0)
9213                 return libbpf_err(-EBUSY);
9214         map->def.type = type;
9215         return 0;
9216 }
9217
9218 __u32 bpf_map__map_flags(const struct bpf_map *map)
9219 {
9220         return map->def.map_flags;
9221 }
9222
9223 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9224 {
9225         if (map->fd >= 0)
9226                 return libbpf_err(-EBUSY);
9227         map->def.map_flags = flags;
9228         return 0;
9229 }
9230
9231 __u64 bpf_map__map_extra(const struct bpf_map *map)
9232 {
9233         return map->map_extra;
9234 }
9235
9236 int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
9237 {
9238         if (map->fd >= 0)
9239                 return libbpf_err(-EBUSY);
9240         map->map_extra = map_extra;
9241         return 0;
9242 }
9243
9244 __u32 bpf_map__numa_node(const struct bpf_map *map)
9245 {
9246         return map->numa_node;
9247 }
9248
9249 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9250 {
9251         if (map->fd >= 0)
9252                 return libbpf_err(-EBUSY);
9253         map->numa_node = numa_node;
9254         return 0;
9255 }
9256
9257 __u32 bpf_map__key_size(const struct bpf_map *map)
9258 {
9259         return map->def.key_size;
9260 }
9261
9262 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9263 {
9264         if (map->fd >= 0)
9265                 return libbpf_err(-EBUSY);
9266         map->def.key_size = size;
9267         return 0;
9268 }
9269
9270 __u32 bpf_map__value_size(const struct bpf_map *map)
9271 {
9272         return map->def.value_size;
9273 }
9274
9275 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9276 {
9277         if (map->fd >= 0)
9278                 return libbpf_err(-EBUSY);
9279         map->def.value_size = size;
9280         return 0;
9281 }
9282
9283 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9284 {
9285         return map ? map->btf_key_type_id : 0;
9286 }
9287
9288 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9289 {
9290         return map ? map->btf_value_type_id : 0;
9291 }
9292
9293 int bpf_map__set_initial_value(struct bpf_map *map,
9294                                const void *data, size_t size)
9295 {
9296         if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9297             size != map->def.value_size || map->fd >= 0)
9298                 return libbpf_err(-EINVAL);
9299
9300         memcpy(map->mmaped, data, size);
9301         return 0;
9302 }
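/* Usage sketch (illustrative; struct my_consts is hypothetical and assumed to
 * match the section's layout exactly): seed an internal .rodata map before
 * bpf_object__load(), while the map FD is not yet created:
 *
 *	struct my_consts vals = { .debug = true };
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, ".rodata");
 *
 *	err = bpf_map__set_initial_value(map, &vals, sizeof(vals));
 */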
9303
9304 const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
9305 {
9306         if (!map->mmaped)
9307                 return NULL;
9308         *psize = map->def.value_size;
9309         return map->mmaped;
9310 }
9311
9312 bool bpf_map__is_internal(const struct bpf_map *map)
9313 {
9314         return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9315 }
9316
9317 __u32 bpf_map__ifindex(const struct bpf_map *map)
9318 {
9319         return map->map_ifindex;
9320 }
9321
9322 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9323 {
9324         if (map->fd >= 0)
9325                 return libbpf_err(-EBUSY);
9326         map->map_ifindex = ifindex;
9327         return 0;
9328 }
9329
9330 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9331 {
9332         if (!bpf_map_type__is_map_in_map(map->def.type)) {
9333                 pr_warn("error: unsupported map type\n");
9334                 return libbpf_err(-EINVAL);
9335         }
9336         if (map->inner_map_fd != -1) {
9337                 pr_warn("error: inner_map_fd already specified\n");
9338                 return libbpf_err(-EINVAL);
9339         }
9340         if (map->inner_map) {
9341                 bpf_map__destroy(map->inner_map);
9342                 zfree(&map->inner_map);
9343         }
9344         map->inner_map_fd = fd;
9345         return 0;
9346 }
9347
9348 static struct bpf_map *
9349 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9350 {
9351         ssize_t idx;
9352         struct bpf_map *s, *e;
9353
9354         if (!obj || !obj->maps)
9355                 return errno = EINVAL, NULL;
9356
9357         s = obj->maps;
9358         e = obj->maps + obj->nr_maps;
9359
9360         if ((m < s) || (m >= e)) {
9361                 pr_warn("error in %s: map handler doesn't belong to object\n",
9362                          __func__);
9363                 return errno = EINVAL, NULL;
9364         }
9365
9366         idx = (m - obj->maps) + i;
9367         if (idx >= obj->nr_maps || idx < 0)
9368                 return NULL;
9369         return &obj->maps[idx];
9370 }
9371
9372 struct bpf_map *
9373 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
9374 {
9375         if (prev == NULL)
9376                 return obj->maps;
9377
9378         return __bpf_map__iter(prev, obj, 1);
9379 }
9380
9381 struct bpf_map *
9382 bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
9383 {
9384         if (next == NULL) {
9385                 if (!obj->nr_maps)
9386                         return NULL;
9387                 return obj->maps + obj->nr_maps - 1;
9388         }
9389
9390         return __bpf_map__iter(next, obj, -1);
9391 }
9392
9393 struct bpf_map *
9394 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9395 {
9396         struct bpf_map *pos;
9397
9398         bpf_object__for_each_map(pos, obj) {
9399                 /* if it's a special internal map name (which always starts
9400                  * with dot) then check if that special name matches the
9401                  * real map name (ELF section name)
9402                  */
9403                 if (name[0] == '.') {
9404                         if (pos->real_name && strcmp(pos->real_name, name) == 0)
9405                                 return pos;
9406                         continue;
9407                 }
9408                 /* otherwise map name has to be an exact match */
9409                 if (map_uses_real_name(pos)) {
9410                         if (strcmp(pos->real_name, name) == 0)
9411                                 return pos;
9412                         continue;
9413                 }
9414                 if (strcmp(pos->name, name) == 0)
9415                         return pos;
9416         }
9417         return errno = ENOENT, NULL;
9418 }
9419
9420 int
9421 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9422 {
9423         return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9424 }
9425
9426 static int validate_map_op(const struct bpf_map *map, size_t key_sz,
9427                            size_t value_sz, bool check_value_sz)
9428 {
9429         if (map->fd <= 0)
9430                 return -ENOENT;
9431
9432         if (map->def.key_size != key_sz) {
9433                 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
9434                         map->name, key_sz, map->def.key_size);
9435                 return -EINVAL;
9436         }
9437
9438         if (!check_value_sz)
9439                 return 0;
9440
9441         switch (map->def.type) {
9442         case BPF_MAP_TYPE_PERCPU_ARRAY:
9443         case BPF_MAP_TYPE_PERCPU_HASH:
9444         case BPF_MAP_TYPE_LRU_PERCPU_HASH:
9445         case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
9446                 int num_cpu = libbpf_num_possible_cpus();
9447                 size_t elem_sz = roundup(map->def.value_size, 8);
9448
9449                 if (value_sz != num_cpu * elem_sz) {
9450                         pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
9451                                 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
9452                         return -EINVAL;
9453                 }
9454                 break;
9455         }
9456         default:
9457                 if (map->def.value_size != value_sz) {
9458                         pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
9459                                 map->name, value_sz, map->def.value_size);
9460                         return -EINVAL;
9461                 }
9462                 break;
9463         }
9464         return 0;
9465 }
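/* Example of the per-CPU rule above: for a per-CPU map with value_size == 4
 * on a system with 8 possible CPUs, the caller must pass
 * value_sz == 8 * roundup(4, 8) == 64, i.e. one 8-byte-aligned slot per
 * possible CPU.
 */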
9466
9467 int bpf_map__lookup_elem(const struct bpf_map *map,
9468                          const void *key, size_t key_sz,
9469                          void *value, size_t value_sz, __u64 flags)
9470 {
9471         int err;
9472
9473         err = validate_map_op(map, key_sz, value_sz, true);
9474         if (err)
9475                 return libbpf_err(err);
9476
9477         return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
9478 }
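/* Usage sketch (map is assumed to be a loaded map with 4-byte keys and
 * 8-byte values):
 *
 *	__u32 key = 0;
 *	__u64 value;
 *
 *	err = bpf_map__lookup_elem(map, &key, sizeof(key), &value, sizeof(value), 0);
 */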
9479
9480 int bpf_map__update_elem(const struct bpf_map *map,
9481                          const void *key, size_t key_sz,
9482                          const void *value, size_t value_sz, __u64 flags)
9483 {
9484         int err;
9485
9486         err = validate_map_op(map, key_sz, value_sz, true);
9487         if (err)
9488                 return libbpf_err(err);
9489
9490         return bpf_map_update_elem(map->fd, key, value, flags);
9491 }
9492
9493 int bpf_map__delete_elem(const struct bpf_map *map,
9494                          const void *key, size_t key_sz, __u64 flags)
9495 {
9496         int err;
9497
9498         err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
9499         if (err)
9500                 return libbpf_err(err);
9501
9502         return bpf_map_delete_elem_flags(map->fd, key, flags);
9503 }
9504
9505 int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
9506                                     const void *key, size_t key_sz,
9507                                     void *value, size_t value_sz, __u64 flags)
9508 {
9509         int err;
9510
9511         err = validate_map_op(map, key_sz, value_sz, true);
9512         if (err)
9513                 return libbpf_err(err);
9514
9515         return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
9516 }
9517
9518 int bpf_map__get_next_key(const struct bpf_map *map,
9519                           const void *cur_key, void *next_key, size_t key_sz)
9520 {
9521         int err;
9522
9523         err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
9524         if (err)
9525                 return libbpf_err(err);
9526
9527         return bpf_map_get_next_key(map->fd, cur_key, next_key);
9528 }
9529
9530 long libbpf_get_error(const void *ptr)
9531 {
9532         if (!IS_ERR_OR_NULL(ptr))
9533                 return 0;
9534
9535         if (IS_ERR(ptr))
9536                 errno = -PTR_ERR(ptr);
9537
9538         /* If ptr == NULL, then errno should already be set by the failing
9539          * API, because libbpf never returns NULL on success and now always
9540          * sets errno on error. So no extra errno handling is needed for the
9541          * ptr == NULL case.
9542          */
9543         return -errno;
9544 }
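
/* Illustrative sketch (not part of libbpf): with libbpf 1.0 semantics,
 * pointer-returning APIs return NULL and set errno on error, so a caller can
 * rely on errno directly instead of libbpf_get_error().
 * example_open_pinned_link() is a hypothetical helper.
 */
static inline struct bpf_link *example_open_pinned_link(const char *path)
{
	struct bpf_link *link = bpf_link__open(path);

	if (!link) /* errno was already set by bpf_link__open() */
		fprintf(stderr, "failed to open link at '%s': %d\n", path, -errno);
	return link;
}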
9545
9546 /* Replace link's underlying BPF program with the new one */
9547 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
9548 {
9549         int ret;
9550
9551         ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
9552         return libbpf_err_errno(ret);
9553 }
9554
9555 /* Release "ownership" of the underlying BPF resource (typically, a BPF
9556  * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
9557  * disconnected link, when destroyed through a bpf_link__destroy() call, won't
9558  * attempt to detach/unregister that BPF resource. This is useful in
9559  * situations where, say, the attached BPF program has to outlive the
9560  * userspace program that attached it. Depending on the type of BPF program,
9561  * though, additional steps (like pinning the BPF program in BPF FS) might be
9562  * necessary to ensure that the exit of the userspace program doesn't trigger
9563  * automatic detachment and cleanup inside the kernel.
9564  */
9565 void bpf_link__disconnect(struct bpf_link *link)
9566 {
9567         link->disconnected = true;
9568 }
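
/* Illustrative sketch (not part of libbpf): making an attachment survive
 * process exit by pinning the link and then disconnecting it, so that the
 * final bpf_link__destroy() only frees memory. The helper and pin path are
 * hypothetical examples.
 */
static inline int example_keep_link_attached(struct bpf_link *link)
{
	int err = bpf_link__pin(link, "/sys/fs/bpf/example_link");

	if (err)
		return err;
	bpf_link__disconnect(link);     /* don't detach on destroy */
	return bpf_link__destroy(link); /* frees memory, link stays pinned */
}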
9569
9570 int bpf_link__destroy(struct bpf_link *link)
9571 {
9572         int err = 0;
9573
9574         if (IS_ERR_OR_NULL(link))
9575                 return 0;
9576
9577         if (!link->disconnected && link->detach)
9578                 err = link->detach(link);
9579         if (link->pin_path)
9580                 free(link->pin_path);
9581         if (link->dealloc)
9582                 link->dealloc(link);
9583         else
9584                 free(link);
9585
9586         return libbpf_err(err);
9587 }
9588
9589 int bpf_link__fd(const struct bpf_link *link)
9590 {
9591         return link->fd;
9592 }
9593
9594 const char *bpf_link__pin_path(const struct bpf_link *link)
9595 {
9596         return link->pin_path;
9597 }
9598
9599 static int bpf_link__detach_fd(struct bpf_link *link)
9600 {
9601         return libbpf_err_errno(close(link->fd));
9602 }
9603
9604 struct bpf_link *bpf_link__open(const char *path)
9605 {
9606         struct bpf_link *link;
9607         int fd;
9608
9609         fd = bpf_obj_get(path);
9610         if (fd < 0) {
9611                 fd = -errno;
9612                 pr_warn("failed to open link at %s: %d\n", path, fd);
9613                 return libbpf_err_ptr(fd);
9614         }
9615
9616         link = calloc(1, sizeof(*link));
9617         if (!link) {
9618                 close(fd);
9619                 return libbpf_err_ptr(-ENOMEM);
9620         }
9621         link->detach = &bpf_link__detach_fd;
9622         link->fd = fd;
9623
9624         link->pin_path = strdup(path);
9625         if (!link->pin_path) {
9626                 bpf_link__destroy(link);
9627                 return libbpf_err_ptr(-ENOMEM);
9628         }
9629
9630         return link;
9631 }
9632
9633 int bpf_link__detach(struct bpf_link *link)
9634 {
9635         return bpf_link_detach(link->fd) ? -errno : 0;
9636 }
9637
9638 int bpf_link__pin(struct bpf_link *link, const char *path)
9639 {
9640         int err;
9641
9642         if (link->pin_path)
9643                 return libbpf_err(-EBUSY);
9644         err = make_parent_dir(path);
9645         if (err)
9646                 return libbpf_err(err);
9647         err = check_path(path);
9648         if (err)
9649                 return libbpf_err(err);
9650
9651         link->pin_path = strdup(path);
9652         if (!link->pin_path)
9653                 return libbpf_err(-ENOMEM);
9654
9655         if (bpf_obj_pin(link->fd, link->pin_path)) {
9656                 err = -errno;
9657                 zfree(&link->pin_path);
9658                 return libbpf_err(err);
9659         }
9660
9661         pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
9662         return 0;
9663 }
9664
9665 int bpf_link__unpin(struct bpf_link *link)
9666 {
9667         int err;
9668
9669         if (!link->pin_path)
9670                 return libbpf_err(-EINVAL);
9671
9672         err = unlink(link->pin_path);
9673         if (err != 0)
9674                 return -errno;
9675
9676         pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
9677         zfree(&link->pin_path);
9678         return 0;
9679 }
9680
9681 struct bpf_link_perf {
9682         struct bpf_link link;
9683         int perf_event_fd;
9684         /* legacy kprobe support: keep track of probe identifier and type */
9685         char *legacy_probe_name;
9686         bool legacy_is_kprobe;
9687         bool legacy_is_retprobe;
9688 };
9689
9690 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
9691 static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
9692
9693 static int bpf_link_perf_detach(struct bpf_link *link)
9694 {
9695         struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
9696         int err = 0;
9697
9698         if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
9699                 err = -errno;
9700
9701         if (perf_link->perf_event_fd != link->fd)
9702                 close(perf_link->perf_event_fd);
9703         close(link->fd);
9704
9705         /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
9706         if (perf_link->legacy_probe_name) {
9707                 if (perf_link->legacy_is_kprobe) {
9708                         err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
9709                                                          perf_link->legacy_is_retprobe);
9710                 } else {
9711                         err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
9712                                                          perf_link->legacy_is_retprobe);
9713                 }
9714         }
9715
9716         return err;
9717 }
9718
9719 static void bpf_link_perf_dealloc(struct bpf_link *link)
9720 {
9721         struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
9722
9723         free(perf_link->legacy_probe_name);
9724         free(perf_link);
9725 }
9726
9727 struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
9728                                                      const struct bpf_perf_event_opts *opts)
9729 {
9730         char errmsg[STRERR_BUFSIZE];
9731         struct bpf_link_perf *link;
9732         int prog_fd, link_fd = -1, err;
9733         bool force_ioctl_attach;
9734
9735         if (!OPTS_VALID(opts, bpf_perf_event_opts))
9736                 return libbpf_err_ptr(-EINVAL);
9737
9738         if (pfd < 0) {
9739                 pr_warn("prog '%s': invalid perf event FD %d\n",
9740                         prog->name, pfd);
9741                 return libbpf_err_ptr(-EINVAL);
9742         }
9743         prog_fd = bpf_program__fd(prog);
9744         if (prog_fd < 0) {
9745                 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9746                         prog->name);
9747                 return libbpf_err_ptr(-EINVAL);
9748         }
9749
9750         link = calloc(1, sizeof(*link));
9751         if (!link)
9752                 return libbpf_err_ptr(-ENOMEM);
9753         link->link.detach = &bpf_link_perf_detach;
9754         link->link.dealloc = &bpf_link_perf_dealloc;
9755         link->perf_event_fd = pfd;
9756
9757         force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
9758         if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
9759                 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
9760                         .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
9761
9762                 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
9763                 if (link_fd < 0) {
9764                         err = -errno;
9765                         pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
9766                                 prog->name, pfd,
9767                                 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9768                         goto err_out;
9769                 }
9770                 link->link.fd = link_fd;
9771         } else {
9772                 if (OPTS_GET(opts, bpf_cookie, 0)) {
9773                         pr_warn("prog '%s': user context value is not supported\n", prog->name);
9774                         err = -EOPNOTSUPP;
9775                         goto err_out;
9776                 }
9777
9778                 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9779                         err = -errno;
9780                         pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
9781                                 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9782                         if (err == -EPROTO)
9783                                 pr_warn("prog '%s': try adding PERF_SAMPLE_CALLCHAIN to, or removing exclude_callchain_[kernel|user] from, pfd %d\n",
9784                                         prog->name, pfd);
9785                         goto err_out;
9786                 }
9787                 link->link.fd = pfd;
9788         }
9789         if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9790                 err = -errno;
9791                 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
9792                         prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9793                 goto err_out;
9794         }
9795
9796         return &link->link;
9797 err_out:
9798         if (link_fd >= 0)
9799                 close(link_fd);
9800         free(link);
9801         return libbpf_err_ptr(err);
9802 }
9803
9804 struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
9805 {
9806         return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
9807 }
9808
9809 /*
9810  * This function is expected to parse an integer in the range [0, 2^31-1]
9811  * from the given file using scanf format string fmt. If the actual parsed
9812  * value is negative, the result might be indistinguishable from an error.
9813  */
9814 static int parse_uint_from_file(const char *file, const char *fmt)
9815 {
9816         char buf[STRERR_BUFSIZE];
9817         int err, ret;
9818         FILE *f;
9819
9820         f = fopen(file, "r");
9821         if (!f) {
9822                 err = -errno;
9823                 pr_debug("failed to open '%s': %s\n", file,
9824                          libbpf_strerror_r(err, buf, sizeof(buf)));
9825                 return err;
9826         }
9827         err = fscanf(f, fmt, &ret);
9828         if (err != 1) {
9829                 err = err == EOF ? -EIO : -errno;
9830                 pr_debug("failed to parse '%s': %s\n", file,
9831                         libbpf_strerror_r(err, buf, sizeof(buf)));
9832                 fclose(f);
9833                 return err;
9834         }
9835         fclose(f);
9836         return ret;
9837 }
9838
9839 static int determine_kprobe_perf_type(void)
9840 {
9841         const char *file = "/sys/bus/event_source/devices/kprobe/type";
9842
9843         return parse_uint_from_file(file, "%d\n");
9844 }
9845
9846 static int determine_uprobe_perf_type(void)
9847 {
9848         const char *file = "/sys/bus/event_source/devices/uprobe/type";
9849
9850         return parse_uint_from_file(file, "%d\n");
9851 }
9852
9853 static int determine_kprobe_retprobe_bit(void)
9854 {
9855         const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
9856
9857         return parse_uint_from_file(file, "config:%d\n");
9858 }
9859
9860 static int determine_uprobe_retprobe_bit(void)
9861 {
9862         const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
9863
9864         return parse_uint_from_file(file, "config:%d\n");
9865 }
9866
9867 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
9868 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
9869
9870 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9871                                  uint64_t offset, int pid, size_t ref_ctr_off)
9872 {
9873         const size_t attr_sz = sizeof(struct perf_event_attr);
9874         struct perf_event_attr attr;
9875         char errmsg[STRERR_BUFSIZE];
9876         int type, pfd;
9877
9878         if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
9879                 return -EINVAL;
9880
9881         memset(&attr, 0, attr_sz);
9882
9883         type = uprobe ? determine_uprobe_perf_type()
9884                       : determine_kprobe_perf_type();
9885         if (type < 0) {
9886                 pr_warn("failed to determine %s perf type: %s\n",
9887                         uprobe ? "uprobe" : "kprobe",
9888                         libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9889                 return type;
9890         }
9891         if (retprobe) {
9892                 int bit = uprobe ? determine_uprobe_retprobe_bit()
9893                                  : determine_kprobe_retprobe_bit();
9894
9895                 if (bit < 0) {
9896                         pr_warn("failed to determine %s retprobe bit: %s\n",
9897                                 uprobe ? "uprobe" : "kprobe",
9898                                 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
9899                         return bit;
9900                 }
9901                 attr.config |= 1 << bit;
9902         }
9903         attr.size = attr_sz;
9904         attr.type = type;
9905         attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
9906         attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
9907         attr.config2 = offset;           /* kprobe_addr or probe_offset */
9908
9909         /* pid filter is meaningful only for uprobes */
9910         pfd = syscall(__NR_perf_event_open, &attr,
9911                       pid < 0 ? -1 : pid /* pid */,
9912                       pid == -1 ? 0 : -1 /* cpu */,
9913                       -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9914         return pfd >= 0 ? pfd : -errno;
9915 }
9916
9917 static int append_to_file(const char *file, const char *fmt, ...)
9918 {
9919         int fd, n, err = 0;
9920         va_list ap;
9921         char buf[1024];
9922
9923         va_start(ap, fmt);
9924         n = vsnprintf(buf, sizeof(buf), fmt, ap);
9925         va_end(ap);
9926
9927         if (n < 0 || n >= sizeof(buf))
9928                 return -EINVAL;
9929
9930         fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
9931         if (fd < 0)
9932                 return -errno;
9933
9934         if (write(fd, buf, n) < 0)
9935                 err = -errno;
9936
9937         close(fd);
9938         return err;
9939 }
9940
9941 #define DEBUGFS "/sys/kernel/debug/tracing"
9942 #define TRACEFS "/sys/kernel/tracing"
9943
9944 static bool use_debugfs(void)
9945 {
9946         static int has_debugfs = -1;
9947
9948         if (has_debugfs < 0)
9949                 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
9950
9951         return has_debugfs == 1;
9952 }
9953
9954 static const char *tracefs_path(void)
9955 {
9956         return use_debugfs() ? DEBUGFS : TRACEFS;
9957 }
9958
9959 static const char *tracefs_kprobe_events(void)
9960 {
9961         return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
9962 }
9963
9964 static const char *tracefs_uprobe_events(void)
9965 {
9966         return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
9967 }
9968
9969 static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
9970                                          const char *kfunc_name, size_t offset)
9971 {
9972         static int index = 0;
9973         int i;
9974
9975         snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
9976                  __sync_fetch_and_add(&index, 1));
9977
9978         /* sanitize kfunc_name in the probe name */
9979         for (i = 0; buf[i]; i++) {
9980                 if (!isalnum(buf[i]))
9981                         buf[i] = '_';
9982         }
9983 }
9984
9985 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
9986                                    const char *kfunc_name, size_t offset)
9987 {
9988         return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
9989                               retprobe ? 'r' : 'p',
9990                               retprobe ? "kretprobes" : "kprobes",
9991                               probe_name, kfunc_name, offset);
9992 }
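
/* For illustration, a call like
 * add_kprobe_event_legacy("libbpf_1_do_unlinkat_0x0_0", false, "do_unlinkat", 0)
 * appends the following line to kprobe_events:
 *
 *	p:kprobes/libbpf_1_do_unlinkat_0x0_0 do_unlinkat+0x0
 *
 * and remove_kprobe_event_legacy() below appends "-:kprobes/<probe_name>" to
 * delete that probe again.
 */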
9993
9994 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
9995 {
9996         return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
9997                               retprobe ? "kretprobes" : "kprobes", probe_name);
9998 }
9999
10000 static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10001 {
10002         char file[256];
10003
10004         snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10005                  tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
10006
10007         return parse_uint_from_file(file, "%d\n");
10008 }
10009
10010 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10011                                          const char *kfunc_name, size_t offset, int pid)
10012 {
10013         const size_t attr_sz = sizeof(struct perf_event_attr);
10014         struct perf_event_attr attr;
10015         char errmsg[STRERR_BUFSIZE];
10016         int type, pfd, err;
10017
10018         err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
10019         if (err < 0) {
10020                 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
10021                         kfunc_name, offset,
10022                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10023                 return err;
10024         }
10025         type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
10026         if (type < 0) {
10027                 err = type;
10028                 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
10029                         kfunc_name, offset,
10030                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10031                 goto err_clean_legacy;
10032         }
10033
10034         memset(&attr, 0, attr_sz);
10035         attr.size = attr_sz;
10036         attr.config = type;
10037         attr.type = PERF_TYPE_TRACEPOINT;
10038
10039         pfd = syscall(__NR_perf_event_open, &attr,
10040                       pid < 0 ? -1 : pid, /* pid */
10041                       pid == -1 ? 0 : -1, /* cpu */
10042                       -1 /* group_fd */,  PERF_FLAG_FD_CLOEXEC);
10043         if (pfd < 0) {
10044                 err = -errno;
10045                 pr_warn("legacy kprobe perf_event_open() failed: %s\n",
10046                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10047                 goto err_clean_legacy;
10048         }
10049         return pfd;
10050
10051 err_clean_legacy:
10052         /* Clear the newly added legacy kprobe_event */
10053         remove_kprobe_event_legacy(probe_name, retprobe);
10054         return err;
10055 }
10056
10057 static const char *arch_specific_syscall_pfx(void)
10058 {
10059 #if defined(__x86_64__)
10060         return "x64";
10061 #elif defined(__i386__)
10062         return "ia32";
10063 #elif defined(__s390x__)
10064         return "s390x";
10065 #elif defined(__s390__)
10066         return "s390";
10067 #elif defined(__arm__)
10068         return "arm";
10069 #elif defined(__aarch64__)
10070         return "arm64";
10071 #elif defined(__mips__)
10072         return "mips";
10073 #elif defined(__riscv)
10074         return "riscv";
10075 #elif defined(__powerpc64__)
10076         return "powerpc64";
10077 #elif defined(__powerpc__)
10078         return "powerpc";
10079 #else
10080         return NULL;
10081 #endif
10082 }
10083
10084 static int probe_kern_syscall_wrapper(void)
10085 {
10086         char syscall_name[64];
10087         const char *ksys_pfx;
10088
10089         ksys_pfx = arch_specific_syscall_pfx();
10090         if (!ksys_pfx)
10091                 return 0;
10092
10093         snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
10094
10095         if (determine_kprobe_perf_type() >= 0) {
10096                 int pfd;
10097
10098                 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
10099                 if (pfd >= 0)
10100                         close(pfd);
10101
10102                 return pfd >= 0 ? 1 : 0;
10103         } else { /* legacy mode */
10104                 char probe_name[128];
10105
10106                 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
10107                 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
10108                         return 0;
10109
10110                 (void)remove_kprobe_event_legacy(probe_name, false);
10111                 return 1;
10112         }
10113 }
10114
10115 struct bpf_link *
10116 bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
10117                                 const char *func_name,
10118                                 const struct bpf_kprobe_opts *opts)
10119 {
10120         DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
10121         enum probe_attach_mode attach_mode;
10122         char errmsg[STRERR_BUFSIZE];
10123         char *legacy_probe = NULL;
10124         struct bpf_link *link;
10125         size_t offset;
10126         bool retprobe, legacy;
10127         int pfd, err;
10128
10129         if (!OPTS_VALID(opts, bpf_kprobe_opts))
10130                 return libbpf_err_ptr(-EINVAL);
10131
10132         attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
10133         retprobe = OPTS_GET(opts, retprobe, false);
10134         offset = OPTS_GET(opts, offset, 0);
10135         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
10136
10137         legacy = determine_kprobe_perf_type() < 0;
10138         switch (attach_mode) {
10139         case PROBE_ATTACH_MODE_LEGACY:
10140                 legacy = true;
10141                 pe_opts.force_ioctl_attach = true;
10142                 break;
10143         case PROBE_ATTACH_MODE_PERF:
10144                 if (legacy)
10145                         return libbpf_err_ptr(-ENOTSUP);
10146                 pe_opts.force_ioctl_attach = true;
10147                 break;
10148         case PROBE_ATTACH_MODE_LINK:
10149                 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
10150                         return libbpf_err_ptr(-ENOTSUP);
10151                 break;
10152         case PROBE_ATTACH_MODE_DEFAULT:
10153                 break;
10154         default:
10155                 return libbpf_err_ptr(-EINVAL);
10156         }
10157
10158         if (!legacy) {
10159                 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
10160                                             func_name, offset,
10161                                             -1 /* pid */, 0 /* ref_ctr_off */);
10162         } else {
10163                 char probe_name[256];
10164
10165                 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
10166                                              func_name, offset);
10167
10168                 legacy_probe = strdup(probe_name);
10169                 if (!legacy_probe)
10170                         return libbpf_err_ptr(-ENOMEM);
10171
10172                 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
10173                                                     offset, -1 /* pid */);
10174         }
10175         if (pfd < 0) {
10176                 err = -errno;
10177                 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
10178                         prog->name, retprobe ? "kretprobe" : "kprobe",
10179                         func_name, offset,
10180                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10181                 goto err_out;
10182         }
10183         link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
10184         err = libbpf_get_error(link);
10185         if (err) {
10186                 close(pfd);
10187                 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
10188                         prog->name, retprobe ? "kretprobe" : "kprobe",
10189                         func_name, offset,
10190                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10191                 goto err_clean_legacy;
10192         }
10193         if (legacy) {
10194                 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10195
10196                 perf_link->legacy_probe_name = legacy_probe;
10197                 perf_link->legacy_is_kprobe = true;
10198                 perf_link->legacy_is_retprobe = retprobe;
10199         }
10200
10201         return link;
10202
10203 err_clean_legacy:
10204         if (legacy)
10205                 remove_kprobe_event_legacy(legacy_probe, retprobe);
10206 err_out:
10207         free(legacy_probe);
10208         return libbpf_err_ptr(err);
10209 }
10210
10211 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
10212                                             bool retprobe,
10213                                             const char *func_name)
10214 {
10215         DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
10216                 .retprobe = retprobe,
10217         );
10218
10219         return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
10220 }
10221
10222 struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
10223                                               const char *syscall_name,
10224                                               const struct bpf_ksyscall_opts *opts)
10225 {
10226         LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
10227         char func_name[128];
10228
10229         if (!OPTS_VALID(opts, bpf_ksyscall_opts))
10230                 return libbpf_err_ptr(-EINVAL);
10231
10232         if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
10233         /* arch_specific_syscall_pfx() should never return NULL here
10234          * because it is guarded by kernel_supports(). However, since the
10235          * compiler does not know that, we keep an explicit NULL check as
10236          * well.
10237          */
10238                 snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
10239                          arch_specific_syscall_pfx() ? : "", syscall_name);
10240         } else {
10241                 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
10242         }
10243
10244         kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
10245         kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
10246
10247         return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
10248 }
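
/* For illustration: on x86-64 with syscall wrappers, attaching to the "unlink"
 * syscall via the function above kprobes "__x64_sys_unlink"; without wrapper
 * support it falls back to "__se_sys_unlink".
 */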
10249
10250 /* Adapted from perf/util/string.c */
10251 static bool glob_match(const char *str, const char *pat)
10252 {
10253         while (*str && *pat && *pat != '*') {
10254                 if (*pat == '?') {      /* Matches any single character */
10255                         str++;
10256                         pat++;
10257                         continue;
10258                 }
10259                 if (*str != *pat)
10260                         return false;
10261                 str++;
10262                 pat++;
10263         }
10264         /* Check wild card */
10265         if (*pat == '*') {
10266                 while (*pat == '*')
10267                         pat++;
10268                 if (!*pat) /* Tail wild card matches all */
10269                         return true;
10270                 while (*str)
10271                         if (glob_match(str++, pat))
10272                                 return true;
10273         }
10274         return !*str && !*pat;
10275 }
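
/* For illustration, given the matching rules above:
 *
 *	glob_match("do_sys_openat2", "do_sys_*")  -> true
 *	glob_match("do_sys_open", "do_sys_open?") -> false ('?' needs one char)
 *	glob_match("ksys_read", "*read")          -> true
 */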
10276
10277 struct kprobe_multi_resolve {
10278         const char *pattern;
10279         unsigned long *addrs;
10280         size_t cap;
10281         size_t cnt;
10282 };
10283
10284 static int
10285 resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type,
10286                         const char *sym_name, void *ctx)
10287 {
10288         struct kprobe_multi_resolve *res = ctx;
10289         int err;
10290
10291         if (!glob_match(sym_name, res->pattern))
10292                 return 0;
10293
10294         err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long),
10295                                 res->cnt + 1);
10296         if (err)
10297                 return err;
10298
10299         res->addrs[res->cnt++] = (unsigned long) sym_addr;
10300         return 0;
10301 }
10302
10303 struct bpf_link *
10304 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
10305                                       const char *pattern,
10306                                       const struct bpf_kprobe_multi_opts *opts)
10307 {
10308         LIBBPF_OPTS(bpf_link_create_opts, lopts);
10309         struct kprobe_multi_resolve res = {
10310                 .pattern = pattern,
10311         };
10312         struct bpf_link *link = NULL;
10313         char errmsg[STRERR_BUFSIZE];
10314         const unsigned long *addrs;
10315         int err, link_fd, prog_fd;
10316         const __u64 *cookies;
10317         const char **syms;
10318         bool retprobe;
10319         size_t cnt;
10320
10321         if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
10322                 return libbpf_err_ptr(-EINVAL);
10323
10324         syms    = OPTS_GET(opts, syms, false);
10325         addrs   = OPTS_GET(opts, addrs, false);
10326         cnt     = OPTS_GET(opts, cnt, false);
10327         cookies = OPTS_GET(opts, cookies, false);
10328
10329         if (!pattern && !addrs && !syms)
10330                 return libbpf_err_ptr(-EINVAL);
10331         if (pattern && (addrs || syms || cookies || cnt))
10332                 return libbpf_err_ptr(-EINVAL);
10333         if (!pattern && !cnt)
10334                 return libbpf_err_ptr(-EINVAL);
10335         if (addrs && syms)
10336                 return libbpf_err_ptr(-EINVAL);
10337
10338         if (pattern) {
10339                 err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
10340                 if (err)
10341                         goto error;
10342                 if (!res.cnt) {
10343                         err = -ENOENT;
10344                         goto error;
10345                 }
10346                 addrs = res.addrs;
10347                 cnt = res.cnt;
10348         }
10349
10350         retprobe = OPTS_GET(opts, retprobe, false);
10351
10352         lopts.kprobe_multi.syms = syms;
10353         lopts.kprobe_multi.addrs = addrs;
10354         lopts.kprobe_multi.cookies = cookies;
10355         lopts.kprobe_multi.cnt = cnt;
10356         lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
10357
10358         link = calloc(1, sizeof(*link));
10359         if (!link) {
10360                 err = -ENOMEM;
10361                 goto error;
10362         }
10363         link->detach = &bpf_link__detach_fd;
10364
10365         prog_fd = bpf_program__fd(prog);
10366         link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
10367         if (link_fd < 0) {
10368                 err = -errno;
10369                 pr_warn("prog '%s': failed to attach: %s\n",
10370                         prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10371                 goto error;
10372         }
10373         link->fd = link_fd;
10374         free(res.addrs);
10375         return link;
10376
10377 error:
10378         free(link);
10379         free(res.addrs);
10380         return libbpf_err_ptr(err);
10381 }
10382
10383 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10384 {
10385         DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
10386         unsigned long offset = 0;
10387         const char *func_name;
10388         char *func;
10389         int n;
10390
10391         *link = NULL;
10392
10393         /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
10394         if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
10395                 return 0;
10396
10397         opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
10398         if (opts.retprobe)
10399                 func_name = prog->sec_name + sizeof("kretprobe/") - 1;
10400         else
10401                 func_name = prog->sec_name + sizeof("kprobe/") - 1;
10402
10403         n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
10404         if (n < 1) {
10405                 pr_warn("kprobe name is invalid: %s\n", func_name);
10406                 return -EINVAL;
10407         }
10408         if (opts.retprobe && offset != 0) {
10409                 free(func);
10410                 pr_warn("kretprobes do not support offset specification\n");
10411                 return -EINVAL;
10412         }
10413
10414         opts.offset = offset;
10415         *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
10416         free(func);
10417         return libbpf_get_error(*link);
10418 }
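
/* For illustration, section names this auto-attach handler accepts:
 *
 *	SEC("kprobe/do_unlinkat")      -> kprobe at the function entry
 *	SEC("kprobe/do_unlinkat+0x10") -> kprobe at entry + 0x10 offset
 *	SEC("kretprobe/do_unlinkat")   -> kretprobe (offset not allowed)
 *
 * Plain SEC("kprobe")/SEC("kretprobe") are left for manual attachment.
 */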
10419
10420 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10421 {
10422         LIBBPF_OPTS(bpf_ksyscall_opts, opts);
10423         const char *syscall_name;
10424
10425         *link = NULL;
10426
10427         /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
10428         if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
10429                 return 0;
10430
10431         opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
10432         if (opts.retprobe)
10433                 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
10434         else
10435                 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
10436
10437         *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
10438         return *link ? 0 : -errno;
10439 }
10440
10441 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10442 {
10443         LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
10444         const char *spec;
10445         char *pattern;
10446         int n;
10447
10448         *link = NULL;
10449
10450         /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
10451         if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
10452             strcmp(prog->sec_name, "kretprobe.multi") == 0)
10453                 return 0;
10454
10455         opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
10456         if (opts.retprobe)
10457                 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
10458         else
10459                 spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
10460
10461         n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
10462         if (n < 1) {
10463                 pr_warn("kprobe multi pattern is invalid: %s\n", spec);
10464                 return -EINVAL;
10465         }
10466
10467         *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
10468         free(pattern);
10469         return libbpf_get_error(*link);
10470 }
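
/* For illustration: SEC("kprobe.multi/do_sys_*") creates one
 * BPF_TRACE_KPROBE_MULTI link covering every kallsyms function that matches
 * the "do_sys_*" glob; SEC("kretprobe.multi/...") does the same for function
 * returns.
 */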
10471
10472 static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
10473                                          const char *binary_path, uint64_t offset)
10474 {
10475         int i;
10476
10477         snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
10478
10479         /* sanitize binary_path in the probe name */
10480         for (i = 0; buf[i]; i++) {
10481                 if (!isalnum(buf[i]))
10482                         buf[i] = '_';
10483         }
10484 }
10485
10486 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
10487                                           const char *binary_path, size_t offset)
10488 {
10489         return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
10490                               retprobe ? 'r' : 'p',
10491                               retprobe ? "uretprobes" : "uprobes",
10492                               probe_name, binary_path, offset);
10493 }
10494
10495 static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
10496 {
10497         return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
10498                               retprobe ? "uretprobes" : "uprobes", probe_name);
10499 }
10500
10501 static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10502 {
10503         char file[512];
10504
10505         snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10506                  tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
10507
10508         return parse_uint_from_file(file, "%d\n");
10509 }
10510
10511 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
10512                                          const char *binary_path, size_t offset, int pid)
10513 {
10514         const size_t attr_sz = sizeof(struct perf_event_attr);
10515         struct perf_event_attr attr;
10516         int type, pfd, err;
10517
10518         err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
10519         if (err < 0) {
10520                 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
10521                         binary_path, (size_t)offset, err);
10522                 return err;
10523         }
10524         type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
10525         if (type < 0) {
10526                 err = type;
10527                 pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
10528                         binary_path, offset, err);
10529                 goto err_clean_legacy;
10530         }
10531
10532         memset(&attr, 0, attr_sz);
10533         attr.size = attr_sz;
10534         attr.config = type;
10535         attr.type = PERF_TYPE_TRACEPOINT;
10536
10537         pfd = syscall(__NR_perf_event_open, &attr,
10538                       pid < 0 ? -1 : pid, /* pid */
10539                       pid == -1 ? 0 : -1, /* cpu */
10540                       -1 /* group_fd */,  PERF_FLAG_FD_CLOEXEC);
10541         if (pfd < 0) {
10542                 err = -errno;
10543                 pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
10544                 goto err_clean_legacy;
10545         }
10546         return pfd;
10547
10548 err_clean_legacy:
10549         /* Clear the newly added legacy uprobe_event */
10550         remove_uprobe_event_legacy(probe_name, retprobe);
10551         return err;
10552 }
10553
10554 /* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
10555 static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
10556 {
10557         while ((scn = elf_nextscn(elf, scn)) != NULL) {
10558                 GElf_Shdr sh;
10559
10560                 if (!gelf_getshdr(scn, &sh))
10561                         continue;
10562                 if (sh.sh_type == sh_type)
10563                         return scn;
10564         }
10565         return NULL;
10566 }
10567
10568 /* Find offset of function name in the provided ELF object. "binary_path" is
10569  * the path to the ELF binary represented by "elf", and is used only for
10570  * error reporting. "name" matches a symbol name or name@@LIB for library
10571  * functions.
10572  */
10573 static long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
10574 {
10575         int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
10576         bool is_shared_lib, is_name_qualified;
10577         long ret = -ENOENT;
10578         size_t name_len;
10579         GElf_Ehdr ehdr;
10580
10581         if (!gelf_getehdr(elf, &ehdr)) {
10582                 pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
10583                 ret = -LIBBPF_ERRNO__FORMAT;
10584                 goto out;
10585         }
10586         /* for shared lib case, we do not need to calculate relative offset */
10587         is_shared_lib = ehdr.e_type == ET_DYN;
10588
10589         name_len = strlen(name);
10590         /* Does name specify "@@LIB"? */
10591         is_name_qualified = strstr(name, "@@") != NULL;
10592
10593         /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
10594          * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
10595  * linked binary may not have SHT_DYNSYM, so absence of a section should not be
10596          * reported as a warning/error.
10597          */
10598         for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
10599                 size_t nr_syms, strtabidx, idx;
10600                 Elf_Data *symbols = NULL;
10601                 Elf_Scn *scn = NULL;
10602                 int last_bind = -1;
10603                 const char *sname;
10604                 GElf_Shdr sh;
10605
10606                 scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL);
10607                 if (!scn) {
10608                         pr_debug("elf: failed to find symbol table ELF sections in '%s'\n",
10609                                  binary_path);
10610                         continue;
10611                 }
10612                 if (!gelf_getshdr(scn, &sh))
10613                         continue;
10614                 strtabidx = sh.sh_link;
10615                 symbols = elf_getdata(scn, 0);
10616                 if (!symbols) {
10617                         pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n",
10618                                 binary_path, elf_errmsg(-1));
10619                         ret = -LIBBPF_ERRNO__FORMAT;
10620                         goto out;
10621                 }
10622                 nr_syms = symbols->d_size / sh.sh_entsize;
10623
10624                 for (idx = 0; idx < nr_syms; idx++) {
10625                         int curr_bind;
10626                         GElf_Sym sym;
10627                         Elf_Scn *sym_scn;
10628                         GElf_Shdr sym_sh;
10629
10630                         if (!gelf_getsym(symbols, idx, &sym))
10631                                 continue;
10632
10633                         if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
10634                                 continue;
10635
10636                         sname = elf_strptr(elf, strtabidx, sym.st_name);
10637                         if (!sname)
10638                                 continue;
10639
10640                         curr_bind = GELF_ST_BIND(sym.st_info);
10641
10642                         /* User can specify func, func@@LIB or func@@LIB_VERSION. */
10643                         if (strncmp(sname, name, name_len) != 0)
10644                                 continue;
10645                         /* ...but we don't want a search for "foo" to also match "foo2", so any
10646                          * additional characters in sname should be of the form "@@LIB".
10647                          */
10648                         if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@')
10649                                 continue;
10650
10651                         if (ret >= 0) {
10652                                 /* handle multiple matches */
10653                                 if (last_bind != STB_WEAK && curr_bind != STB_WEAK) {
10654                                         /* Only accept one non-weak bind. */
10655                                         pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n",
10656                                                 sname, name, binary_path);
10657                                         ret = -LIBBPF_ERRNO__FORMAT;
10658                                         goto out;
10659                                 } else if (curr_bind == STB_WEAK) {
10660                                         /* already have a non-weak bind, and
10661                                          * this is a weak bind, so ignore.
10662                                          */
10663                                         continue;
10664                                 }
10665                         }
10666
10667                         /* Transform the symbol's virtual address (absolute for
10668                          * binaries and relative for shared libs) into a file
10669                          * offset, which is what the kernel expects for
10670                          * uprobe/uretprobe attachment.
10671                          * See Documentation/trace/uprobetracer.rst for more
10672                          * details.
10673                          * This is done by looking up the symbol's containing
10674                          * section header and using its virtual address
10675                          * (sh_addr) and corresponding file offset (sh_offset)
10676                          * to transform sym.st_value (a virtual address) into
10677                          * the desired final file offset.
10678                          */
10679                         sym_scn = elf_getscn(elf, sym.st_shndx);
10680                         if (!sym_scn)
10681                                 continue;
10682                         if (!gelf_getshdr(sym_scn, &sym_sh))
10683                                 continue;
10684
10685                         ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset;
10686                         last_bind = curr_bind;
10687                 }
10688                 if (ret > 0)
10689                         break;
10690         }
10691
10692         if (ret > 0) {
10693                 pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path,
10694                          ret);
10695         } else {
10696                 if (ret == 0) {
10697                         pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path,
10698                                 is_shared_lib ? "should not be 0 in a shared library" :
10699                                                 "try using shared library path instead");
10700                         ret = -ENOENT;
10701                 } else {
10702                         pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path);
10703                 }
10704         }
10705 out:
10706         return ret;
10707 }
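
/* Worked example of the offset transformation above (made-up numbers): a
 * symbol with st_value = 0x401130 in a section with sh_addr = 0x401000 and
 * sh_offset = 0x1000 yields the file offset
 * 0x401130 - 0x401000 + 0x1000 = 0x1130.
 */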
10708
10709 /* Find offset of function name in ELF object specified by path. "name" matches
10710  * symbol name or name@@LIB for library functions.
10711  */
10712 static long elf_find_func_offset_from_file(const char *binary_path, const char *name)
10713 {
10714         char errmsg[STRERR_BUFSIZE];
10715         long ret = -ENOENT;
10716         Elf *elf;
10717         int fd;
10718
10719         fd = open(binary_path, O_RDONLY | O_CLOEXEC);
10720         if (fd < 0) {
10721                 ret = -errno;
10722                 pr_warn("failed to open %s: %s\n", binary_path,
10723                         libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
10724                 return ret;
10725         }
10726         elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
10727         if (!elf) {
10728                 pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
10729                 close(fd);
10730                 return -LIBBPF_ERRNO__FORMAT;
10731         }
10732
10733         ret = elf_find_func_offset(elf, binary_path, name);
10734         elf_end(elf);
10735         close(fd);
10736         return ret;
10737 }
10738
10739 /* Find offset of function name in archive specified by path. Currently
10740  * supported are .zip files that do not compress their contents, as used on
10741  * Android in the form of APKs, for example. "file_name" is the name of the ELF
10742  * file inside the archive. "func_name" matches symbol name or name@@LIB for
10743  * library functions.
10744  *
10745  * An overview of the APK format is provided here:
10746  * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
10747  */
10748 static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
10749                                               const char *func_name)
10750 {
10751         struct zip_archive *archive;
10752         struct zip_entry entry;
10753         long ret;
10754         Elf *elf;
10755
10756         archive = zip_archive_open(archive_path);
10757         if (IS_ERR(archive)) {
10758                 ret = PTR_ERR(archive);
10759                 pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
10760                 return ret;
10761         }
10762
10763         ret = zip_archive_find_entry(archive, file_name, &entry);
10764         if (ret) {
10765                 pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
10766                         archive_path, ret);
10767                 goto out;
10768         }
10769         pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
10770                  (unsigned long)entry.data_offset);
10771
10772         if (entry.compression) {
10773                 pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
10774                         archive_path);
10775                 ret = -LIBBPF_ERRNO__FORMAT;
10776                 goto out;
10777         }
10778
10779         elf = elf_memory((void *)entry.data, entry.data_length);
10780         if (!elf) {
10781                 pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
10782                         elf_errmsg(-1));
10783                 ret = -LIBBPF_ERRNO__LIBELF;
10784                 goto out;
10785         }
10786
10787         ret = elf_find_func_offset(elf, file_name, func_name);
10788         if (ret > 0) {
10789                 pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
10790                          func_name, file_name, archive_path, entry.data_offset, ret,
10791                          ret + entry.data_offset);
10792                 ret += entry.data_offset;
10793         }
10794         elf_end(elf);
10795
10796 out:
10797         zip_archive_close(archive);
10798         return ret;
10799 }
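
/* For illustration, the "archive!/member" path convention handled by
 * bpf_program__attach_uprobe_opts() below ends up here; a hypothetical call:
 *
 *	elf_find_func_offset_from_archive("/data/app/foo.apk",
 *					  "lib/arm64-v8a/libfoo.so", "foo_fn");
 */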
10800
10801 static const char *arch_specific_lib_paths(void)
10802 {
10803         /*
10804          * Based on https://packages.debian.org/sid/libc6.
10805          *
10806          * Assume that the traced program is built for the same architecture
10807          * as libbpf, which should cover the vast majority of cases.
10808          */
10809 #if defined(__x86_64__)
10810         return "/lib/x86_64-linux-gnu";
10811 #elif defined(__i386__)
10812         return "/lib/i386-linux-gnu";
10813 #elif defined(__s390x__)
10814         return "/lib/s390x-linux-gnu";
10815 #elif defined(__s390__)
10816         return "/lib/s390-linux-gnu";
10817 #elif defined(__arm__) && defined(__SOFTFP__)
10818         return "/lib/arm-linux-gnueabi";
10819 #elif defined(__arm__) && !defined(__SOFTFP__)
10820         return "/lib/arm-linux-gnueabihf";
10821 #elif defined(__aarch64__)
10822         return "/lib/aarch64-linux-gnu";
10823 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
10824         return "/lib/mips64el-linux-gnuabi64";
10825 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
10826         return "/lib/mipsel-linux-gnu";
10827 #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
10828         return "/lib/powerpc64le-linux-gnu";
10829 #elif defined(__sparc__) && defined(__arch64__)
10830         return "/lib/sparc64-linux-gnu";
10831 #elif defined(__riscv) && __riscv_xlen == 64
10832         return "/lib/riscv64-linux-gnu";
10833 #else
10834         return NULL;
10835 #endif
10836 }
10837
10838 /* Get full path to program/shared library. */
10839 static int resolve_full_path(const char *file, char *result, size_t result_sz)
10840 {
10841         const char *search_paths[3] = {};
10842         int i, perm;
10843
10844         if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
10845                 search_paths[0] = getenv("LD_LIBRARY_PATH");
10846                 search_paths[1] = "/usr/lib64:/usr/lib";
10847                 search_paths[2] = arch_specific_lib_paths();
10848                 perm = R_OK;
10849         } else {
10850                 search_paths[0] = getenv("PATH");
10851                 search_paths[1] = "/usr/bin:/usr/sbin";
10852                 perm = R_OK | X_OK;
10853         }
10854
10855         for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
10856                 const char *s;
10857
10858                 if (!search_paths[i])
10859                         continue;
10860                 for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
10861                         char *next_path;
10862                         int seg_len;
10863
10864                         if (s[0] == ':')
10865                                 s++;
10866                         next_path = strchr(s, ':');
10867                         seg_len = next_path ? next_path - s : strlen(s);
10868                         if (!seg_len)
10869                                 continue;
10870                         snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
10871                         /* ensure it has required permissions */
10872                         if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
10873                                 continue;
10874                         pr_debug("resolved '%s' to '%s'\n", file, result);
10875                         return 0;
10876                 }
10877         }
10878         return -ENOENT;
10879 }
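
/* For illustration: resolve_full_path("libc.so.6", buf, sizeof(buf)) searches
 * LD_LIBRARY_PATH, then /usr/lib64:/usr/lib, then the arch-specific directory,
 * and on a Debian-style x86-64 system would typically resolve to
 * "/lib/x86_64-linux-gnu/libc.so.6". Plain program names are searched in PATH
 * instead and must also be executable.
 */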
10880
10881 LIBBPF_API struct bpf_link *
10882 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
10883                                 const char *binary_path, size_t func_offset,
10884                                 const struct bpf_uprobe_opts *opts)
10885 {
10886         const char *archive_path = NULL, *archive_sep = NULL;
10887         char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
10888         DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
10889         enum probe_attach_mode attach_mode;
10890         char full_path[PATH_MAX];
10891         struct bpf_link *link;
10892         size_t ref_ctr_off;
10893         int pfd, err;
10894         bool retprobe, legacy;
10895         const char *func_name;
10896
10897         if (!OPTS_VALID(opts, bpf_uprobe_opts))
10898                 return libbpf_err_ptr(-EINVAL);
10899
10900         attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
10901         retprobe = OPTS_GET(opts, retprobe, false);
10902         ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
10903         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
10904
10905         if (!binary_path)
10906                 return libbpf_err_ptr(-EINVAL);
10907
10908         /* Check if "binary_path" refers to an archive. */
10909         archive_sep = strstr(binary_path, "!/");
10910         if (archive_sep) {
10911                 full_path[0] = '\0';
10912                 libbpf_strlcpy(full_path, binary_path,
10913                                min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
10914                 archive_path = full_path;
10915                 binary_path = archive_sep + 2;
10916         } else if (!strchr(binary_path, '/')) {
10917                 err = resolve_full_path(binary_path, full_path, sizeof(full_path));
10918                 if (err) {
10919                         pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
10920                                 prog->name, binary_path, err);
10921                         return libbpf_err_ptr(err);
10922                 }
10923                 binary_path = full_path;
10924         }
10925         func_name = OPTS_GET(opts, func_name, NULL);
10926         if (func_name) {
10927                 long sym_off;
10928
10929                 if (archive_path) {
10930                         sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
10931                                                                     func_name);
10932                         binary_path = archive_path;
10933                 } else {
10934                         sym_off = elf_find_func_offset_from_file(binary_path, func_name);
10935                 }
10936                 if (sym_off < 0)
10937                         return libbpf_err_ptr(sym_off);
10938                 func_offset += sym_off;
10939         }
10940
10941         legacy = determine_uprobe_perf_type() < 0;
10942         switch (attach_mode) {
10943         case PROBE_ATTACH_MODE_LEGACY:
10944                 legacy = true;
10945                 pe_opts.force_ioctl_attach = true;
10946                 break;
10947         case PROBE_ATTACH_MODE_PERF:
10948                 if (legacy)
10949                         return libbpf_err_ptr(-ENOTSUP);
10950                 pe_opts.force_ioctl_attach = true;
10951                 break;
10952         case PROBE_ATTACH_MODE_LINK:
10953                 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
10954                         return libbpf_err_ptr(-ENOTSUP);
10955                 break;
10956         case PROBE_ATTACH_MODE_DEFAULT:
10957                 break;
10958         default:
10959                 return libbpf_err_ptr(-EINVAL);
10960         }
10961
10962         if (!legacy) {
10963                 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
10964                                             func_offset, pid, ref_ctr_off);
10965         } else {
10966                 char probe_name[PATH_MAX + 64];
10967
10968                 if (ref_ctr_off)
10969                         return libbpf_err_ptr(-EINVAL);
10970
10971                 gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
10972                                              binary_path, func_offset);
10973
10974                 legacy_probe = strdup(probe_name);
10975                 if (!legacy_probe)
10976                         return libbpf_err_ptr(-ENOMEM);
10977
10978                 pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
10979                                                     binary_path, func_offset, pid);
10980         }
10981         if (pfd < 0) {
10982                 err = -errno;
10983                 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
10984                         prog->name, retprobe ? "uretprobe" : "uprobe",
10985                         binary_path, func_offset,
10986                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10987                 goto err_out;
10988         }
10989
10990         link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
10991         err = libbpf_get_error(link);
10992         if (err) {
10993                 close(pfd);
10994                 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
10995                         prog->name, retprobe ? "uretprobe" : "uprobe",
10996                         binary_path, func_offset,
10997                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10998                 goto err_clean_legacy;
10999         }
11000         if (legacy) {
11001                 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11002
11003                 perf_link->legacy_probe_name = legacy_probe;
11004                 perf_link->legacy_is_kprobe = false;
11005                 perf_link->legacy_is_retprobe = retprobe;
11006         }
11007         return link;
11008
11009 err_clean_legacy:
11010         if (legacy)
11011                 remove_uprobe_event_legacy(legacy_probe, retprobe);
11012 err_out:
11013         free(legacy_probe);
11014         return libbpf_err_ptr(err);
11015 }
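/* Usage sketch (hypothetical skeleton/program names, for illustration only;
 * with libbpf 1.0 semantics, NULL is returned on error and errno is set):
 *
 *	LIBBPF_OPTS(bpf_uprobe_opts, uopts,
 *		.func_name = "malloc",
 *		.retprobe = false,
 *	);
 *	struct bpf_link *link;
 *
 *	// pid -1 means "any process"; func_offset 0 is added to the offset
 *	// resolved from .func_name
 *	link = bpf_program__attach_uprobe_opts(skel->progs.handle_malloc, -1,
 *					       "libc.so.6", 0, &uopts);
 *	if (!link)
 *		goto cleanup;
 */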
11016
11017 /* Format of u[ret]probe section definition supporting auto-attach:
11018  * u[ret]probe/binary:function[+offset]
11019  *
11020  * binary can be an absolute/relative path or a filename; the latter is resolved to a
11021  * full binary path via bpf_program__attach_uprobe_opts.
11022  *
11023  * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
11024  * specified (and auto-attach is not possible) or the above format is specified for
11025  * auto-attach.
11026  */
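/* Examples of the format above (hypothetical binary and function names,
 * for illustration):
 *
 *	SEC("uprobe//usr/lib/libc.so.6:malloc")
 *	int handle_malloc(struct pt_regs *ctx)
 *	{
 *		return 0;
 *	}
 *
 *	SEC("uprobe/my_binary:my_func+0x10")	// offset past function start
 *	SEC("uretprobe/my_binary:my_func")	// offsets not allowed here
 */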
11027 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11028 {
11029         DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
11030         char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
11031         int n, ret = -EINVAL;
11032         long offset = 0;
11033
11034         *link = NULL;
11035
11036         n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[a-zA-Z0-9_.]+%li",
11037                    &probe_type, &binary_path, &func_name, &offset);
11038         switch (n) {
11039         case 1:
11040                 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
11041                 ret = 0;
11042                 break;
11043         case 2:
11044                 pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
11045                         prog->name, prog->sec_name);
11046                 break;
11047         case 3:
11048         case 4:
11049                 opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
11050                                 strcmp(probe_type, "uretprobe.s") == 0;
11051                 if (opts.retprobe && offset != 0) {
11052                         pr_warn("prog '%s': uretprobes do not support offset specification\n",
11053                                 prog->name);
11054                         break;
11055                 }
11056                 opts.func_name = func_name;
11057                 *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
11058                 ret = libbpf_get_error(*link);
11059                 break;
11060         default:
11061                 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11062                         prog->sec_name);
11063                 break;
11064         }
11065         free(probe_type);
11066         free(binary_path);
11067         free(func_name);
11068
11069         return ret;
11070 }
11071
11072 struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
11073                                             bool retprobe, pid_t pid,
11074                                             const char *binary_path,
11075                                             size_t func_offset)
11076 {
11077         DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
11078
11079         return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
11080 }
11081
11082 struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
11083                                           pid_t pid, const char *binary_path,
11084                                           const char *usdt_provider, const char *usdt_name,
11085                                           const struct bpf_usdt_opts *opts)
11086 {
11087         char resolved_path[512];
11088         struct bpf_object *obj = prog->obj;
11089         struct bpf_link *link;
11090         __u64 usdt_cookie;
11091         int err;
11092
11093         if (!OPTS_VALID(opts, bpf_uprobe_opts))
11094                 return libbpf_err_ptr(-EINVAL);
11095
11096         if (bpf_program__fd(prog) < 0) {
11097                 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
11098                         prog->name);
11099                 return libbpf_err_ptr(-EINVAL);
11100         }
11101
11102         if (!binary_path)
11103                 return libbpf_err_ptr(-EINVAL);
11104
11105         if (!strchr(binary_path, '/')) {
11106                 err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
11107                 if (err) {
11108                         pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11109                                 prog->name, binary_path, err);
11110                         return libbpf_err_ptr(err);
11111                 }
11112                 binary_path = resolved_path;
11113         }
11114
11115         /* USDT manager is instantiated lazily on first USDT attach. It will
11116          * be destroyed together with BPF object in bpf_object__close().
11117          */
11118         if (IS_ERR(obj->usdt_man))
11119                 return libbpf_ptr(obj->usdt_man);
11120         if (!obj->usdt_man) {
11121                 obj->usdt_man = usdt_manager_new(obj);
11122                 if (IS_ERR(obj->usdt_man))
11123                         return libbpf_ptr(obj->usdt_man);
11124         }
11125
11126         usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
11127         link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
11128                                         usdt_provider, usdt_name, usdt_cookie);
11129         err = libbpf_get_error(link);
11130         if (err)
11131                 return libbpf_err_ptr(err);
11132         return link;
11133 }
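/* Usage sketch (hypothetical provider/probe names, for illustration;
 * error handling abbreviated):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_usdt(skel->progs.handle_query, -1,
 *					"/usr/sbin/mysqld", "mysql",
 *					"query__start", NULL);
 *	if (!link)
 *		goto cleanup; // error code is in errno
 */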
11134
11135 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11136 {
11137         char *path = NULL, *provider = NULL, *name = NULL;
11138         const char *sec_name;
11139         int n, err;
11140
11141         sec_name = bpf_program__section_name(prog);
11142         if (strcmp(sec_name, "usdt") == 0) {
11143                 /* no auto-attach for just SEC("usdt") */
11144                 *link = NULL;
11145                 return 0;
11146         }
11147
11148         n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
11149         if (n != 3) {
11150                 pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
11151                         sec_name);
11152                 err = -EINVAL;
11153         } else {
11154                 *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
11155                                                  provider, name, NULL);
11156                 err = libbpf_get_error(*link);
11157         }
11158         free(path);
11159         free(provider);
11160         free(name);
11161         return err;
11162 }
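/* The matching auto-attach section format is
 * SEC("usdt/<path>:<provider>:<name>"), e.g. (hypothetical names):
 *
 *	SEC("usdt/libmine.so:my_provider:my_probe")
 *	int BPF_USDT(handle_my_probe, int arg1)
 *	{
 *		return 0;
 *	}
 */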
11163
11164 static int determine_tracepoint_id(const char *tp_category,
11165                                    const char *tp_name)
11166 {
11167         char file[PATH_MAX];
11168         int ret;
11169
11170         ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
11171                        tracefs_path(), tp_category, tp_name);
11172         if (ret < 0)
11173                 return -errno;
11174         if (ret >= sizeof(file)) {
11175                 pr_debug("tracepoint %s/%s path is too long\n",
11176                          tp_category, tp_name);
11177                 return -E2BIG;
11178         }
11179         return parse_uint_from_file(file, "%d\n");
11180 }
11181
11182 static int perf_event_open_tracepoint(const char *tp_category,
11183                                       const char *tp_name)
11184 {
11185         const size_t attr_sz = sizeof(struct perf_event_attr);
11186         struct perf_event_attr attr;
11187         char errmsg[STRERR_BUFSIZE];
11188         int tp_id, pfd, err;
11189
11190         tp_id = determine_tracepoint_id(tp_category, tp_name);
11191         if (tp_id < 0) {
11192                 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
11193                         tp_category, tp_name,
11194                         libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
11195                 return tp_id;
11196         }
11197
11198         memset(&attr, 0, attr_sz);
11199         attr.type = PERF_TYPE_TRACEPOINT;
11200         attr.size = attr_sz;
11201         attr.config = tp_id;
11202
11203         pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
11204                       -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11205         if (pfd < 0) {
11206                 err = -errno;
11207                 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
11208                         tp_category, tp_name,
11209                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11210                 return err;
11211         }
11212         return pfd;
11213 }
11214
11215 struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
11216                                                      const char *tp_category,
11217                                                      const char *tp_name,
11218                                                      const struct bpf_tracepoint_opts *opts)
11219 {
11220         DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11221         char errmsg[STRERR_BUFSIZE];
11222         struct bpf_link *link;
11223         int pfd, err;
11224
11225         if (!OPTS_VALID(opts, bpf_tracepoint_opts))
11226                 return libbpf_err_ptr(-EINVAL);
11227
11228         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11229
11230         pfd = perf_event_open_tracepoint(tp_category, tp_name);
11231         if (pfd < 0) {
11232                 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
11233                         prog->name, tp_category, tp_name,
11234                         libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
11235                 return libbpf_err_ptr(pfd);
11236         }
11237         link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11238         err = libbpf_get_error(link);
11239         if (err) {
11240                 close(pfd);
11241                 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
11242                         prog->name, tp_category, tp_name,
11243                         libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11244                 return libbpf_err_ptr(err);
11245         }
11246         return link;
11247 }
11248
11249 struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
11250                                                 const char *tp_category,
11251                                                 const char *tp_name)
11252 {
11253         return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
11254 }
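/* Usage sketch (illustration only; sched/sched_process_exec is one example
 * of a <category>/<name> pair under tracefs events/):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(skel->progs.handle_exec,
 *					      "sched", "sched_process_exec");
 *	if (!link)
 *		goto cleanup;
 */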
11255
11256 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11257 {
11258         char *sec_name, *tp_cat, *tp_name;
11259
11260         *link = NULL;
11261
11262         /* no auto-attach for SEC("tp") or SEC("tracepoint") */
11263         if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
11264                 return 0;
11265
11266         sec_name = strdup(prog->sec_name);
11267         if (!sec_name)
11268                 return -ENOMEM;
11269
11270         /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
11271         if (str_has_pfx(prog->sec_name, "tp/"))
11272                 tp_cat = sec_name + sizeof("tp/") - 1;
11273         else
11274                 tp_cat = sec_name + sizeof("tracepoint/") - 1;
11275         tp_name = strchr(tp_cat, '/');
11276         if (!tp_name) {
11277                 free(sec_name);
11278                 return -EINVAL;
11279         }
11280         *tp_name = '\0';
11281         tp_name++;
11282
11283         *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
11284         free(sec_name);
11285         return libbpf_get_error(*link);
11286 }
11287
11288 struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
11289                                                     const char *tp_name)
11290 {
11291         char errmsg[STRERR_BUFSIZE];
11292         struct bpf_link *link;
11293         int prog_fd, pfd;
11294
11295         prog_fd = bpf_program__fd(prog);
11296         if (prog_fd < 0) {
11297                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
11298                 return libbpf_err_ptr(-EINVAL);
11299         }
11300
11301         link = calloc(1, sizeof(*link));
11302         if (!link)
11303                 return libbpf_err_ptr(-ENOMEM);
11304         link->detach = &bpf_link__detach_fd;
11305
11306         pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
11307         if (pfd < 0) {
11308                 pfd = -errno;
11309                 free(link);
11310                 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
11311                         prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
11312                 return libbpf_err_ptr(pfd);
11313         }
11314         link->fd = pfd;
11315         return link;
11316 }
11317
11318 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11319 {
11320         static const char *const prefixes[] = {
11321                 "raw_tp",
11322                 "raw_tracepoint",
11323                 "raw_tp.w",
11324                 "raw_tracepoint.w",
11325         };
11326         size_t i;
11327         const char *tp_name = NULL;
11328
11329         *link = NULL;
11330
11331         for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
11332                 size_t pfx_len;
11333
11334                 if (!str_has_pfx(prog->sec_name, prefixes[i]))
11335                         continue;
11336
11337                 pfx_len = strlen(prefixes[i]);
11338                 /* no auto-attach case, e.g., bare SEC("raw_tp") */
11339                 if (prog->sec_name[pfx_len] == '\0')
11340                         return 0;
11341
11342                 if (prog->sec_name[pfx_len] != '/')
11343                         continue;
11344
11345                 tp_name = prog->sec_name + pfx_len + 1;
11346                 break;
11347         }
11348
11349         if (!tp_name) {
11350                 pr_warn("prog '%s': invalid section name '%s'\n",
11351                         prog->name, prog->sec_name);
11352                 return -EINVAL;
11353         }
11354
11355         *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
11356         return libbpf_get_error(*link);
11357 }
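/* Accepted section names, by way of example:
 *
 *	SEC("raw_tp/sched_switch")	- auto-attach to the sched_switch
 *					  raw tracepoint
 *	SEC("raw_tracepoint.w/<name>")	- writable raw tracepoint variant
 *	SEC("raw_tp")			- valid, but no auto-attach
 */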
11358
11359 /* Common logic for all BPF program types that attach to a btf_id */
11360 static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
11361                                                    const struct bpf_trace_opts *opts)
11362 {
11363         LIBBPF_OPTS(bpf_link_create_opts, link_opts);
11364         char errmsg[STRERR_BUFSIZE];
11365         struct bpf_link *link;
11366         int prog_fd, pfd;
11367
11368         if (!OPTS_VALID(opts, bpf_trace_opts))
11369                 return libbpf_err_ptr(-EINVAL);
11370
11371         prog_fd = bpf_program__fd(prog);
11372         if (prog_fd < 0) {
11373                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
11374                 return libbpf_err_ptr(-EINVAL);
11375         }
11376
11377         link = calloc(1, sizeof(*link));
11378         if (!link)
11379                 return libbpf_err_ptr(-ENOMEM);
11380         link->detach = &bpf_link__detach_fd;
11381
11382         /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
11383         link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
11384         pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
11385         if (pfd < 0) {
11386                 pfd = -errno;
11387                 free(link);
11388                 pr_warn("prog '%s': failed to attach: %s\n",
11389                         prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
11390                 return libbpf_err_ptr(pfd);
11391         }
11392         link->fd = pfd;
11393         return link;
11394 }
11395
11396 struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
11397 {
11398         return bpf_program__attach_btf_id(prog, NULL);
11399 }
11400
11401 struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
11402                                                 const struct bpf_trace_opts *opts)
11403 {
11404         return bpf_program__attach_btf_id(prog, opts);
11405 }
11406
11407 struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
11408 {
11409         return bpf_program__attach_btf_id(prog, NULL);
11410 }
11411
11412 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11413 {
11414         *link = bpf_program__attach_trace(prog);
11415         return libbpf_get_error(*link);
11416 }
11417
11418 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11419 {
11420         *link = bpf_program__attach_lsm(prog);
11421         return libbpf_get_error(*link);
11422 }
11423
11424 static struct bpf_link *
11425 bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id,
11426                        const char *target_name)
11427 {
11428         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
11429                             .target_btf_id = btf_id);
11430         enum bpf_attach_type attach_type;
11431         char errmsg[STRERR_BUFSIZE];
11432         struct bpf_link *link;
11433         int prog_fd, link_fd;
11434
11435         prog_fd = bpf_program__fd(prog);
11436         if (prog_fd < 0) {
11437                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
11438                 return libbpf_err_ptr(-EINVAL);
11439         }
11440
11441         link = calloc(1, sizeof(*link));
11442         if (!link)
11443                 return libbpf_err_ptr(-ENOMEM);
11444         link->detach = &bpf_link__detach_fd;
11445
11446         attach_type = bpf_program__expected_attach_type(prog);
11447         link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
11448         if (link_fd < 0) {
11449                 link_fd = -errno;
11450                 free(link);
11451                 pr_warn("prog '%s': failed to attach to %s: %s\n",
11452                         prog->name, target_name,
11453                         libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
11454                 return libbpf_err_ptr(link_fd);
11455         }
11456         link->fd = link_fd;
11457         return link;
11458 }
11459
11460 struct bpf_link *
11461 bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
11462 {
11463         return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
11464 }
11465
11466 struct bpf_link *
11467 bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
11468 {
11469         return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
11470 }
11471
11472 struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
11473 {
11474         /* target_fd/target_ifindex use the same field in LINK_CREATE */
11475         return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
11476 }
11477
11478 struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
11479                                               int target_fd,
11480                                               const char *attach_func_name)
11481 {
11482         int btf_id;
11483
11484         if (!!target_fd != !!attach_func_name) {
11485                 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
11486                         prog->name);
11487                 return libbpf_err_ptr(-EINVAL);
11488         }
11489
11490         if (prog->type != BPF_PROG_TYPE_EXT) {
11491                 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
11492                         prog->name);
11493                 return libbpf_err_ptr(-EINVAL);
11494         }
11495
11496         if (target_fd) {
11497                 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
11498                 if (btf_id < 0)
11499                         return libbpf_err_ptr(btf_id);
11500
11501                 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
11502         } else {
11503                 /* no target, so use raw_tracepoint_open for compatibility
11504                  * with old kernels
11505                  */
11506                 return bpf_program__attach_trace(prog);
11507         }
11508 }
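/* Usage sketch (hypothetical program and function names; the target program
 * must already be loaded so that its FD and BTF are available):
 *
 *	int tgt_fd = bpf_program__fd(skel->progs.xdp_main);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_freplace(skel->progs.xdp_patch, tgt_fd,
 *					    "xdp_helper_func");
 *	if (!link)
 *		goto cleanup;
 */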
11509
11510 struct bpf_link *
11511 bpf_program__attach_iter(const struct bpf_program *prog,
11512                          const struct bpf_iter_attach_opts *opts)
11513 {
11514         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
11515         char errmsg[STRERR_BUFSIZE];
11516         struct bpf_link *link;
11517         int prog_fd, link_fd;
11518         __u32 target_fd = 0;
11519
11520         if (!OPTS_VALID(opts, bpf_iter_attach_opts))
11521                 return libbpf_err_ptr(-EINVAL);
11522
11523         link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
11524         link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
11525
11526         prog_fd = bpf_program__fd(prog);
11527         if (prog_fd < 0) {
11528                 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
11529                 return libbpf_err_ptr(-EINVAL);
11530         }
11531
11532         link = calloc(1, sizeof(*link));
11533         if (!link)
11534                 return libbpf_err_ptr(-ENOMEM);
11535         link->detach = &bpf_link__detach_fd;
11536
11537         link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
11538                                   &link_create_opts);
11539         if (link_fd < 0) {
11540                 link_fd = -errno;
11541                 free(link);
11542                 pr_warn("prog '%s': failed to attach to iterator: %s\n",
11543                         prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
11544                 return libbpf_err_ptr(link_fd);
11545         }
11546         link->fd = link_fd;
11547         return link;
11548 }
11549
11550 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11551 {
11552         *link = bpf_program__attach_iter(prog, NULL);
11553         return libbpf_get_error(*link);
11554 }
11555
11556 struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
11557 {
11558         struct bpf_link *link = NULL;
11559         int err;
11560
11561         if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
11562                 return libbpf_err_ptr(-EOPNOTSUPP);
11563
11564         err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
11565         if (err)
11566                 return libbpf_err_ptr(err);
11567
11568         /* When calling bpf_program__attach() explicitly, auto-attach support
11569          * is expected to work, so NULL returned link is considered an error.
11570          * This is different for skeleton's attach, see comment in
11571          * bpf_object__attach_skeleton().
11572          */
11573         if (!link)
11574                 return libbpf_err_ptr(-EOPNOTSUPP);
11575
11576         return link;
11577 }
11578
11579 static int bpf_link__detach_struct_ops(struct bpf_link *link)
11580 {
11581         __u32 zero = 0;
11582
11583         if (bpf_map_delete_elem(link->fd, &zero))
11584                 return -errno;
11585
11586         return 0;
11587 }
11588
11589 struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
11590 {
11591         struct bpf_struct_ops *st_ops;
11592         struct bpf_link *link;
11593         __u32 i, zero = 0;
11594         int err;
11595
11596         if (!bpf_map__is_struct_ops(map) || map->fd == -1)
11597                 return libbpf_err_ptr(-EINVAL);
11598
11599         link = calloc(1, sizeof(*link));
11600         if (!link)
11601                 return libbpf_err_ptr(-ENOMEM);
11602
11603         st_ops = map->st_ops;
11604         for (i = 0; i < btf_vlen(st_ops->type); i++) {
11605                 struct bpf_program *prog = st_ops->progs[i];
11606                 void *kern_data;
11607                 int prog_fd;
11608
11609                 if (!prog)
11610                         continue;
11611
11612                 prog_fd = bpf_program__fd(prog);
11613                 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
11614                 *(unsigned long *)kern_data = prog_fd;
11615         }
11616
11617         err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
11618         if (err) {
11619                 err = -errno;
11620                 free(link);
11621                 return libbpf_err_ptr(err);
11622         }
11623
11624         link->detach = bpf_link__detach_struct_ops;
11625         link->fd = map->fd;
11626
11627         return link;
11628 }
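/* Usage sketch (hypothetical struct_ops map name, e.g. a custom TCP
 * congestion control implementation; the object must be loaded first):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_map__attach_struct_ops(skel->maps.my_cong_ops);
 *	if (!link)
 *		goto cleanup; // errno carries the error code
 */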
11629
11630 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
11631                                                           void *private_data);
11632
11633 static enum bpf_perf_event_ret
11634 perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
11635                        void **copy_mem, size_t *copy_size,
11636                        bpf_perf_event_print_t fn, void *private_data)
11637 {
11638         struct perf_event_mmap_page *header = mmap_mem;
11639         __u64 data_head = ring_buffer_read_head(header);
11640         __u64 data_tail = header->data_tail;
11641         void *base = ((__u8 *)header) + page_size;
11642         int ret = LIBBPF_PERF_EVENT_CONT;
11643         struct perf_event_header *ehdr;
11644         size_t ehdr_size;
11645
11646         while (data_head != data_tail) {
11647                 ehdr = base + (data_tail & (mmap_size - 1));
11648                 ehdr_size = ehdr->size;
11649
11650                 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
11651                         void *copy_start = ehdr;
11652                         size_t len_first = base + mmap_size - copy_start;
11653                         size_t len_secnd = ehdr_size - len_first;
11654
11655                         if (*copy_size < ehdr_size) {
11656                                 free(*copy_mem);
11657                                 *copy_mem = malloc(ehdr_size);
11658                                 if (!*copy_mem) {
11659                                         *copy_size = 0;
11660                                         ret = LIBBPF_PERF_EVENT_ERROR;
11661                                         break;
11662                                 }
11663                                 *copy_size = ehdr_size;
11664                         }
11665
11666                         memcpy(*copy_mem, copy_start, len_first);
11667                         memcpy(*copy_mem + len_first, base, len_secnd);
11668                         ehdr = *copy_mem;
11669                 }
11670
11671                 ret = fn(ehdr, private_data);
11672                 data_tail += ehdr_size;
11673                 if (ret != LIBBPF_PERF_EVENT_CONT)
11674                         break;
11675         }
11676
11677         ring_buffer_write_tail(header, data_tail);
11678         return libbpf_err(ret);
11679 }
11680
11681 struct perf_buffer;
11682
11683 struct perf_buffer_params {
11684         struct perf_event_attr *attr;
11685         /* if event_cb is specified, it takes precedence */
11686         perf_buffer_event_fn event_cb;
11687         /* sample_cb and lost_cb are higher-level common-case callbacks */
11688         perf_buffer_sample_fn sample_cb;
11689         perf_buffer_lost_fn lost_cb;
11690         void *ctx;
11691         int cpu_cnt;
11692         int *cpus;
11693         int *map_keys;
11694 };
11695
11696 struct perf_cpu_buf {
11697         struct perf_buffer *pb;
11698         void *base; /* mmap()'ed memory */
11699         void *buf; /* for reconstructing segmented data */
11700         size_t buf_size;
11701         int fd;
11702         int cpu;
11703         int map_key;
11704 };
11705
11706 struct perf_buffer {
11707         perf_buffer_event_fn event_cb;
11708         perf_buffer_sample_fn sample_cb;
11709         perf_buffer_lost_fn lost_cb;
11710         void *ctx; /* passed into callbacks */
11711
11712         size_t page_size;
11713         size_t mmap_size;
11714         struct perf_cpu_buf **cpu_bufs;
11715         struct epoll_event *events;
11716         int cpu_cnt; /* number of allocated CPU buffers */
11717         int epoll_fd; /* epoll instance FD */
11718         int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
11719 };
11720
11721 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
11722                                       struct perf_cpu_buf *cpu_buf)
11723 {
11724         if (!cpu_buf)
11725                 return;
11726         if (cpu_buf->base &&
11727             munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
11728                 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
11729         if (cpu_buf->fd >= 0) {
11730                 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
11731                 close(cpu_buf->fd);
11732         }
11733         free(cpu_buf->buf);
11734         free(cpu_buf);
11735 }
11736
11737 void perf_buffer__free(struct perf_buffer *pb)
11738 {
11739         int i;
11740
11741         if (IS_ERR_OR_NULL(pb))
11742                 return;
11743         if (pb->cpu_bufs) {
11744                 for (i = 0; i < pb->cpu_cnt; i++) {
11745                         struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
11746
11747                         if (!cpu_buf)
11748                                 continue;
11749
11750                         bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
11751                         perf_buffer__free_cpu_buf(pb, cpu_buf);
11752                 }
11753                 free(pb->cpu_bufs);
11754         }
11755         if (pb->epoll_fd >= 0)
11756                 close(pb->epoll_fd);
11757         free(pb->events);
11758         free(pb);
11759 }
11760
11761 static struct perf_cpu_buf *
11762 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
11763                           int cpu, int map_key)
11764 {
11765         struct perf_cpu_buf *cpu_buf;
11766         char msg[STRERR_BUFSIZE];
11767         int err;
11768
11769         cpu_buf = calloc(1, sizeof(*cpu_buf));
11770         if (!cpu_buf)
11771                 return ERR_PTR(-ENOMEM);
11772
11773         cpu_buf->pb = pb;
11774         cpu_buf->cpu = cpu;
11775         cpu_buf->map_key = map_key;
11776
11777         cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
11778                               -1, PERF_FLAG_FD_CLOEXEC);
11779         if (cpu_buf->fd < 0) {
11780                 err = -errno;
11781                 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
11782                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
11783                 goto error;
11784         }
11785
11786         cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
11787                              PROT_READ | PROT_WRITE, MAP_SHARED,
11788                              cpu_buf->fd, 0);
11789         if (cpu_buf->base == MAP_FAILED) {
11790                 cpu_buf->base = NULL;
11791                 err = -errno;
11792                 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
11793                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
11794                 goto error;
11795         }
11796
11797         if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
11798                 err = -errno;
11799                 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
11800                         cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
11801                 goto error;
11802         }
11803
11804         return cpu_buf;
11805
11806 error:
11807         perf_buffer__free_cpu_buf(pb, cpu_buf);
11808         return (struct perf_cpu_buf *)ERR_PTR(err);
11809 }
11810
11811 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
11812                                               struct perf_buffer_params *p);
11813
11814 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
11815                                      perf_buffer_sample_fn sample_cb,
11816                                      perf_buffer_lost_fn lost_cb,
11817                                      void *ctx,
11818                                      const struct perf_buffer_opts *opts)
11819 {
11820         const size_t attr_sz = sizeof(struct perf_event_attr);
11821         struct perf_buffer_params p = {};
11822         struct perf_event_attr attr;
11823         __u32 sample_period;
11824
11825         if (!OPTS_VALID(opts, perf_buffer_opts))
11826                 return libbpf_err_ptr(-EINVAL);
11827
11828         sample_period = OPTS_GET(opts, sample_period, 1);
11829         if (!sample_period)
11830                 sample_period = 1;
11831
11832         memset(&attr, 0, attr_sz);
11833         attr.size = attr_sz;
11834         attr.config = PERF_COUNT_SW_BPF_OUTPUT;
11835         attr.type = PERF_TYPE_SOFTWARE;
11836         attr.sample_type = PERF_SAMPLE_RAW;
11837         attr.sample_period = sample_period;
11838         attr.wakeup_events = sample_period;
11839
11840         p.attr = &attr;
11841         p.sample_cb = sample_cb;
11842         p.lost_cb = lost_cb;
11843         p.ctx = ctx;
11844
11845         return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
11846 }
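/* Usage sketch (hypothetical callback and map names, for illustration;
 * error handling abbreviated):
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// handle one raw sample emitted via bpf_perf_event_output()
 *	}
 *
 *	struct perf_buffer *pb;
 *	int err;
 *
 *	// 8 pages per-CPU ring; page_cnt must be a power of two
 *	pb = perf_buffer__new(bpf_map__fd(skel->maps.events), 8,
 *			      on_sample, NULL, NULL, NULL);
 *	if (!pb)
 *		goto cleanup;
 *	while ((err = perf_buffer__poll(pb, 100)) >= 0)
 *		; // keep polling; a negative return indicates an error
 */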
11847
11848 struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
11849                                          struct perf_event_attr *attr,
11850                                          perf_buffer_event_fn event_cb, void *ctx,
11851                                          const struct perf_buffer_raw_opts *opts)
11852 {
11853         struct perf_buffer_params p = {};
11854
11855         if (!attr)
11856                 return libbpf_err_ptr(-EINVAL);
11857
11858         if (!OPTS_VALID(opts, perf_buffer_raw_opts))
11859                 return libbpf_err_ptr(-EINVAL);
11860
11861         p.attr = attr;
11862         p.event_cb = event_cb;
11863         p.ctx = ctx;
11864         p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
11865         p.cpus = OPTS_GET(opts, cpus, NULL);
11866         p.map_keys = OPTS_GET(opts, map_keys, NULL);
11867
11868         return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
11869 }
11870
11871 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
11872                                               struct perf_buffer_params *p)
11873 {
11874         const char *online_cpus_file = "/sys/devices/system/cpu/online";
11875         struct bpf_map_info map;
11876         char msg[STRERR_BUFSIZE];
11877         struct perf_buffer *pb;
11878         bool *online = NULL;
11879         __u32 map_info_len;
11880         int err, i, j, n;
11881
11882         if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
11883                 pr_warn("page count should be power of two, but is %zu\n",
11884                         page_cnt);
11885                 return ERR_PTR(-EINVAL);
11886         }
11887
11888         /* best-effort sanity checks */
11889         memset(&map, 0, sizeof(map));
11890         map_info_len = sizeof(map);
11891         err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
11892         if (err) {
11893                 err = -errno;
11894                 /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
11895                  * -EBADFD, -EFAULT, or -E2BIG on real error
11896                  */
11897                 if (err != -EINVAL) {
11898                         pr_warn("failed to get map info for map FD %d: %s\n",
11899                                 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
11900                         return ERR_PTR(err);
11901                 }
11902                 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
11903                          map_fd);
11904         } else {
11905                 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
11906                         pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
11907                                 map.name);
11908                         return ERR_PTR(-EINVAL);
11909                 }
11910         }
11911
11912         pb = calloc(1, sizeof(*pb));
11913         if (!pb)
11914                 return ERR_PTR(-ENOMEM);
11915
11916         pb->event_cb = p->event_cb;
11917         pb->sample_cb = p->sample_cb;
11918         pb->lost_cb = p->lost_cb;
11919         pb->ctx = p->ctx;
11920
11921         pb->page_size = getpagesize();
11922         pb->mmap_size = pb->page_size * page_cnt;
11923         pb->map_fd = map_fd;
11924
11925         pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
11926         if (pb->epoll_fd < 0) {
11927                 err = -errno;
11928                 pr_warn("failed to create epoll instance: %s\n",
11929                         libbpf_strerror_r(err, msg, sizeof(msg)));
11930                 goto error;
11931         }
11932
11933         if (p->cpu_cnt > 0) {
11934                 pb->cpu_cnt = p->cpu_cnt;
11935         } else {
11936                 pb->cpu_cnt = libbpf_num_possible_cpus();
11937                 if (pb->cpu_cnt < 0) {
11938                         err = pb->cpu_cnt;
11939                         goto error;
11940                 }
11941                 if (map.max_entries && map.max_entries < pb->cpu_cnt)
11942                         pb->cpu_cnt = map.max_entries;
11943         }
11944
11945         pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
11946         if (!pb->events) {
11947                 err = -ENOMEM;
11948                 pr_warn("failed to allocate events: out of memory\n");
11949                 goto error;
11950         }
11951         pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
11952         if (!pb->cpu_bufs) {
11953                 err = -ENOMEM;
11954                 pr_warn("failed to allocate buffers: out of memory\n");
11955                 goto error;
11956         }
11957
11958         err = parse_cpu_mask_file(online_cpus_file, &online, &n);
11959         if (err) {
11960                 pr_warn("failed to get online CPU mask: %d\n", err);
11961                 goto error;
11962         }
11963
11964         for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
11965                 struct perf_cpu_buf *cpu_buf;
11966                 int cpu, map_key;
11967
11968                 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
11969                 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
11970
11971                 /* in case user didn't explicitly request particular CPUs to
11972                  * be attached to, skip offline/not present CPUs
11973                  */
11974                 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
11975                         continue;
11976
11977                 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
11978                 if (IS_ERR(cpu_buf)) {
11979                         err = PTR_ERR(cpu_buf);
11980                         goto error;
11981                 }
11982
11983                 pb->cpu_bufs[j] = cpu_buf;
11984
11985                 err = bpf_map_update_elem(pb->map_fd, &map_key,
11986                                           &cpu_buf->fd, 0);
11987                 if (err) {
11988                         err = -errno;
11989                         pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
11990                                 cpu, map_key, cpu_buf->fd,
11991                                 libbpf_strerror_r(err, msg, sizeof(msg)));
11992                         goto error;
11993                 }
11994
11995                 pb->events[j].events = EPOLLIN;
11996                 pb->events[j].data.ptr = cpu_buf;
11997                 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
11998                               &pb->events[j]) < 0) {
11999                         err = -errno;
12000                         pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
12001                                 cpu, cpu_buf->fd,
12002                                 libbpf_strerror_r(err, msg, sizeof(msg)));
12003                         goto error;
12004                 }
12005                 j++;
12006         }
12007         pb->cpu_cnt = j;
12008         free(online);
12009
12010         return pb;
12011
12012 error:
12013         free(online);
12014         if (pb)
12015                 perf_buffer__free(pb);
12016         return ERR_PTR(err);
12017 }
12018
12019 struct perf_sample_raw {
12020         struct perf_event_header header;
12021         uint32_t size;
12022         char data[];
12023 };
12024
12025 struct perf_sample_lost {
12026         struct perf_event_header header;
12027         uint64_t id;
12028         uint64_t lost;
12029         uint64_t sample_id;
12030 };
12031
12032 static enum bpf_perf_event_ret
12033 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
12034 {
12035         struct perf_cpu_buf *cpu_buf = ctx;
12036         struct perf_buffer *pb = cpu_buf->pb;
12037         void *data = e;
12038
12039         /* user wants full control over parsing perf event */
12040         if (pb->event_cb)
12041                 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
12042
12043         switch (e->type) {
12044         case PERF_RECORD_SAMPLE: {
12045                 struct perf_sample_raw *s = data;
12046
12047                 if (pb->sample_cb)
12048                         pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
12049                 break;
12050         }
12051         case PERF_RECORD_LOST: {
12052                 struct perf_sample_lost *s = data;
12053
12054                 if (pb->lost_cb)
12055                         pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
12056                 break;
12057         }
12058         default:
12059                 pr_warn("unknown perf sample type %d\n", e->type);
12060                 return LIBBPF_PERF_EVENT_ERROR;
12061         }
12062         return LIBBPF_PERF_EVENT_CONT;
12063 }
12064
12065 static int perf_buffer__process_records(struct perf_buffer *pb,
12066                                         struct perf_cpu_buf *cpu_buf)
12067 {
12068         enum bpf_perf_event_ret ret;
12069
12070         ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
12071                                      pb->page_size, &cpu_buf->buf,
12072                                      &cpu_buf->buf_size,
12073                                      perf_buffer__process_record, cpu_buf);
12074         if (ret != LIBBPF_PERF_EVENT_CONT)
12075                 return ret;
12076         return 0;
12077 }
12078
12079 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
12080 {
12081         return pb->epoll_fd;
12082 }
12083
12084 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
12085 {
12086         int i, cnt, err;
12087
12088         cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
12089         if (cnt < 0)
12090                 return -errno;
12091
12092         for (i = 0; i < cnt; i++) {
12093                 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
12094
12095                 err = perf_buffer__process_records(pb, cpu_buf);
12096                 if (err) {
12097                         pr_warn("error while processing records: %d\n", err);
12098                         return libbpf_err(err);
12099                 }
12100         }
12101         return cnt;
12102 }
12103
12104 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
12105  * manager.
12106  */
12107 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
12108 {
12109         return pb->cpu_cnt;
12110 }
12111
12112 /*
12113  * Return perf_event FD of a ring buffer in *buf_idx* slot of
12114  * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
12115  * select()/poll()/epoll() Linux syscalls.
12116  */
12117 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
12118 {
12119         struct perf_cpu_buf *cpu_buf;
12120
12121         if (buf_idx >= pb->cpu_cnt)
12122                 return libbpf_err(-EINVAL);
12123
12124         cpu_buf = pb->cpu_bufs[buf_idx];
12125         if (!cpu_buf)
12126                 return libbpf_err(-ENOENT);
12127
12128         return cpu_buf->fd;
12129 }
12130
12131 int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
12132 {
12133         struct perf_cpu_buf *cpu_buf;
12134
12135         if (buf_idx >= pb->cpu_cnt)
12136                 return libbpf_err(-EINVAL);
12137
12138         cpu_buf = pb->cpu_bufs[buf_idx];
12139         if (!cpu_buf)
12140                 return libbpf_err(-ENOENT);
12141
12142         *buf = cpu_buf->base;
12143         *buf_size = pb->mmap_size;
12144         return 0;
12145 }
12146
12147 /*
12148  * Consume data from perf ring buffer corresponding to slot *buf_idx* in
12149  * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
12150  * consume, do nothing and return success.
12151  * Returns:
12152  *   - 0 on success;
12153  *   - <0 on failure.
12154  */
12155 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
12156 {
12157         struct perf_cpu_buf *cpu_buf;
12158
12159         if (buf_idx >= pb->cpu_cnt)
12160                 return libbpf_err(-EINVAL);
12161
12162         cpu_buf = pb->cpu_bufs[buf_idx];
12163         if (!cpu_buf)
12164                 return libbpf_err(-ENOENT);
12165
12166         return perf_buffer__process_records(pb, cpu_buf);
12167 }
12168
12169 int perf_buffer__consume(struct perf_buffer *pb)
12170 {
12171         int i, err;
12172
12173         for (i = 0; i < pb->cpu_cnt; i++) {
12174                 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
12175
12176                 if (!cpu_buf)
12177                         continue;
12178
12179                 err = perf_buffer__process_records(pb, cpu_buf);
12180                 if (err) {
12181                         pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
12182                         return libbpf_err(err);
12183                 }
12184         }
12185         return 0;
12186 }
12187
12188 int bpf_program__set_attach_target(struct bpf_program *prog,
12189                                    int attach_prog_fd,
12190                                    const char *attach_func_name)
12191 {
12192         int btf_obj_fd = 0, btf_id = 0, err;
12193
12194         if (!prog || attach_prog_fd < 0)
12195                 return libbpf_err(-EINVAL);
12196
12197         if (prog->obj->loaded)
12198                 return libbpf_err(-EINVAL);
12199
12200         if (attach_prog_fd && !attach_func_name) {
12201                 /* remember attach_prog_fd and let bpf_program__load() find
12202                  * BTF ID during the program load
12203                  */
12204                 prog->attach_prog_fd = attach_prog_fd;
12205                 return 0;
12206         }
12207
12208         if (attach_prog_fd) {
12209                 btf_id = libbpf_find_prog_btf_id(attach_func_name,
12210                                                  attach_prog_fd);
12211                 if (btf_id < 0)
12212                         return libbpf_err(btf_id);
12213         } else {
12214                 if (!attach_func_name)
12215                         return libbpf_err(-EINVAL);
12216
12217                 /* load btf_vmlinux, if not loaded yet */
12218                 err = bpf_object__load_vmlinux_btf(prog->obj, true);
12219                 if (err)
12220                         return libbpf_err(err);
12221                 err = find_kernel_btf_id(prog->obj, attach_func_name,
12222                                          prog->expected_attach_type,
12223                                          &btf_obj_fd, &btf_id);
12224                 if (err)
12225                         return libbpf_err(err);
12226         }
12227
12228         prog->attach_btf_id = btf_id;
12229         prog->attach_btf_obj_fd = btf_obj_fd;
12230         prog->attach_prog_fd = attach_prog_fd;
12231         return 0;
12232 }
12233
12234 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
12235 {
12236         int err = 0, n, len, start, end = -1;
12237         bool *tmp;
12238
12239         *mask = NULL;
12240         *mask_sz = 0;
12241
12242         /* Each substring separated by ',' has format \d+-\d+ or \d+ */
12243         while (*s) {
12244                 if (*s == ',' || *s == '\n') {
12245                         s++;
12246                         continue;
12247                 }
12248                 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
12249                 if (n <= 0 || n > 2) {
12250                         pr_warn("Failed to get CPU range %s: %d\n", s, n);
12251                         err = -EINVAL;
12252                         goto cleanup;
12253                 } else if (n == 1) {
12254                         end = start;
12255                 }
12256                 if (start < 0 || start > end) {
12257                         pr_warn("Invalid CPU range [%d,%d] in %s\n",
12258                                 start, end, s);
12259                         err = -EINVAL;
12260                         goto cleanup;
12261                 }
12262                 tmp = realloc(*mask, end + 1);
12263                 if (!tmp) {
12264                         err = -ENOMEM;
12265                         goto cleanup;
12266                 }
12267                 *mask = tmp;
12268                 memset(tmp + *mask_sz, 0, start - *mask_sz);
12269                 memset(tmp + start, 1, end - start + 1);
12270                 *mask_sz = end + 1;
12271                 s += len;
12272         }
12273         if (!*mask_sz) {
12274                 pr_warn("Empty CPU range\n");
12275                 return -EINVAL;
12276         }
12277         return 0;
12278 cleanup:
12279         free(*mask);
12280         *mask = NULL;
12281         return err;
12282 }
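/* For example (illustration): s = "0-2,7\n" yields *mask_sz = 8 and a mask
 * with entries 0, 1, 2 and 7 set (true) and entries 3-6 clear (false).
 */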
12283
12284 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
12285 {
12286         int fd, err = 0, len;
12287         char buf[128];
12288
12289         fd = open(fcpu, O_RDONLY | O_CLOEXEC);
12290         if (fd < 0) {
12291                 err = -errno;
12292                 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
12293                 return err;
12294         }
12295         len = read(fd, buf, sizeof(buf));
12296         close(fd);
12297         if (len <= 0) {
12298                 err = len ? -errno : -EINVAL;
12299                 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
12300                 return err;
12301         }
12302         if (len >= sizeof(buf)) {
12303                 pr_warn("CPU mask is too big in file %s\n", fcpu);
12304                 return -E2BIG;
12305         }
12306         buf[len] = '\0';
12307
12308         return parse_cpu_mask_str(buf, mask, mask_sz);
12309 }
12310
12311 int libbpf_num_possible_cpus(void)
12312 {
12313         static const char *fcpu = "/sys/devices/system/cpu/possible";
12314         static int cpus;
12315         int err, n, i, tmp_cpus;
12316         bool *mask;
12317
12318         tmp_cpus = READ_ONCE(cpus);
12319         if (tmp_cpus > 0)
12320                 return tmp_cpus;
12321
12322         err = parse_cpu_mask_file(fcpu, &mask, &n);
12323         if (err)
12324                 return libbpf_err(err);
12325
12326         tmp_cpus = 0;
12327         for (i = 0; i < n; i++) {
12328                 if (mask[i])
12329                         tmp_cpus++;
12330         }
12331         free(mask);
12332
12333         WRITE_ONCE(cpus, tmp_cpus);
12334         return tmp_cpus;
12335 }
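
/* libbpf_num_possible_cpus() is typically used to size the value buffer for
 * per-CPU map lookups, where the kernel expects one value slot per possible
 * CPU. A hedged sketch (map_fd, key and the __u64 value type are assumptions
 * made for the example):
 *
 *	__u32 key = 0;
 *	int err, ncpus = libbpf_num_possible_cpus();
 *
 *	if (ncpus < 0)
 *		return ncpus;		// negative error code
 *	__u64 values[ncpus];		// one slot per possible CPU
 *	err = bpf_map_lookup_elem(map_fd, &key, values);
 */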
12336
12337 static int populate_skeleton_maps(const struct bpf_object *obj,
12338                                   struct bpf_map_skeleton *maps,
12339                                   size_t map_cnt)
12340 {
12341         int i;
12342
12343         for (i = 0; i < map_cnt; i++) {
12344                 struct bpf_map **map = maps[i].map;
12345                 const char *name = maps[i].name;
12346                 void **mmaped = maps[i].mmaped;
12347
12348                 *map = bpf_object__find_map_by_name(obj, name);
12349                 if (!*map) {
12350                         pr_warn("failed to find skeleton map '%s'\n", name);
12351                         return -ESRCH;
12352                 }
12353
12354                 /* externs shouldn't be pre-initialized from user code */
12355                 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
12356                         *mmaped = (*map)->mmaped;
12357         }
12358         return 0;
12359 }
12360
12361 static int populate_skeleton_progs(const struct bpf_object *obj,
12362                                    struct bpf_prog_skeleton *progs,
12363                                    size_t prog_cnt)
12364 {
12365         int i;
12366
12367         for (i = 0; i < prog_cnt; i++) {
12368                 struct bpf_program **prog = progs[i].prog;
12369                 const char *name = progs[i].name;
12370
12371                 *prog = bpf_object__find_program_by_name(obj, name);
12372                 if (!*prog) {
12373                         pr_warn("failed to find skeleton program '%s'\n", name);
12374                         return -ESRCH;
12375                 }
12376         }
12377         return 0;
12378 }
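
/* Both populate helpers above walk arrays that bpftool-generated skeleton
 * code fills in before calling bpf_object__open_skeleton(). A hedged sketch
 * of what such generated setup code typically looks like (all names are
 * illustrative):
 *
 *	s->maps[0].name = "my_obj.bss";
 *	s->maps[0].map = &obj->maps.bss;
 *	s->maps[0].mmaped = (void **)&obj->bss;
 *
 *	s->progs[0].name = "my_prog";
 *	s->progs[0].prog = &obj->progs.my_prog;
 *	s->progs[0].link = &obj->links.my_prog;
 */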
12379
12380 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
12381                               const struct bpf_object_open_opts *opts)
12382 {
12383         DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
12384                 .object_name = s->name,
12385         );
12386         struct bpf_object *obj;
12387         int err;
12388
12389         /* Attempt to preserve opts->object_name, unless overridden by user
12390          * explicitly. Overwriting the object name for skeletons is discouraged,
12391          * as it breaks global data maps, because they use the object name as
12392          * their own map name prefix. When the skeleton is generated, bpftool
12393          * assumes that this name will stay the same.
12394          */
12395         if (opts) {
12396                 memcpy(&skel_opts, opts, sizeof(*opts));
12397                 if (!opts->object_name)
12398                         skel_opts.object_name = s->name;
12399         }
12400
12401         obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
12402         err = libbpf_get_error(obj);
12403         if (err) {
12404                 pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
12405                         s->name, err);
12406                 return libbpf_err(err);
12407         }
12408
12409         *s->obj = obj;
12410         err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
12411         if (err) {
12412                 pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
12413                 return libbpf_err(err);
12414         }
12415
12416         err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
12417         if (err) {
12418                 pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
12419                 return libbpf_err(err);
12420         }
12421
12422         return 0;
12423 }
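
/* End users normally reach bpf_object__open_skeleton() through the
 * bpftool-generated wrappers. A hedged sketch, assuming a skeleton generated
 * from a hypothetical my_obj.bpf.c:
 *
 *	struct my_obj *skel = my_obj__open();	// wraps this function
 *	if (!skel)
 *		return -errno;
 *	skel->rodata->debug = true;		// tweak global data before load
 *	err = my_obj__load(skel);		// bpf_object__load_skeleton()
 */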
12424
12425 int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
12426 {
12427         int err, len, var_idx, i;
12428         const char *var_name;
12429         const struct bpf_map *map;
12430         struct btf *btf;
12431         __u32 map_type_id;
12432         const struct btf_type *map_type, *var_type;
12433         const struct bpf_var_skeleton *var_skel;
12434         struct btf_var_secinfo *var;
12435
12436         if (!s->obj)
12437                 return libbpf_err(-EINVAL);
12438
12439         btf = bpf_object__btf(s->obj);
12440         if (!btf) {
12441                 pr_warn("subskeletons require BTF at runtime (object %s)\n",
12442                         bpf_object__name(s->obj));
12443                 return libbpf_err(-EINVAL); /* bpf_object__btf() doesn't set errno */
12444         }
12445
12446         err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
12447         if (err) {
12448                 pr_warn("failed to populate subskeleton maps: %d\n", err);
12449                 return libbpf_err(err);
12450         }
12451
12452         err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
12453         if (err) {
12454                 pr_warn("failed to populate subskeleton maps: %d\n", err);
12455                 return libbpf_err(err);
12456         }
12457
12458         for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
12459                 var_skel = &s->vars[var_idx];
12460                 map = *var_skel->map;
12461                 map_type_id = bpf_map__btf_value_type_id(map);
12462                 map_type = btf__type_by_id(btf, map_type_id);
12463
12464                 if (!btf_is_datasec(map_type)) {
12465                         pr_warn("type for map '%1$s' is not a datasec: %2$s",
12466                                 bpf_map__name(map),
12467                                 __btf_kind_str(btf_kind(map_type)));
12468                         return libbpf_err(-EINVAL);
12469                 }
12470
12471                 len = btf_vlen(map_type);
12472                 var = btf_var_secinfos(map_type);
12473                 for (i = 0; i < len; i++, var++) {
12474                         var_type = btf__type_by_id(btf, var->type);
12475                         var_name = btf__name_by_offset(btf, var_type->name_off);
12476                         if (strcmp(var_name, var_skel->name) == 0) {
12477                                 *var_skel->addr = map->mmaped + var->offset;
12478                                 break;
12479                         }
12480                 }
12481         }
12482         return 0;
12483 }
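
/* Subskeletons attach to an already-open bpf_object owned by someone else.
 * A hedged sketch of the generated entry point (my_subskel is illustrative):
 *
 *	struct my_subskel *sub = my_subskel__open(obj);	// wraps this function
 *	if (!sub)
 *		return -errno;
 *	// each declared variable is now a pointer into its map's mmaped area,
 *	// resolved by the datasec walk above
 */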
12484
12485 void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
12486 {
12487         if (!s)
12488                 return;
12489         free(s->maps);
12490         free(s->progs);
12491         free(s->vars);
12492         free(s);
12493 }
12494
12495 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
12496 {
12497         int i, err;
12498
12499         err = bpf_object__load(*s->obj);
12500         if (err) {
12501                 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
12502                 return libbpf_err(err);
12503         }
12504
12505         for (i = 0; i < s->map_cnt; i++) {
12506                 struct bpf_map *map = *s->maps[i].map;
12507                 size_t mmap_sz = bpf_map_mmap_sz(map);
12508                 int prot, map_fd = bpf_map__fd(map);
12509                 void **mmaped = s->maps[i].mmaped;
12510
12511                 if (!mmaped)
12512                         continue;
12513
12514                 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
12515                         *mmaped = NULL;
12516                         continue;
12517                 }
12518
12519                 if (map->def.map_flags & BPF_F_RDONLY_PROG)
12520                         prot = PROT_READ;
12521                 else
12522                         prot = PROT_READ | PROT_WRITE;
12523
12524                 /* Remap the anonymous mmap()-ed "map initialization image" as
12525                  * BPF map-backed mmap()-ed memory, preserving the same memory
12526                  * address. This causes the kernel to change the process' page
12527                  * tables to point to a different piece of kernel memory, but
12528                  * from the user-space point of view the memory address (and
12529                  * its contents, identical at this point) stays the same. This
12530                  * mapping will be released by bpf_object__close() as part of
12531                  * the normal clean-up procedure, so the skeleton's clean-up
12532                  * doesn't need to worry about it.
12533                  */
12534                 *mmaped = mmap(map->mmaped, mmap_sz, prot,
12535                                 MAP_SHARED | MAP_FIXED, map_fd, 0);
12536                 if (*mmaped == MAP_FAILED) {
12537                         err = -errno;
12538                         *mmaped = NULL;
12539                         pr_warn("failed to re-mmap() map '%s': %d\n",
12540                                  bpf_map__name(map), err);
12541                         return libbpf_err(err);
12542                 }
12543         }
12544
12545         return 0;
12546 }
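
/* The MAP_FIXED remap above is what makes direct global-variable access work
 * after load: stores through the skeleton pointers land in BPF map memory
 * shared with the kernel. A hedged sketch (skel and the variable name are
 * illustrative):
 *
 *	err = my_obj__load(skel);
 *	if (!err)
 *		skel->bss->counter = 0;	// writes straight into the .bss map
 */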
12547
12548 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
12549 {
12550         int i, err;
12551
12552         for (i = 0; i < s->prog_cnt; i++) {
12553                 struct bpf_program *prog = *s->progs[i].prog;
12554                 struct bpf_link **link = s->progs[i].link;
12555
12556                 if (!prog->autoload || !prog->autoattach)
12557                         continue;
12558
12559                 /* auto-attaching not supported for this program */
12560                 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
12561                         continue;
12562
12563                 /* if user already set the link manually, don't attempt auto-attach */
12564                 if (*link)
12565                         continue;
12566
12567                 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
12568                 if (err) {
12569                         pr_warn("prog '%s': failed to auto-attach: %d\n",
12570                                 bpf_program__name(prog), err);
12571                         return libbpf_err(err);
12572                 }
12573
12574                 /* It's possible that for some SEC() definitions auto-attach
12575                  * is supported in some cases (e.g., if the definition completely
12576                  * specifies target information), but not in others.
12577                  * SEC("uprobe") is one such case: if the user specified the
12578                  * target binary and function name, such a BPF program can be
12579                  * auto-attached. But if not, that shouldn't cause the whole
12580                  * skeleton attach to fail; the program should just be skipped.
12581                  * attach_fn signals such a case by returning 0 (no error) and
12582                  * setting link to NULL.
12583                  */
12584         }
12585
12586         return 0;
12587 }
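
/* Because links that are already set are skipped above, a caller can attach
 * one program manually and let the rest auto-attach. A hedged sketch
 * (the program name, binary path and offset are made up; retprobe=false,
 * pid=-1 matches any process):
 *
 *	skel->links.handler = bpf_program__attach_uprobe(skel->progs.handler,
 *					false, -1, "/usr/bin/app", 0x1234);
 *	err = my_obj__attach(skel);	// skips handler, attaches the rest
 */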
12588
12589 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
12590 {
12591         int i;
12592
12593         for (i = 0; i < s->prog_cnt; i++) {
12594                 struct bpf_link **link = s->progs[i].link;
12595
12596                 bpf_link__destroy(*link);
12597                 *link = NULL;
12598         }
12599 }
12600
12601 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
12602 {
12603         if (!s)
12604                 return;
12605
12606         if (s->progs)
12607                 bpf_object__detach_skeleton(s);
12608         if (s->obj)
12609                 bpf_object__close(*s->obj);
12610         free(s->maps);
12611         free(s->progs);
12612         free(s);
12613 }
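
/* Generated wrappers end the skeleton lifecycle with a single destroy call;
 * a hedged sketch:
 *
 *	my_obj__destroy(skel);	// detaches links, closes the object, frees s
 */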