1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3 /*
4 * Common eBPF ELF object loading operations.
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
9 * Copyright (C) 2017 Nicira, Inc.
10 * Copyright (C) 2019 Isovalent, Inc.
28 #include <asm/unistd.h>
29 #include <linux/err.h>
30 #include <linux/kernel.h>
31 #include <linux/bpf.h>
32 #include <linux/btf.h>
33 #include <linux/filter.h>
34 #include <linux/list.h>
35 #include <linux/limits.h>
36 #include <linux/perf_event.h>
37 #include <linux/ring_buffer.h>
38 #include <linux/version.h>
39 #include <sys/epoll.h>
40 #include <sys/ioctl.h>
43 #include <sys/types.h>
45 #include <sys/utsname.h>
46 #include <sys/resource.h>
47 #include <tools/libc_compat.h>
55 #include "str_error.h"
56 #include "libbpf_internal.h"
59 /* make sure libbpf doesn't use kernel-only integer typedefs */
60 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
67 #define BPF_FS_MAGIC 0xcafe4a11
70 /* vsprintf() in __base_pr() uses a nonliteral format string. It may break
71 * compilation if the user enables the corresponding warning. Disable it explicitly.
72 */
73 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
75 #define __printf(a, b) __attribute__((format(printf, a, b)))
77 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
78 static struct bpf_program *bpf_object__find_prog_by_idx(struct bpf_object *obj,
79 int idx);
80 static const struct btf_type *
81 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
83 static int __base_pr(enum libbpf_print_level level, const char *format,
84 va_list args)
85 {
86 if (level == LIBBPF_DEBUG)
87 return 0;
89 return vfprintf(stderr, format, args);
92 static libbpf_print_fn_t __libbpf_pr = __base_pr;
94 libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
96 libbpf_print_fn_t old_print_fn = __libbpf_pr;
98 __libbpf_pr = fn;
99 return old_print_fn;
100 }
103 void libbpf_print(enum libbpf_print_level level, const char *format, ...)
105 va_list args;
110 va_start(args, format);
111 __libbpf_pr(level, format, args);
112 va_end(args);
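/* Illustrative caller-side sketch (editorial example, not part of this file):
 * a custom callback installed via libbpf_set_print() that, like __base_pr()
 * above, suppresses debug-level output:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */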
115 static void pr_perm_msg(int err)
120 if (err != -EPERM || geteuid() != 0)
121 return;
123 err = getrlimit(RLIMIT_MEMLOCK, &limit);
127 if (limit.rlim_cur == RLIM_INFINITY)
128 return;
130 if (limit.rlim_cur < 1024)
131 snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
132 else if (limit.rlim_cur < 1024*1024)
133 snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
135 snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
137 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
141 #define STRERR_BUFSIZE 128
143 /* Copied from tools/perf/util/util.h */
145 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
149 # define zclose(fd) ({ \
150 int ___err = 0; \
151 if ((fd) >= 0) \
152 ___err = close((fd)); \
153 fd = -1; \
154 ___err; })
157 #ifdef HAVE_LIBELF_MMAP_SUPPORT
158 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
160 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
163 static inline __u64 ptr_to_u64(const void *ptr)
165 return (__u64) (unsigned long) ptr;
168 struct bpf_capabilities {
169 /* v4.14: kernel support for program & map names. */
171 /* v5.2: kernel support for global data sections. */
173 /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
175 /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
177 /* BPF_F_MMAPABLE is supported for arrays */
179 /* BTF_FUNC_GLOBAL is supported */
180 __u32 btf_func_global:1;
181 /* kernel support for expected_attach_type in BPF_PROG_LOAD */
182 __u32 exp_attach_type:1;
193 enum reloc_type type;
201 typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
202 struct bpf_program *prog);
207 enum bpf_prog_type prog_type;
208 enum bpf_attach_type expected_attach_type;
209 bool is_exp_attach_type_optional;
212 attach_fn_t attach_fn;
216 * bpf_prog should be a better name but it has been used in
217 * linux/filter.h.
218 */
219 struct bpf_program {
220 /* Index in elf obj file, for relocation use. */
225 const struct bpf_sec_def *sec_def;
226 /* section_name with / replaced by _; makes recursive pinning
227 * in bpf_object__pin_programs easier
230 struct bpf_insn *insns;
231 size_t insns_cnt, main_prog_cnt;
232 enum bpf_prog_type type;
234 struct reloc_desc *reloc_desc;
242 bpf_program_prep_t preprocessor;
244 struct bpf_object *obj;
246 bpf_program_clear_priv_t clear_priv;
248 enum bpf_attach_type expected_attach_type;
250 __u32 attach_prog_fd;
252 __u32 func_info_rec_size;
255 struct bpf_capabilities *caps;
258 __u32 line_info_rec_size;
263 struct bpf_struct_ops {
265 const struct btf_type *type;
266 struct bpf_program **progs;
267 __u32 *kern_func_off;
268 /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
270 /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
271 * btf_vmlinux's format.
272 * struct bpf_struct_ops_tcp_congestion_ops {
273 * [... some other kernel fields ...]
274 * struct tcp_congestion_ops data;
276 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
277 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
278 * from "data".
279 */
280 void *kern_vdata;
284 #define DATA_SEC ".data"
285 #define BSS_SEC ".bss"
286 #define RODATA_SEC ".rodata"
287 #define KCONFIG_SEC ".kconfig"
288 #define STRUCT_OPS_SEC ".struct_ops"
290 enum libbpf_map_type {
298 static const char * const libbpf_type_to_btf_name[] = {
299 [LIBBPF_MAP_DATA] = DATA_SEC,
300 [LIBBPF_MAP_BSS] = BSS_SEC,
301 [LIBBPF_MAP_RODATA] = RODATA_SEC,
302 [LIBBPF_MAP_KCONFIG] = KCONFIG_SEC,
312 struct bpf_map_def def;
314 __u32 btf_key_type_id;
315 __u32 btf_value_type_id;
316 __u32 btf_vmlinux_value_type_id;
318 bpf_map_clear_priv_t clear_priv;
319 enum libbpf_map_type libbpf_type;
321 struct bpf_struct_ops *st_ops;
322 struct bpf_map *inner_map;
343 enum extern_type type;
352 static LIST_HEAD(bpf_objects_list);
355 char name[BPF_OBJ_NAME_LEN];
359 struct bpf_program *programs;
361 struct bpf_map *maps;
366 struct extern_desc *externs;
371 bool has_pseudo_calls;
374 * Information when doing ELF-related work. Only valid if fd
375 * is valid.
387 Elf_Data *st_ops_data;
396 __u32 btf_maps_sec_btf_id;
405 * Every loaded bpf_object is linked into a list, which is
406 * hidden from the caller. bpf_objects__<func> handlers deal with
407 * all objects.
408 */
409 struct list_head list;
412 /* Parse and load BTF vmlinux if any of the programs in the object need
413 * it at load time.
414 */
415 struct btf *btf_vmlinux;
416 struct btf_ext *btf_ext;
419 bpf_object_clear_priv_t clear_priv;
421 struct bpf_capabilities caps;
425 #define obj_elf_valid(o) ((o)->efile.elf)
427 void bpf_program__unload(struct bpf_program *prog)
435 * If the object is opened but the program was never loaded,
436 * it is possible that prog->instances.nr == -1.
438 if (prog->instances.nr > 0) {
439 for (i = 0; i < prog->instances.nr; i++)
440 zclose(prog->instances.fds[i]);
441 } else if (prog->instances.nr != -1) {
442 pr_warn("Internal error: instances.nr is %d\n",
446 prog->instances.nr = -1;
447 zfree(&prog->instances.fds);
449 zfree(&prog->func_info);
450 zfree(&prog->line_info);
453 static void bpf_program__exit(struct bpf_program *prog)
458 if (prog->clear_priv)
459 prog->clear_priv(prog, prog->priv);
462 prog->clear_priv = NULL;
464 bpf_program__unload(prog);
466 zfree(&prog->section_name);
467 zfree(&prog->pin_name);
469 zfree(&prog->reloc_desc);
476 static char *__bpf_program__pin_name(struct bpf_program *prog)
480 name = p = strdup(prog->section_name);
481 while ((p = strchr(p, '/')))
482 *p = '_';
488 bpf_program__init(void *data, size_t size, char *section_name, int idx,
489 struct bpf_program *prog)
491 const size_t bpf_insn_sz = sizeof(struct bpf_insn);
493 if (size == 0 || size % bpf_insn_sz) {
494 pr_warn("corrupted section '%s', size: %zu\n",
499 memset(prog, 0, sizeof(*prog));
501 prog->section_name = strdup(section_name);
502 if (!prog->section_name) {
503 pr_warn("failed to alloc name for prog under section(%d) %s\n",
508 prog->pin_name = __bpf_program__pin_name(prog);
509 if (!prog->pin_name) {
510 pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
515 prog->insns = malloc(size);
517 pr_warn("failed to alloc insns for prog under section %s\n",
521 prog->insns_cnt = size / bpf_insn_sz;
522 memcpy(prog->insns, data, size);
524 prog->instances.fds = NULL;
525 prog->instances.nr = -1;
526 prog->type = BPF_PROG_TYPE_UNSPEC;
530 bpf_program__exit(prog);
535 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
536 char *section_name, int idx)
538 struct bpf_program prog, *progs;
541 err = bpf_program__init(data, size, section_name, idx, &prog);
545 prog.caps = &obj->caps;
546 progs = obj->programs;
547 nr_progs = obj->nr_programs;
549 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
552 * In this case the original obj->programs
553 * is still valid, so there is no need for special
554 * treatment in bpf_object__close().
556 pr_warn("failed to alloc a new program under section '%s'\n",
558 bpf_program__exit(&prog);
562 pr_debug("found program %s\n", prog.section_name);
563 obj->programs = progs;
564 obj->nr_programs = nr_progs + 1;
566 progs[nr_progs] = prog;
571 bpf_object__init_prog_names(struct bpf_object *obj)
573 Elf_Data *symbols = obj->efile.symbols;
574 struct bpf_program *prog;
577 for (pi = 0; pi < obj->nr_programs; pi++) {
578 const char *name = NULL;
580 prog = &obj->programs[pi];
582 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
583 si++) {
586 if (!gelf_getsym(symbols, si, &sym))
588 if (sym.st_shndx != prog->idx)
590 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
593 name = elf_strptr(obj->efile.elf,
594 obj->efile.strtabidx,
597 pr_warn("failed to get sym name string for prog %s\n",
599 return -LIBBPF_ERRNO__LIBELF;
603 if (!name && prog->idx == obj->efile.text_shndx)
607 pr_warn("failed to find sym for prog %s\n",
612 prog->name = strdup(name);
614 pr_warn("failed to allocate memory for prog sym %s\n",
623 static __u32 get_kernel_version(void)
625 __u32 major, minor, patch;
629 if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
631 return KERNEL_VERSION(major, minor, patch);
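/* Worked example: a "5.7.1-..." release string yields
 * KERNEL_VERSION(5, 7, 1) = (5 << 16) + (7 << 8) + 1 = 0x050701,
 * matching the LINUX_VERSION_CODE encoding of linux/version.h.
 */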
634 static const struct btf_member *
635 find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
637 struct btf_member *m;
640 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
641 if (btf_member_bit_offset(t, i) == bit_offset)
648 static const struct btf_member *
649 find_member_by_name(const struct btf *btf, const struct btf_type *t,
652 struct btf_member *m;
655 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
656 if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
663 #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
664 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
665 const char *name, __u32 kind);
668 find_struct_ops_kern_types(const struct btf *btf, const char *tname,
669 const struct btf_type **type, __u32 *type_id,
670 const struct btf_type **vtype, __u32 *vtype_id,
671 const struct btf_member **data_member)
673 const struct btf_type *kern_type, *kern_vtype;
674 const struct btf_member *kern_data_member;
675 __s32 kern_vtype_id, kern_type_id;
678 kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
679 if (kern_type_id < 0) {
680 pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
684 kern_type = btf__type_by_id(btf, kern_type_id);
686 /* Find the corresponding "map_value" type that will be used
687 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
688 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
691 kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
692 tname, BTF_KIND_STRUCT);
693 if (kern_vtype_id < 0) {
694 pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
695 STRUCT_OPS_VALUE_PREFIX, tname);
696 return kern_vtype_id;
698 kern_vtype = btf__type_by_id(btf, kern_vtype_id);
700 /* Find "struct tcp_congestion_ops" from
701 * struct bpf_struct_ops_tcp_congestion_ops {
703 * struct tcp_congestion_ops data;
706 kern_data_member = btf_members(kern_vtype);
707 for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
708 if (kern_data_member->type == kern_type_id)
711 if (i == btf_vlen(kern_vtype)) {
712 pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
713 tname, STRUCT_OPS_VALUE_PREFIX, tname);
718 *type_id = kern_type_id;
720 *vtype_id = kern_vtype_id;
721 *data_member = kern_data_member;
726 static bool bpf_map__is_struct_ops(const struct bpf_map *map)
728 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
731 /* Init the map's fields that depend on kern_btf */
732 static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
733 const struct btf *btf,
734 const struct btf *kern_btf)
736 const struct btf_member *member, *kern_member, *kern_data_member;
737 const struct btf_type *type, *kern_type, *kern_vtype;
738 __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
739 struct bpf_struct_ops *st_ops;
740 void *data, *kern_data;
744 st_ops = map->st_ops;
746 tname = st_ops->tname;
747 err = find_struct_ops_kern_types(kern_btf, tname,
748 &kern_type, &kern_type_id,
749 &kern_vtype, &kern_vtype_id,
754 pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
755 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
757 map->def.value_size = kern_vtype->size;
758 map->btf_vmlinux_value_type_id = kern_vtype_id;
760 st_ops->kern_vdata = calloc(1, kern_vtype->size);
761 if (!st_ops->kern_vdata)
765 kern_data_off = kern_data_member->offset / 8;
766 kern_data = st_ops->kern_vdata + kern_data_off;
768 member = btf_members(type);
769 for (i = 0; i < btf_vlen(type); i++, member++) {
770 const struct btf_type *mtype, *kern_mtype;
771 __u32 mtype_id, kern_mtype_id;
772 void *mdata, *kern_mdata;
773 __s64 msize, kern_msize;
774 __u32 moff, kern_moff;
775 __u32 kern_member_idx;
778 mname = btf__name_by_offset(btf, member->name_off);
779 kern_member = find_member_by_name(kern_btf, kern_type, mname);
781 pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
786 kern_member_idx = kern_member - btf_members(kern_type);
787 if (btf_member_bitfield_size(type, i) ||
788 btf_member_bitfield_size(kern_type, kern_member_idx)) {
789 pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
794 moff = member->offset / 8;
795 kern_moff = kern_member->offset / 8;
798 kern_mdata = kern_data + kern_moff;
800 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
801 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
803 if (BTF_INFO_KIND(mtype->info) !=
804 BTF_INFO_KIND(kern_mtype->info)) {
805 pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
806 map->name, mname, BTF_INFO_KIND(mtype->info),
807 BTF_INFO_KIND(kern_mtype->info));
811 if (btf_is_ptr(mtype)) {
812 struct bpf_program *prog;
814 mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id);
815 kern_mtype = skip_mods_and_typedefs(kern_btf,
818 if (!btf_is_func_proto(mtype) ||
819 !btf_is_func_proto(kern_mtype)) {
820 pr_warn("struct_ops init_kern %s: non func ptr %s is not supported\n",
825 prog = st_ops->progs[i];
827 pr_debug("struct_ops init_kern %s: func ptr %s is not set\n",
832 prog->attach_btf_id = kern_type_id;
833 prog->expected_attach_type = kern_member_idx;
835 st_ops->kern_func_off[i] = kern_data_off + kern_moff;
837 pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
838 map->name, mname, prog->name, moff,
844 msize = btf__resolve_size(btf, mtype_id);
845 kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
846 if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
847 pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
848 map->name, mname, (ssize_t)msize,
849 (ssize_t)kern_msize);
853 pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
854 map->name, mname, (unsigned int)msize,
856 memcpy(kern_mdata, mdata, msize);
862 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
868 for (i = 0; i < obj->nr_maps; i++) {
871 if (!bpf_map__is_struct_ops(map))
874 err = bpf_map__init_kern_struct_ops(map, obj->btf,
883 static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
885 const struct btf_type *type, *datasec;
886 const struct btf_var_secinfo *vsi;
887 struct bpf_struct_ops *st_ops;
888 const char *tname, *var_name;
889 __s32 type_id, datasec_id;
890 const struct btf *btf;
894 if (obj->efile.st_ops_shndx == -1)
898 datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
900 if (datasec_id < 0) {
901 pr_warn("struct_ops init: DATASEC %s not found\n",
906 datasec = btf__type_by_id(btf, datasec_id);
907 vsi = btf_var_secinfos(datasec);
908 for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
909 type = btf__type_by_id(obj->btf, vsi->type);
910 var_name = btf__name_by_offset(obj->btf, type->name_off);
912 type_id = btf__resolve_type(obj->btf, vsi->type);
914 pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
915 vsi->type, STRUCT_OPS_SEC);
919 type = btf__type_by_id(obj->btf, type_id);
920 tname = btf__name_by_offset(obj->btf, type->name_off);
922 pr_warn("struct_ops init: anonymous type is not supported\n");
925 if (!btf_is_struct(type)) {
926 pr_warn("struct_ops init: %s is not a struct\n", tname);
930 map = bpf_object__add_map(obj);
934 map->sec_idx = obj->efile.st_ops_shndx;
935 map->sec_offset = vsi->offset;
936 map->name = strdup(var_name);
940 map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
941 map->def.key_size = sizeof(int);
942 map->def.value_size = type->size;
943 map->def.max_entries = 1;
945 map->st_ops = calloc(1, sizeof(*map->st_ops));
948 st_ops = map->st_ops;
949 st_ops->data = malloc(type->size);
950 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
951 st_ops->kern_func_off = malloc(btf_vlen(type) *
952 sizeof(*st_ops->kern_func_off));
953 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
956 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
957 pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
958 var_name, STRUCT_OPS_SEC);
963 obj->efile.st_ops_data->d_buf + vsi->offset,
965 st_ops->tname = tname;
967 st_ops->type_id = type_id;
969 pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
970 tname, type_id, var_name, vsi->offset);
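/* Illustrative BPF-program-side sketch (in the style of the bpf_dctcp
 * selftest; names here are examples only) of the kind of variable this
 * function parses out of the ".struct_ops" DATASEC:
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops dctcp = {
 *		.init = (void *)dctcp_init,
 *		.name = "bpf_dctcp",
 *	};
 *
 * Each func ptr member is later matched to a BPF program in st_ops->progs.
 */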
976 static struct bpf_object *bpf_object__new(const char *path,
979 const char *obj_name)
981 struct bpf_object *obj;
984 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
986 pr_warn("alloc memory failed for %s\n", path);
987 return ERR_PTR(-ENOMEM);
990 strcpy(obj->path, path);
992 strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
993 obj->name[sizeof(obj->name) - 1] = 0;
995 /* Using basename() GNU version which doesn't modify arg. */
996 strncpy(obj->name, basename((void *)path),
997 sizeof(obj->name) - 1);
998 end = strchr(obj->name, '.');
1005 * The caller of this function should also call
1006 * bpf_object__elf_finish() after data collection to return
1007 * obj_buf to the user. If not, we would have to duplicate the buffer
1008 * to avoid the user freeing it before ELF processing finishes.
1010 obj->efile.obj_buf = obj_buf;
1011 obj->efile.obj_buf_sz = obj_buf_sz;
1012 obj->efile.maps_shndx = -1;
1013 obj->efile.btf_maps_shndx = -1;
1014 obj->efile.data_shndx = -1;
1015 obj->efile.rodata_shndx = -1;
1016 obj->efile.bss_shndx = -1;
1017 obj->efile.st_ops_shndx = -1;
1018 obj->kconfig_map_idx = -1;
1020 obj->kern_version = get_kernel_version();
1021 obj->loaded = false;
1023 INIT_LIST_HEAD(&obj->list);
1024 list_add(&obj->list, &bpf_objects_list);
1028 static void bpf_object__elf_finish(struct bpf_object *obj)
1030 if (!obj_elf_valid(obj))
1033 if (obj->efile.elf) {
1034 elf_end(obj->efile.elf);
1035 obj->efile.elf = NULL;
1037 obj->efile.symbols = NULL;
1038 obj->efile.data = NULL;
1039 obj->efile.rodata = NULL;
1040 obj->efile.bss = NULL;
1041 obj->efile.st_ops_data = NULL;
1043 zfree(&obj->efile.reloc_sects);
1044 obj->efile.nr_reloc_sects = 0;
1045 zclose(obj->efile.fd);
1046 obj->efile.obj_buf = NULL;
1047 obj->efile.obj_buf_sz = 0;
1050 static int bpf_object__elf_init(struct bpf_object *obj)
1055 if (obj_elf_valid(obj)) {
1056 pr_warn("elf init: internal error\n");
1057 return -LIBBPF_ERRNO__LIBELF;
1060 if (obj->efile.obj_buf_sz > 0) {
1062 * obj_buf should have been validated by
1063 * bpf_object__open_buffer().
1065 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
1066 obj->efile.obj_buf_sz);
1068 obj->efile.fd = open(obj->path, O_RDONLY);
1069 if (obj->efile.fd < 0) {
1070 char errmsg[STRERR_BUFSIZE], *cp;
1073 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
1074 pr_warn("failed to open %s: %s\n", obj->path, cp);
1078 obj->efile.elf = elf_begin(obj->efile.fd,
1079 LIBBPF_ELF_C_READ_MMAP, NULL);
1082 if (!obj->efile.elf) {
1083 pr_warn("failed to open %s as ELF file\n", obj->path);
1084 err = -LIBBPF_ERRNO__LIBELF;
1088 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
1089 pr_warn("failed to get EHDR from %s\n", obj->path);
1090 err = -LIBBPF_ERRNO__FORMAT;
1093 ep = &obj->efile.ehdr;
1095 /* Old LLVM set e_machine to EM_NONE */
1096 if (ep->e_type != ET_REL ||
1097 (ep->e_machine && ep->e_machine != EM_BPF)) {
1098 pr_warn("%s is not an eBPF object file\n", obj->path);
1099 err = -LIBBPF_ERRNO__FORMAT;
1105 bpf_object__elf_finish(obj);
1109 static int bpf_object__check_endianness(struct bpf_object *obj)
1111 #if __BYTE_ORDER == __LITTLE_ENDIAN
1112 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
1113 return 0;
1114 #elif __BYTE_ORDER == __BIG_ENDIAN
1115 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
1116 return 0;
1117 #else
1118 # error "Unrecognized __BYTE_ORDER"
1119 #endif
1120 pr_warn("endianness mismatch.\n");
1121 return -LIBBPF_ERRNO__ENDIAN;
1125 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1127 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
1128 pr_debug("license of %s is %s\n", obj->path, obj->license);
1133 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1137 if (size != sizeof(kver)) {
1138 pr_warn("invalid kver section in %s\n", obj->path);
1139 return -LIBBPF_ERRNO__FORMAT;
1141 memcpy(&kver, data, sizeof(kver));
1142 obj->kern_version = kver;
1143 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1147 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1149 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1150 type == BPF_MAP_TYPE_HASH_OF_MAPS)
1155 static int bpf_object_search_section_size(const struct bpf_object *obj,
1156 const char *name, size_t *d_size)
1158 const GElf_Ehdr *ep = &obj->efile.ehdr;
1159 Elf *elf = obj->efile.elf;
1160 Elf_Scn *scn = NULL;
1163 while ((scn = elf_nextscn(elf, scn)) != NULL) {
1164 const char *sec_name;
1169 if (gelf_getshdr(scn, &sh) != &sh) {
1170 pr_warn("failed to get section(%d) header from %s\n",
1175 sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
1177 pr_warn("failed to get section(%d) name from %s\n",
1182 if (strcmp(name, sec_name))
1185 data = elf_getdata(scn, 0);
1187 pr_warn("failed to get section(%d) data from %s(%s)\n",
1188 idx, name, obj->path);
1192 *d_size = data->d_size;
1199 int bpf_object__section_size(const struct bpf_object *obj, const char *name,
1208 } else if (!strcmp(name, DATA_SEC)) {
1209 if (obj->efile.data)
1210 *size = obj->efile.data->d_size;
1211 } else if (!strcmp(name, BSS_SEC)) {
1213 *size = obj->efile.bss->d_size;
1214 } else if (!strcmp(name, RODATA_SEC)) {
1215 if (obj->efile.rodata)
1216 *size = obj->efile.rodata->d_size;
1217 } else if (!strcmp(name, STRUCT_OPS_SEC)) {
1218 if (obj->efile.st_ops_data)
1219 *size = obj->efile.st_ops_data->d_size;
1221 ret = bpf_object_search_section_size(obj, name, &d_size);
1226 return *size ? 0 : ret;
1229 int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
1232 Elf_Data *symbols = obj->efile.symbols;
1239 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
1242 if (!gelf_getsym(symbols, si, &sym))
1244 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1245 GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
1248 sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
1251 pr_warn("failed to get sym name string for var %s\n",
1255 if (strcmp(name, sname) == 0) {
1256 *off = sym.st_value;
1264 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1266 struct bpf_map *new_maps;
1270 if (obj->nr_maps < obj->maps_cap)
1271 return &obj->maps[obj->nr_maps++];
1273 new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
1274 new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
1276 pr_warn("alloc maps for object failed\n");
1277 return ERR_PTR(-ENOMEM);
1280 obj->maps_cap = new_cap;
1281 obj->maps = new_maps;
1283 /* zero out new maps */
1284 memset(obj->maps + obj->nr_maps, 0,
1285 (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
1287 * Fill all fds with -1 so we won't close an incorrect fd (fd=0 is stdin)
1288 * on failure (zclose won't close a negative fd).
1290 for (i = obj->nr_maps; i < obj->maps_cap; i++) {
1291 obj->maps[i].fd = -1;
1292 obj->maps[i].inner_map_fd = -1;
1295 return &obj->maps[obj->nr_maps++];
1298 static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1300 long page_sz = sysconf(_SC_PAGE_SIZE);
1303 map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
1304 map_sz = roundup(map_sz, page_sz);
1305 return map_sz;
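/* E.g., value_size = 5 and max_entries = 1000: roundup(5, 8) * 1000 = 8000
 * bytes, rounded up to 8192 with a 4096-byte page size.
 */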
1308 static char *internal_map_name(struct bpf_object *obj,
1309 enum libbpf_map_type type)
1311 char map_name[BPF_OBJ_NAME_LEN], *p;
1312 const char *sfx = libbpf_type_to_btf_name[type];
1313 int sfx_len = max((size_t)7, strlen(sfx));
1314 int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
1317 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1318 sfx_len, libbpf_type_to_btf_name[type]);
1320 /* sanitise map name to characters allowed by kernel */
1321 for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1322 if (!isalnum(*p) && *p != '_' && *p != '.')
1325 return strdup(map_name);
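/* E.g., with BPF_OBJ_NAME_LEN == 16, an object named "some_object_name" gets
 * a ".rodata" map named "some_obj.rodata": sfx_len = 7, so only
 * pfx_len = 16 - 7 - 1 = 8 prefix characters fit.
 */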
1329 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1330 int sec_idx, void *data, size_t data_sz)
1332 struct bpf_map_def *def;
1333 struct bpf_map *map;
1336 map = bpf_object__add_map(obj);
1338 return PTR_ERR(map);
1340 map->libbpf_type = type;
1341 map->sec_idx = sec_idx;
1342 map->sec_offset = 0;
1343 map->name = internal_map_name(obj, type);
1345 pr_warn("failed to alloc map name\n");
1350 def->type = BPF_MAP_TYPE_ARRAY;
1351 def->key_size = sizeof(int);
1352 def->value_size = data_sz;
1353 def->max_entries = 1;
1354 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1355 ? BPF_F_RDONLY_PROG : 0;
1356 def->map_flags |= BPF_F_MMAPABLE;
1358 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1359 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1361 map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
1362 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1363 if (map->mmaped == MAP_FAILED) {
1366 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1373 memcpy(map->mmaped, data, data_sz);
1375 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1379 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1384 * Populate obj->maps with libbpf internal maps.
1386 if (obj->efile.data_shndx >= 0) {
1387 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1388 obj->efile.data_shndx,
1389 obj->efile.data->d_buf,
1390 obj->efile.data->d_size);
1394 if (obj->efile.rodata_shndx >= 0) {
1395 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1396 obj->efile.rodata_shndx,
1397 obj->efile.rodata->d_buf,
1398 obj->efile.rodata->d_size);
1402 if (obj->efile.bss_shndx >= 0) {
1403 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1404 obj->efile.bss_shndx,
1406 obj->efile.bss->d_size);
1414 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1419 for (i = 0; i < obj->nr_extern; i++) {
1420 if (strcmp(obj->externs[i].name, name) == 0)
1421 return &obj->externs[i];
1426 static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
1429 switch (ext->type) {
1432 pr_warn("extern %s=%c should be tristate or char\n",
1436 *(bool *)ext_val = value == 'y';
1440 *(enum libbpf_tristate *)ext_val = TRI_YES;
1441 else if (value == 'm')
1442 *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1443 else /* value == 'n' */
1444 *(enum libbpf_tristate *)ext_val = TRI_NO;
1447 *(char *)ext_val = value;
1453 pr_warn("extern %s=%c should be bool, tristate, or char\n",
1461 static int set_ext_value_str(struct extern_desc *ext, char *ext_val,
1466 if (ext->type != EXT_CHAR_ARR) {
1467 pr_warn("extern %s=%s should char array\n", ext->name, value);
1471 len = strlen(value);
1472 if (value[len - 1] != '"') {
1473 pr_warn("extern '%s': invalid string config '%s'\n",
1480 if (len >= ext->sz) {
1481 pr_warn("extern '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
1482 ext->name, value, len, ext->sz - 1);
1485 memcpy(ext_val, value + 1, len);
1486 ext_val[len] = '\0';
1491 static int parse_u64(const char *value, __u64 *res)
1497 *res = strtoull(value, &value_end, 0);
1500 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1504 pr_warn("failed to parse '%s' as integer completely\n", value);
1510 static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v)
1512 int bit_sz = ext->sz * 8;
1514 if (ext->sz == 8)
1515 return true;
1517 /* Validate that value stored in u64 fits in integer of `ext->sz`
1518 * bytes size without any loss of information. If the target integer
1519 * is signed, we rely on the following limits of integer type of
1520 * Y bits and subsequent transformation:
1522 * -2^(Y-1) <= X <= 2^(Y-1) - 1
1523 * 0 <= X + 2^(Y-1) <= 2^Y - 1
1524 * 0 <= X + 2^(Y-1) < 2^Y
1526 * For unsigned target integer, check that all the (64 - Y) bits are
1527 * zeros.
1528 */
1529 if (ext->is_signed)
1530 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1531 else
1532 return (v >> bit_sz) == 0;
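/* Worked example for the signed branch: for a 1-byte extern (bit_sz = 8),
 * X = -5 arrives as v = 0xfffffffffffffffb, and v + 2^7 wraps to 123 < 2^8,
 * so it fits; X = 200 gives 200 + 128 = 328 >= 256 and is rejected, matching
 * the signed 1-byte range [-128, 127].
 */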
1535 static int set_ext_value_num(struct extern_desc *ext, void *ext_val,
1538 if (ext->type != EXT_INT && ext->type != EXT_CHAR) {
1539 pr_warn("extern %s=%llu should be integer\n",
1540 ext->name, (unsigned long long)value);
1543 if (!is_ext_value_in_range(ext, value)) {
1544 pr_warn("extern %s=%llu value doesn't fit in %d bytes\n",
1545 ext->name, (unsigned long long)value, ext->sz);
1549 case 1: *(__u8 *)ext_val = value; break;
1550 case 2: *(__u16 *)ext_val = value; break;
1551 case 4: *(__u32 *)ext_val = value; break;
1552 case 8: *(__u64 *)ext_val = value; break;
1560 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1561 char *buf, void *data)
1563 struct extern_desc *ext;
1569 if (strncmp(buf, "CONFIG_", 7))
1572 sep = strchr(buf, '=');
1574 pr_warn("failed to parse '%s': no separator\n", buf);
1578 /* Trim ending '\n' */
1580 if (buf[len - 1] == '\n')
1581 buf[len - 1] = '\0';
1582 /* Split on '=' and ensure that a value is present. */
1586 pr_warn("failed to parse '%s': no value\n", buf);
1590 ext = find_extern_by_name(obj, buf);
1591 if (!ext || ext->is_set)
1594 ext_val = data + ext->data_off;
1598 case 'y': case 'n': case 'm':
1599 err = set_ext_value_tri(ext, ext_val, *value);
1602 err = set_ext_value_str(ext, ext_val, value);
1605 /* assume integer */
1606 err = parse_u64(value, &num);
1608 pr_warn("extern %s=%s should be integer\n",
1612 err = set_ext_value_num(ext, ext_val, num);
1617 pr_debug("extern %s=%s\n", ext->name, value);
1621 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1629 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1632 else if (len >= PATH_MAX)
1633 return -ENAMETOOLONG;
1635 /* gzopen also accepts uncompressed files. */
1636 file = gzopen(buf, "r");
1638 file = gzopen("/proc/config.gz", "r");
1641 pr_warn("failed to open system Kconfig\n");
1645 while (gzgets(file, buf, sizeof(buf))) {
1646 err = bpf_object__process_kconfig_line(obj, buf, data);
1648 pr_warn("error parsing system Kconfig line '%s': %d\n",
1659 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1660 const char *config, void *data)
1666 file = fmemopen((void *)config, strlen(config), "r");
1669 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1673 while (fgets(buf, sizeof(buf), file)) {
1674 err = bpf_object__process_kconfig_line(obj, buf, data);
1676 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1686 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1688 struct extern_desc *last_ext;
1692 if (obj->nr_extern == 0)
1695 last_ext = &obj->externs[obj->nr_extern - 1];
1696 map_sz = last_ext->data_off + last_ext->sz;
1698 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1699 obj->efile.symbols_shndx,
1704 obj->kconfig_map_idx = obj->nr_maps - 1;
1709 static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
1711 Elf_Data *symbols = obj->efile.symbols;
1712 int i, map_def_sz = 0, nr_maps = 0, nr_syms;
1713 Elf_Data *data = NULL;
1716 if (obj->efile.maps_shndx < 0)
1722 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
1724 data = elf_getdata(scn, NULL);
1725 if (!scn || !data) {
1726 pr_warn("failed to get Elf_Data from map section %d\n",
1727 obj->efile.maps_shndx);
1732 * Count number of maps. Each map has a name.
1733 * Array of maps is not supported: only the first element
1734 * is used.
1736 * TODO: Detect array of map and report error.
1738 nr_syms = symbols->d_size / sizeof(GElf_Sym);
1739 for (i = 0; i < nr_syms; i++) {
1742 if (!gelf_getsym(symbols, i, &sym))
1744 if (sym.st_shndx != obj->efile.maps_shndx)
1748 /* Assume equally sized map definitions */
1749 pr_debug("maps in %s: %d maps in %zd bytes\n",
1750 obj->path, nr_maps, data->d_size);
1752 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
1753 pr_warn("unable to determine map definition size section %s, %d maps in %zd bytes\n",
1754 obj->path, nr_maps, data->d_size);
1757 map_def_sz = data->d_size / nr_maps;
1759 /* Fill obj->maps using data in "maps" section. */
1760 for (i = 0; i < nr_syms; i++) {
1762 const char *map_name;
1763 struct bpf_map_def *def;
1764 struct bpf_map *map;
1766 if (!gelf_getsym(symbols, i, &sym))
1768 if (sym.st_shndx != obj->efile.maps_shndx)
1771 map = bpf_object__add_map(obj);
1773 return PTR_ERR(map);
1775 map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
1778 pr_warn("failed to get map #%d name sym string for obj %s\n",
1780 return -LIBBPF_ERRNO__FORMAT;
1783 map->libbpf_type = LIBBPF_MAP_UNSPEC;
1784 map->sec_idx = sym.st_shndx;
1785 map->sec_offset = sym.st_value;
1786 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
1787 map_name, map->sec_idx, map->sec_offset);
1788 if (sym.st_value + map_def_sz > data->d_size) {
1789 pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
1790 obj->path, map_name);
1794 map->name = strdup(map_name);
1796 pr_warn("failed to alloc map name\n");
1799 pr_debug("map %d is \"%s\"\n", i, map->name);
1800 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
1802 * If the definition of the map in the object file fits in
1803 * bpf_map_def, copy it. Any extra fields in our version
1804 * of bpf_map_def will default to zero as a result of the
1805 * zero-initialization in bpf_object__add_map().
1806 */
1807 if (map_def_sz <= sizeof(struct bpf_map_def)) {
1808 memcpy(&map->def, def, map_def_sz);
1811 * Here the map structure being read is bigger than what
1812 * we expect, truncate if the excess bits are all zero.
1813 * If they are not zero, reject this map as
1814 * incompatible.
1815 */
1818 for (b = ((char *)def) + sizeof(struct bpf_map_def);
1819 b < ((char *)def) + map_def_sz; b++) {
1821 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
1822 obj->path, map_name);
1827 memcpy(&map->def, def, sizeof(struct bpf_map_def));
1833 static const struct btf_type *
1834 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1836 const struct btf_type *t = btf__type_by_id(btf, id);
1841 while (btf_is_mod(t) || btf_is_typedef(t)) {
1844 t = btf__type_by_id(btf, t->type);
1850 static const struct btf_type *
1851 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
1853 const struct btf_type *t;
1855 t = skip_mods_and_typedefs(btf, id, NULL);
1859 t = skip_mods_and_typedefs(btf, t->type, res_id);
1861 return btf_is_func_proto(t) ? t : NULL;
1865 * Fetch integer attribute of BTF map definition. Such attributes are
1866 * represented using a pointer to an array, in which dimensionality of array
1867 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
1868 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
1869 * type definition, while using only sizeof(void *) space in ELF data section.
1871 static bool get_map_field_int(const char *map_name, const struct btf *btf,
1872 const struct btf_member *m, __u32 *res)
1874 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
1875 const char *name = btf__name_by_offset(btf, m->name_off);
1876 const struct btf_array *arr_info;
1877 const struct btf_type *arr_t;
1879 if (!btf_is_ptr(t)) {
1880 pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
1881 map_name, name, btf_kind(t));
1885 arr_t = btf__type_by_id(btf, t->type);
1887 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
1888 map_name, name, t->type);
1891 if (!btf_is_array(arr_t)) {
1892 pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
1893 map_name, name, btf_kind(arr_t));
1896 arr_info = btf_array(arr_t);
1897 *res = arr_info->nelems;
1898 return true;
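/* Illustrative user-side sketch: the bpf_helpers.h macros emit exactly this
 * encoding ("#define __uint(name, val) int (*name)[val]"), so a BTF-defined
 * map such as (map name is a made-up example):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 256);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_map SEC(".maps");
 *
 * lets get_map_field_int() read back type == 2 (BPF_MAP_TYPE_ARRAY) and
 * max_entries == 256 from the array dimensions alone.
 */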
1901 static int build_map_pin_path(struct bpf_map *map, const char *path)
1907 path = "/sys/fs/bpf";
1909 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
1912 else if (len >= PATH_MAX)
1913 return -ENAMETOOLONG;
1915 err = bpf_map__set_pin_path(map, buf);
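/* E.g., a map named "stats" (made-up name) with a NULL pin_root_path gets
 * the default pin path "/sys/fs/bpf/stats".
 */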
1923 static int parse_btf_map_def(struct bpf_object *obj,
1924 struct bpf_map *map,
1925 const struct btf_type *def,
1926 bool strict, bool is_inner,
1927 const char *pin_root_path)
1929 const struct btf_type *t;
1930 const struct btf_member *m;
1933 vlen = btf_vlen(def);
1934 m = btf_members(def);
1935 for (i = 0; i < vlen; i++, m++) {
1936 const char *name = btf__name_by_offset(obj->btf, m->name_off);
1939 pr_warn("map '%s': invalid field #%d.\n", map->name, i);
1942 if (strcmp(name, "type") == 0) {
1943 if (!get_map_field_int(map->name, obj->btf, m,
1946 pr_debug("map '%s': found type = %u.\n",
1947 map->name, map->def.type);
1948 } else if (strcmp(name, "max_entries") == 0) {
1949 if (!get_map_field_int(map->name, obj->btf, m,
1950 &map->def.max_entries))
1952 pr_debug("map '%s': found max_entries = %u.\n",
1953 map->name, map->def.max_entries);
1954 } else if (strcmp(name, "map_flags") == 0) {
1955 if (!get_map_field_int(map->name, obj->btf, m,
1956 &map->def.map_flags))
1958 pr_debug("map '%s': found map_flags = %u.\n",
1959 map->name, map->def.map_flags);
1960 } else if (strcmp(name, "key_size") == 0) {
1963 if (!get_map_field_int(map->name, obj->btf, m, &sz))
1965 pr_debug("map '%s': found key_size = %u.\n",
1967 if (map->def.key_size && map->def.key_size != sz) {
1968 pr_warn("map '%s': conflicting key size %u != %u.\n",
1969 map->name, map->def.key_size, sz);
1972 map->def.key_size = sz;
1973 } else if (strcmp(name, "key") == 0) {
1976 t = btf__type_by_id(obj->btf, m->type);
1978 pr_warn("map '%s': key type [%d] not found.\n",
1979 map->name, m->type);
1982 if (!btf_is_ptr(t)) {
1983 pr_warn("map '%s': key spec is not PTR: %u.\n",
1984 map->name, btf_kind(t));
1987 sz = btf__resolve_size(obj->btf, t->type);
1989 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
1990 map->name, t->type, (ssize_t)sz);
1993 pr_debug("map '%s': found key [%u], sz = %zd.\n",
1994 map->name, t->type, (ssize_t)sz);
1995 if (map->def.key_size && map->def.key_size != sz) {
1996 pr_warn("map '%s': conflicting key size %u != %zd.\n",
1997 map->name, map->def.key_size, (ssize_t)sz);
2000 map->def.key_size = sz;
2001 map->btf_key_type_id = t->type;
2002 } else if (strcmp(name, "value_size") == 0) {
2005 if (!get_map_field_int(map->name, obj->btf, m, &sz))
2007 pr_debug("map '%s': found value_size = %u.\n",
2009 if (map->def.value_size && map->def.value_size != sz) {
2010 pr_warn("map '%s': conflicting value size %u != %u.\n",
2011 map->name, map->def.value_size, sz);
2014 map->def.value_size = sz;
2015 } else if (strcmp(name, "value") == 0) {
2018 t = btf__type_by_id(obj->btf, m->type);
2020 pr_warn("map '%s': value type [%d] not found.\n",
2021 map->name, m->type);
2024 if (!btf_is_ptr(t)) {
2025 pr_warn("map '%s': value spec is not PTR: %u.\n",
2026 map->name, btf_kind(t));
2029 sz = btf__resolve_size(obj->btf, t->type);
2031 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2032 map->name, t->type, (ssize_t)sz);
2035 pr_debug("map '%s': found value [%u], sz = %zd.\n",
2036 map->name, t->type, (ssize_t)sz);
2037 if (map->def.value_size && map->def.value_size != sz) {
2038 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2039 map->name, map->def.value_size, (ssize_t)sz);
2042 map->def.value_size = sz;
2043 map->btf_value_type_id = t->type;
2045 else if (strcmp(name, "values") == 0) {
2049 pr_warn("map '%s': multi-level inner maps not supported.\n",
2053 if (i != vlen - 1) {
2054 pr_warn("map '%s': '%s' member should be last.\n",
2058 if (!bpf_map_type__is_map_in_map(map->def.type)) {
2059 pr_warn("map '%s': should be map-in-map.\n",
2063 if (map->def.value_size && map->def.value_size != 4) {
2064 pr_warn("map '%s': conflicting value size %u != 4.\n",
2065 map->name, map->def.value_size);
2068 map->def.value_size = 4;
2069 t = btf__type_by_id(obj->btf, m->type);
2071 pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
2072 map->name, m->type);
2075 if (!btf_is_array(t) || btf_array(t)->nelems) {
2076 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
2080 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
2082 if (!btf_is_ptr(t)) {
2083 pr_warn("map '%s': map-in-map inner def is of unexpected kind %u.\n",
2084 map->name, btf_kind(t));
2087 t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
2088 if (!btf_is_struct(t)) {
2089 pr_warn("map '%s': map-in-map inner def is of unexpected kind %u.\n",
2090 map->name, btf_kind(t));
2094 map->inner_map = calloc(1, sizeof(*map->inner_map));
2095 if (!map->inner_map)
2097 map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
2098 map->inner_map->name = malloc(strlen(map->name) +
2099 sizeof(".inner") + 1);
2100 if (!map->inner_map->name)
2102 sprintf(map->inner_map->name, "%s.inner", map->name);
2104 err = parse_btf_map_def(obj, map->inner_map, t, strict,
2105 true /* is_inner */, NULL);
2108 } else if (strcmp(name, "pinning") == 0) {
2113 pr_debug("map '%s': inner def can't be pinned.\n",
2117 if (!get_map_field_int(map->name, obj->btf, m, &val))
2119 pr_debug("map '%s': found pinning = %u.\n",
2122 if (val != LIBBPF_PIN_NONE &&
2123 val != LIBBPF_PIN_BY_NAME) {
2124 pr_warn("map '%s': invalid pinning value %u.\n",
2128 if (val == LIBBPF_PIN_BY_NAME) {
2129 err = build_map_pin_path(map, pin_root_path);
2131 pr_warn("map '%s': couldn't build pin path.\n",
2138 pr_warn("map '%s': unknown field '%s'.\n",
2142 pr_debug("map '%s': ignoring unknown field '%s'.\n",
2147 if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
2148 pr_warn("map '%s': map type isn't specified.\n", map->name);
2155 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2156 const struct btf_type *sec,
2157 int var_idx, int sec_idx,
2158 const Elf_Data *data, bool strict,
2159 const char *pin_root_path)
2161 const struct btf_type *var, *def;
2162 const struct btf_var_secinfo *vi;
2163 const struct btf_var *var_extra;
2164 const char *map_name;
2165 struct bpf_map *map;
2167 vi = btf_var_secinfos(sec) + var_idx;
2168 var = btf__type_by_id(obj->btf, vi->type);
2169 var_extra = btf_var(var);
2170 map_name = btf__name_by_offset(obj->btf, var->name_off);
2172 if (map_name == NULL || map_name[0] == '\0') {
2173 pr_warn("map #%d: empty name.\n", var_idx);
2176 if ((__u64)vi->offset + vi->size > data->d_size) {
2177 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2180 if (!btf_is_var(var)) {
2181 pr_warn("map '%s': unexpected var kind %u.\n",
2182 map_name, btf_kind(var));
2185 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2186 var_extra->linkage != BTF_VAR_STATIC) {
2187 pr_warn("map '%s': unsupported var linkage %u.\n",
2188 map_name, var_extra->linkage);
2192 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2193 if (!btf_is_struct(def)) {
2194 pr_warn("map '%s': unexpected def kind %u.\n",
2195 map_name, btf_kind(var));
2198 if (def->size > vi->size) {
2199 pr_warn("map '%s': invalid def size.\n", map_name);
2203 map = bpf_object__add_map(obj);
2205 return PTR_ERR(map);
2206 map->name = strdup(map_name);
2208 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2211 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2212 map->def.type = BPF_MAP_TYPE_UNSPEC;
2213 map->sec_idx = sec_idx;
2214 map->sec_offset = vi->offset;
2215 map->btf_var_idx = var_idx;
2216 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2217 map_name, map->sec_idx, map->sec_offset);
2219 return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
2222 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2223 const char *pin_root_path)
2225 const struct btf_type *sec = NULL;
2226 int nr_types, i, vlen, err;
2227 const struct btf_type *t;
2232 if (obj->efile.btf_maps_shndx < 0)
2235 scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
2237 data = elf_getdata(scn, NULL);
2238 if (!scn || !data) {
2239 pr_warn("failed to get Elf_Data from map section %d (%s)\n",
2240 obj->efile.maps_shndx, MAPS_ELF_SEC);
2244 nr_types = btf__get_nr_types(obj->btf);
2245 for (i = 1; i <= nr_types; i++) {
2246 t = btf__type_by_id(obj->btf, i);
2247 if (!btf_is_datasec(t))
2249 name = btf__name_by_offset(obj->btf, t->name_off);
2250 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2252 obj->efile.btf_maps_sec_btf_id = i;
2258 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2262 vlen = btf_vlen(sec);
2263 for (i = 0; i < vlen; i++) {
2264 err = bpf_object__init_user_btf_map(obj, sec, i,
2265 obj->efile.btf_maps_shndx,
2275 static int bpf_object__init_maps(struct bpf_object *obj,
2276 const struct bpf_object_open_opts *opts)
2278 const char *pin_root_path;
2282 strict = !OPTS_GET(opts, relaxed_maps, false);
2283 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2285 err = bpf_object__init_user_maps(obj, strict);
2286 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2287 err = err ?: bpf_object__init_global_data_maps(obj);
2288 err = err ?: bpf_object__init_kconfig_map(obj);
2289 err = err ?: bpf_object__init_struct_ops_maps(obj);
2296 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2301 scn = elf_getscn(obj->efile.elf, idx);
2305 if (gelf_getshdr(scn, &sh) != &sh)
2308 if (sh.sh_flags & SHF_EXECINSTR)
2314 static void bpf_object__sanitize_btf(struct bpf_object *obj)
2316 bool has_func_global = obj->caps.btf_func_global;
2317 bool has_datasec = obj->caps.btf_datasec;
2318 bool has_func = obj->caps.btf_func;
2319 struct btf *btf = obj->btf;
2323 if (!obj->btf || (has_func && has_datasec && has_func_global))
2326 for (i = 1; i <= btf__get_nr_types(btf); i++) {
2327 t = (struct btf_type *)btf__type_by_id(btf, i);
2329 if (!has_datasec && btf_is_var(t)) {
2330 /* replace VAR with INT */
2331 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2333 * using size = 1 is the safest choice, 4 will be too
2334 * big and cause kernel BTF validation failure if
2335 * original variable took less than 4 bytes
2337 t->size = 1;
2338 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2339 } else if (!has_datasec && btf_is_datasec(t)) {
2340 /* replace DATASEC with STRUCT */
2341 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2342 struct btf_member *m = btf_members(t);
2343 struct btf_type *vt;
2346 name = (char *)btf__name_by_offset(btf, t->name_off);
2353 vlen = btf_vlen(t);
2354 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2355 for (j = 0; j < vlen; j++, v++, m++) {
2356 /* order of field assignments is important */
2357 m->offset = v->offset * 8;
2359 /* preserve variable name as member name */
2360 vt = (void *)btf__type_by_id(btf, v->type);
2361 m->name_off = vt->name_off;
2363 } else if (!has_func && btf_is_func_proto(t)) {
2364 /* replace FUNC_PROTO with ENUM */
2365 vlen = btf_vlen(t);
2366 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2367 t->size = sizeof(__u32); /* kernel enforced */
2368 } else if (!has_func && btf_is_func(t)) {
2369 /* replace FUNC with TYPEDEF */
2370 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2371 } else if (!has_func_global && btf_is_func(t)) {
2372 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2373 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2378 static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
2383 if (!obj->caps.btf_func) {
2384 btf_ext__free(obj->btf_ext);
2385 obj->btf_ext = NULL;
2389 static bool libbpf_needs_btf(const struct bpf_object *obj)
2391 return obj->efile.btf_maps_shndx >= 0 ||
2392 obj->efile.st_ops_shndx >= 0 ||
2396 static bool kernel_needs_btf(const struct bpf_object *obj)
2398 return obj->efile.st_ops_shndx >= 0;
2401 static int bpf_object__init_btf(struct bpf_object *obj,
2403 Elf_Data *btf_ext_data)
2408 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2409 if (IS_ERR(obj->btf)) {
2410 err = PTR_ERR(obj->btf);
2412 pr_warn("Error loading ELF section %s: %d.\n",
2420 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
2421 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2424 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
2425 btf_ext_data->d_size);
2426 if (IS_ERR(obj->btf_ext)) {
2427 pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n",
2428 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
2429 obj->btf_ext = NULL;
2434 if (err && libbpf_needs_btf(obj)) {
2435 pr_warn("BTF is required, but is missing or corrupted.\n");
2441 static int bpf_object__finalize_btf(struct bpf_object *obj)
2448 err = btf__finalize_data(obj, obj->btf);
2452 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2453 btf__free(obj->btf);
2455 btf_ext__free(obj->btf_ext);
2456 obj->btf_ext = NULL;
2458 if (libbpf_needs_btf(obj)) {
2459 pr_warn("BTF is required, but is missing or corrupted.\n");
2465 static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
2467 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2468 prog->type == BPF_PROG_TYPE_LSM)
2471 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2472 * also need vmlinux BTF
2474 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2480 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
2482 struct bpf_program *prog;
2485 bpf_object__for_each_program(prog, obj) {
2486 if (libbpf_prog_needs_vmlinux_btf(prog)) {
2487 obj->btf_vmlinux = libbpf_find_kernel_btf();
2488 if (IS_ERR(obj->btf_vmlinux)) {
2489 err = PTR_ERR(obj->btf_vmlinux);
2490 pr_warn("Error loading vmlinux BTF: %d\n", err);
2491 obj->btf_vmlinux = NULL;
2501 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2508 bpf_object__sanitize_btf(obj);
2509 bpf_object__sanitize_btf_ext(obj);
2511 err = btf__load(obj->btf);
2513 pr_warn("Error loading %s into kernel: %d.\n",
2515 btf__free(obj->btf);
2517 /* btf_ext can't exist without btf, so free it as well */
2519 btf_ext__free(obj->btf_ext);
2520 obj->btf_ext = NULL;
2523 if (kernel_needs_btf(obj))
2529 static int bpf_object__elf_collect(struct bpf_object *obj)
2531 Elf *elf = obj->efile.elf;
2532 GElf_Ehdr *ep = &obj->efile.ehdr;
2533 Elf_Data *btf_ext_data = NULL;
2534 Elf_Data *btf_data = NULL;
2535 Elf_Scn *scn = NULL;
2536 int idx = 0, err = 0;
2538 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
2539 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
2540 pr_warn("failed to get e_shstrndx from %s\n", obj->path);
2541 return -LIBBPF_ERRNO__FORMAT;
2544 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2550 if (gelf_getshdr(scn, &sh) != &sh) {
2551 pr_warn("failed to get section(%d) header from %s\n",
2553 return -LIBBPF_ERRNO__FORMAT;
2556 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
2558 pr_warn("failed to get section(%d) name from %s\n",
2560 return -LIBBPF_ERRNO__FORMAT;
2563 data = elf_getdata(scn, 0);
2565 pr_warn("failed to get section(%d) data from %s(%s)\n",
2566 idx, name, obj->path);
2567 return -LIBBPF_ERRNO__FORMAT;
2569 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
2570 idx, name, (unsigned long)data->d_size,
2571 (int)sh.sh_link, (unsigned long)sh.sh_flags,
2574 if (strcmp(name, "license") == 0) {
2575 err = bpf_object__init_license(obj,
2580 } else if (strcmp(name, "version") == 0) {
2581 err = bpf_object__init_kversion(obj,
2586 } else if (strcmp(name, "maps") == 0) {
2587 obj->efile.maps_shndx = idx;
2588 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
2589 obj->efile.btf_maps_shndx = idx;
2590 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
2592 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
2593 btf_ext_data = data;
2594 } else if (sh.sh_type == SHT_SYMTAB) {
2595 if (obj->efile.symbols) {
2596 pr_warn("bpf: multiple SYMTAB in %s\n",
2598 return -LIBBPF_ERRNO__FORMAT;
2600 obj->efile.symbols = data;
2601 obj->efile.symbols_shndx = idx;
2602 obj->efile.strtabidx = sh.sh_link;
2603 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
2604 if (sh.sh_flags & SHF_EXECINSTR) {
2605 if (strcmp(name, ".text") == 0)
2606 obj->efile.text_shndx = idx;
2607 err = bpf_object__add_program(obj, data->d_buf,
2611 char errmsg[STRERR_BUFSIZE];
2614 cp = libbpf_strerror_r(-err, errmsg,
2616 pr_warn("failed to alloc program %s (%s): %s",
2617 name, obj->path, cp);
2620 } else if (strcmp(name, DATA_SEC) == 0) {
2621 obj->efile.data = data;
2622 obj->efile.data_shndx = idx;
2623 } else if (strcmp(name, RODATA_SEC) == 0) {
2624 obj->efile.rodata = data;
2625 obj->efile.rodata_shndx = idx;
2626 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
2627 obj->efile.st_ops_data = data;
2628 obj->efile.st_ops_shndx = idx;
2630 pr_debug("skip section(%d) %s\n", idx, name);
2632 } else if (sh.sh_type == SHT_REL) {
2633 int nr_sects = obj->efile.nr_reloc_sects;
2634 void *sects = obj->efile.reloc_sects;
2635 int sec = sh.sh_info; /* points to other section */
2637 /* Only do relo for section with exec instructions */
2638 if (!section_have_execinstr(obj, sec) &&
2639 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
2640 strcmp(name, ".rel" MAPS_ELF_SEC)) {
2641 pr_debug("skip relo %s(%d) for section(%d)\n",
2646 sects = reallocarray(sects, nr_sects + 1,
2647 sizeof(*obj->efile.reloc_sects));
2649 pr_warn("reloc_sects realloc failed\n");
2653 obj->efile.reloc_sects = sects;
2654 obj->efile.nr_reloc_sects++;
2656 obj->efile.reloc_sects[nr_sects].shdr = sh;
2657 obj->efile.reloc_sects[nr_sects].data = data;
2658 } else if (sh.sh_type == SHT_NOBITS &&
2659 strcmp(name, BSS_SEC) == 0) {
2660 obj->efile.bss = data;
2661 obj->efile.bss_shndx = idx;
2663 pr_debug("skip section(%d) %s\n", idx, name);
2667 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
2668 pr_warn("Corrupted ELF file: index of strtab invalid\n");
2669 return -LIBBPF_ERRNO__FORMAT;
2671 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
2674 static bool sym_is_extern(const GElf_Sym *sym)
2676 int bind = GELF_ST_BIND(sym->st_info);
2677 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
2678 return sym->st_shndx == SHN_UNDEF &&
2679 (bind == STB_GLOBAL || bind == STB_WEAK) &&
2680 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
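/* Illustrative BPF-program-side sketch: externs like these are normally
 * declared with bpf_helpers.h's __kconfig attribute, e.g.:
 *
 *	extern unsigned int CONFIG_HZ __kconfig;
 *	extern bool CONFIG_BPF_SYSCALL __kconfig;
 *
 * The compiler emits them as undefined NOTYPE/GLOBAL symbols, which is what
 * the check above recognizes.
 */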
2683 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
2685 const struct btf_type *t;
2686 const char *var_name;
2692 n = btf__get_nr_types(btf);
2693 for (i = 1; i <= n; i++) {
2694 t = btf__type_by_id(btf, i);
2699 var_name = btf__name_by_offset(btf, t->name_off);
2700 if (strcmp(var_name, ext_name))
2703 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
2712 static enum extern_type find_extern_type(const struct btf *btf, int id,
2715 const struct btf_type *t;
2718 t = skip_mods_and_typedefs(btf, id, NULL);
2719 name = btf__name_by_offset(btf, t->name_off);
2723 switch (btf_kind(t)) {
2724 case BTF_KIND_INT: {
2725 int enc = btf_int_encoding(t);
2727 if (enc & BTF_INT_BOOL)
2728 return t->size == 1 ? EXT_BOOL : EXT_UNKNOWN;
2730 *is_signed = enc & BTF_INT_SIGNED;
2733 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
2740 if (strcmp(name, "libbpf_tristate"))
2742 return EXT_TRISTATE;
2743 case BTF_KIND_ARRAY:
2744 if (btf_array(t)->nelems == 0)
2746 if (find_extern_type(btf, btf_array(t)->type, NULL) != EXT_CHAR)
2748 return EXT_CHAR_ARR;
2754 static int cmp_externs(const void *_a, const void *_b)
2756 const struct extern_desc *a = _a;
2757 const struct extern_desc *b = _b;
2759 /* descending order by alignment requirements */
2760 if (a->align != b->align)
2761 return a->align > b->align ? -1 : 1;
2762 /* ascending order by size, within same alignment class */
2764 return a->sz < b->sz ? -1 : 1;
2765 /* resolve ties by name */
2766 return strcmp(a->name, b->name);
2769 static int bpf_object__collect_externs(struct bpf_object *obj)
2771 const struct btf_type *t;
2772 struct extern_desc *ext;
2773 int i, n, off, btf_id;
2774 struct btf_type *sec;
2775 const char *ext_name;
2779 if (!obj->efile.symbols)
2782 scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
2784 return -LIBBPF_ERRNO__FORMAT;
2785 if (gelf_getshdr(scn, &sh) != &sh)
2786 return -LIBBPF_ERRNO__FORMAT;
2787 n = sh.sh_size / sh.sh_entsize;
2789 pr_debug("looking for externs among %d symbols...\n", n);
2790 for (i = 0; i < n; i++) {
2793 if (!gelf_getsym(obj->efile.symbols, i, &sym))
2794 return -LIBBPF_ERRNO__FORMAT;
2795 if (!sym_is_extern(&sym))
2797 ext_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
2799 if (!ext_name || !ext_name[0])
2803 ext = reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
2807 ext = &ext[obj->nr_extern];
2808 memset(ext, 0, sizeof(*ext));
2811 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
2812 if (ext->btf_id <= 0) {
2813 pr_warn("failed to find BTF for extern '%s': %d\n",
2814 ext_name, ext->btf_id);
2817 t = btf__type_by_id(obj->btf, ext->btf_id);
2818 ext->name = btf__name_by_offset(obj->btf, t->name_off);
2820 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
2821 ext->sz = btf__resolve_size(obj->btf, t->type);
2823 pr_warn("failed to resolve size of extern '%s': %d\n",
2827 ext->align = btf__align_of(obj->btf, t->type);
2828 if (ext->align <= 0) {
2829 pr_warn("failed to determine alignment of extern '%s': %d\n",
2830 ext_name, ext->align);
2833 ext->type = find_extern_type(obj->btf, t->type,
2835 if (ext->type == EXT_UNKNOWN) {
2836 pr_warn("extern '%s' type is unsupported\n", ext_name);
2840 pr_debug("collected %d externs total\n", obj->nr_extern);
2842 if (!obj->nr_extern)
2845 /* sort externs by (alignment, size, name) and calculate their offsets
2847 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
2849 for (i = 0; i < obj->nr_extern; i++) {
2850 ext = &obj->externs[i];
2851 ext->data_off = roundup(off, ext->align);
2852 off = ext->data_off + ext->sz;
2853 pr_debug("extern #%d: symbol %d, off %u, name %s\n",
2854 i, ext->sym_idx, ext->data_off, ext->name);
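/* Editor's worked example (illustrative): given three externs
 *	__u64 a;  (align 8, size 8)
 *	int   b;  (align 4, size 4)
 *	bool  c;  (align 1, size 1)
 * the qsort() above orders them a, b, c (descending alignment), and this
 * loop assigns data_off 0, 8, and 12, respectively.
 */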
2857 btf_id = btf__find_by_name(obj->btf, KCONFIG_SEC);
2859 pr_warn("no BTF info found for '%s' datasec\n", KCONFIG_SEC);
2863 sec = (struct btf_type *)btf__type_by_id(obj->btf, btf_id);
2866 for (i = 0; i < n; i++) {
2867 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
2869 t = btf__type_by_id(obj->btf, vs->type);
2870 ext_name = btf__name_by_offset(obj->btf, t->name_off);
2871 ext = find_extern_by_name(obj, ext_name);
2873 pr_warn("failed to find extern definition for BTF var '%s'\n",
2877 vs->offset = ext->data_off;
2878 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
2884 static struct bpf_program *
2885 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
2887 struct bpf_program *prog;
2890 for (i = 0; i < obj->nr_programs; i++) {
2891 prog = &obj->programs[i];
2892 if (prog->idx == idx)
2898 struct bpf_program *
2899 bpf_object__find_program_by_title(const struct bpf_object *obj,
2902 struct bpf_program *pos;
2904 bpf_object__for_each_program(pos, obj) {
2905 if (pos->section_name && !strcmp(pos->section_name, title))
2911 struct bpf_program *
2912 bpf_object__find_program_by_name(const struct bpf_object *obj,
2915 struct bpf_program *prog;
2917 bpf_object__for_each_program(prog, obj) {
2918 if (!strcmp(prog->name, name))
2924 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
2927 return shndx == obj->efile.data_shndx ||
2928 shndx == obj->efile.bss_shndx ||
2929 shndx == obj->efile.rodata_shndx;
2932 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
2935 return shndx == obj->efile.maps_shndx ||
2936 shndx == obj->efile.btf_maps_shndx;
2939 static enum libbpf_map_type
2940 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
2942 if (shndx == obj->efile.data_shndx)
2943 return LIBBPF_MAP_DATA;
2944 else if (shndx == obj->efile.bss_shndx)
2945 return LIBBPF_MAP_BSS;
2946 else if (shndx == obj->efile.rodata_shndx)
2947 return LIBBPF_MAP_RODATA;
2948 else if (shndx == obj->efile.symbols_shndx)
2949 return LIBBPF_MAP_KCONFIG;
2951 return LIBBPF_MAP_UNSPEC;
2954 static int bpf_program__record_reloc(struct bpf_program *prog,
2955 struct reloc_desc *reloc_desc,
2956 __u32 insn_idx, const char *name,
2957 const GElf_Sym *sym, const GElf_Rel *rel)
2959 struct bpf_insn *insn = &prog->insns[insn_idx];
2960 size_t map_idx, nr_maps = prog->obj->nr_maps;
2961 struct bpf_object *obj = prog->obj;
2962 __u32 shdr_idx = sym->st_shndx;
2963 enum libbpf_map_type type;
2964 struct bpf_map *map;
2966 /* sub-program call relocation */
2967 if (insn->code == (BPF_JMP | BPF_CALL)) {
2968 if (insn->src_reg != BPF_PSEUDO_CALL) {
2969 pr_warn("incorrect bpf_call opcode\n");
2970 return -LIBBPF_ERRNO__RELOC;
2972 /* text_shndx can be 0, if no default "main" program exists */
2973 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
2974 pr_warn("bad call relo against section %u\n", shdr_idx);
2975 return -LIBBPF_ERRNO__RELOC;
2977 if (sym->st_value % 8) {
2978 pr_warn("bad call relo offset: %zu\n",
2979 (size_t)sym->st_value);
2980 return -LIBBPF_ERRNO__RELOC;
2982 reloc_desc->type = RELO_CALL;
2983 reloc_desc->insn_idx = insn_idx;
2984 reloc_desc->sym_off = sym->st_value;
2985 obj->has_pseudo_calls = true;
2989 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
2990 pr_warn("invalid relo for insns[%d].code 0x%x\n",
2991 insn_idx, insn->code);
2992 return -LIBBPF_ERRNO__RELOC;
2995 if (sym_is_extern(sym)) {
2996 int sym_idx = GELF_R_SYM(rel->r_info);
2997 int i, n = obj->nr_extern;
2998 struct extern_desc *ext;
3000 for (i = 0; i < n; i++) {
3001 ext = &obj->externs[i];
3002 if (ext->sym_idx == sym_idx)
3006 pr_warn("extern relo failed to find extern for sym %d\n",
3008 return -LIBBPF_ERRNO__RELOC;
3010 pr_debug("found extern #%d '%s' (sym %d, off %u) for insn %u\n",
3011 i, ext->name, ext->sym_idx, ext->data_off, insn_idx);
3012 reloc_desc->type = RELO_EXTERN;
3013 reloc_desc->insn_idx = insn_idx;
3014 reloc_desc->sym_off = ext->data_off;
3018 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3019 pr_warn("invalid relo for \'%s\' in special section 0x%x; forgot to initialize global var?..\n",
3021 return -LIBBPF_ERRNO__RELOC;
3024 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
3026 /* generic map reference relocation */
3027 if (type == LIBBPF_MAP_UNSPEC) {
3028 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3029 pr_warn("bad map relo against section %u\n",
3031 return -LIBBPF_ERRNO__RELOC;
3033 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3034 map = &obj->maps[map_idx];
3035 if (map->libbpf_type != type ||
3036 map->sec_idx != sym->st_shndx ||
3037 map->sec_offset != sym->st_value)
3039 pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n",
3040 map_idx, map->name, map->sec_idx,
3041 map->sec_offset, insn_idx);
3044 if (map_idx >= nr_maps) {
3045 pr_warn("map relo failed to find map for sec %u, off %zu\n",
3046 shdr_idx, (size_t)sym->st_value);
3047 return -LIBBPF_ERRNO__RELOC;
3049 reloc_desc->type = RELO_LD64;
3050 reloc_desc->insn_idx = insn_idx;
3051 reloc_desc->map_idx = map_idx;
3052 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
3056 /* global data map relocation */
3057 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
3058 pr_warn("bad data relo against section %u\n", shdr_idx);
3059 return -LIBBPF_ERRNO__RELOC;
3061 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3062 map = &obj->maps[map_idx];
3063 if (map->libbpf_type != type)
3065 pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n",
3066 map_idx, map->name, map->sec_idx, map->sec_offset,
3070 if (map_idx >= nr_maps) {
3071 pr_warn("data relo failed to find map for sec %u\n",
3073 return -LIBBPF_ERRNO__RELOC;
3076 reloc_desc->type = RELO_DATA;
3077 reloc_desc->insn_idx = insn_idx;
3078 reloc_desc->map_idx = map_idx;
3079 reloc_desc->sym_off = sym->st_value;
3084 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
3085 Elf_Data *data, struct bpf_object *obj)
3087 Elf_Data *symbols = obj->efile.symbols;
3090 pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
3091 nrels = shdr->sh_size / shdr->sh_entsize;
3093 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
3094 if (!prog->reloc_desc) {
3095 pr_warn("failed to alloc memory in relocation\n");
3098 prog->nr_reloc = nrels;
3100 for (i = 0; i < nrels; i++) {
3106 if (!gelf_getrel(data, i, &rel)) {
3107 pr_warn("relocation: failed to get %d reloc\n", i);
3108 return -LIBBPF_ERRNO__FORMAT;
3110 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
3111 pr_warn("relocation: symbol %"PRIx64" not found\n",
3112 GELF_R_SYM(rel.r_info));
3113 return -LIBBPF_ERRNO__FORMAT;
3115 if (rel.r_offset % sizeof(struct bpf_insn))
3116 return -LIBBPF_ERRNO__FORMAT;
3118 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
3119 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
3120 sym.st_name) ? : "<?>";
3122 pr_debug("relo for shdr %u, symb %zu, value %zu, type %d, bind %d, name %d (\'%s\'), insn %u\n",
3123 (__u32)sym.st_shndx, (size_t)GELF_R_SYM(rel.r_info),
3124 (size_t)sym.st_value, GELF_ST_TYPE(sym.st_info),
3125 GELF_ST_BIND(sym.st_info), sym.st_name, name,
3128 err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
3129 insn_idx, name, &sym, &rel);
3136 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
3138 struct bpf_map_def *def = &map->def;
3139 __u32 key_type_id = 0, value_type_id = 0;
3142 /* if it's a BTF-defined map, we don't need to search for type IDs.
3143 * A struct_ops map does not need btf_key_type_id or
3144 * btf_value_type_id.
3146 if (map->sec_idx == obj->efile.btf_maps_shndx ||
3147 bpf_map__is_struct_ops(map))
3150 if (!bpf_map__is_internal(map)) {
3151 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
3152 def->value_size, &key_type_id,
3156 * LLVM annotates global data differently in BTF, that is,
3157 * only as '.data', '.bss' or '.rodata'.
3159 ret = btf__find_by_name(obj->btf,
3160 libbpf_type_to_btf_name[map->libbpf_type]);
3165 map->btf_key_type_id = key_type_id;
3166 map->btf_value_type_id = bpf_map__is_internal(map) ?
3167 ret : value_type_id;
3171 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3173 struct bpf_map_info info = {};
3174 __u32 len = sizeof(info);
3178 err = bpf_obj_get_info_by_fd(fd, &info, &len);
3182 new_name = strdup(info.name);
3186 new_fd = open("/", O_RDONLY | O_CLOEXEC);
3189 goto err_free_new_name;
3192 new_fd = dup3(fd, new_fd, O_CLOEXEC);
3195 goto err_close_new_fd;
3198 err = zclose(map->fd);
3201 goto err_close_new_fd;
3206 map->name = new_name;
3207 map->def.type = info.type;
3208 map->def.key_size = info.key_size;
3209 map->def.value_size = info.value_size;
3210 map->def.max_entries = info.max_entries;
3211 map->def.map_flags = info.map_flags;
3212 map->btf_key_type_id = info.btf_key_type_id;
3213 map->btf_value_type_id = info.btf_value_type_id;
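/* Editor's sketch (illustrative, not part of libbpf): a typical caller
 * adopts an already-pinned map before bpf_object__load(), e.g.
 *
 *	int fd = bpf_obj_get("/sys/fs/bpf/my_map");  // pin path hypothetical
 *	if (fd >= 0)
 *		err = bpf_map__reuse_fd(map, fd);
 *
 * On success the map's fd and definition reflect the existing kernel
 * object, so bpf_object__create_maps() will skip re-creating it.
 */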
3225 int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3227 if (!map || !max_entries)
3230 /* If map already created, its attributes can't be changed. */
3234 map->def.max_entries = max_entries;
3240 bpf_object__probe_loading(struct bpf_object *obj)
3242 struct bpf_load_program_attr attr;
3243 char *cp, errmsg[STRERR_BUFSIZE];
3244 struct bpf_insn insns[] = {
3245 BPF_MOV64_IMM(BPF_REG_0, 0),
3250 /* make sure basic loading works */
3252 memset(&attr, 0, sizeof(attr));
3253 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3255 attr.insns_cnt = ARRAY_SIZE(insns);
3256 attr.license = "GPL";
3258 ret = bpf_load_program_xattr(&attr, NULL, 0);
3261 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3262 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
3263 "program. Make sure your kernel supports BPF "
3264 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
3265 "set to big enough value.\n", __func__, cp, ret);
3274 bpf_object__probe_name(struct bpf_object *obj)
3276 struct bpf_load_program_attr attr;
3277 struct bpf_insn insns[] = {
3278 BPF_MOV64_IMM(BPF_REG_0, 0),
3283 /* make sure loading with name works */
3285 memset(&attr, 0, sizeof(attr));
3286 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3288 attr.insns_cnt = ARRAY_SIZE(insns);
3289 attr.license = "GPL";
3291 ret = bpf_load_program_xattr(&attr, NULL, 0);
3301 bpf_object__probe_global_data(struct bpf_object *obj)
3303 struct bpf_load_program_attr prg_attr;
3304 struct bpf_create_map_attr map_attr;
3305 char *cp, errmsg[STRERR_BUFSIZE];
3306 struct bpf_insn insns[] = {
3307 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
3308 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
3309 BPF_MOV64_IMM(BPF_REG_0, 0),
3314 memset(&map_attr, 0, sizeof(map_attr));
3315 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3316 map_attr.key_size = sizeof(int);
3317 map_attr.value_size = 32;
3318 map_attr.max_entries = 1;
3320 map = bpf_create_map_xattr(&map_attr);
3322 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3323 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3324 __func__, cp, errno);
3330 memset(&prg_attr, 0, sizeof(prg_attr));
3331 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3332 prg_attr.insns = insns;
3333 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3334 prg_attr.license = "GPL";
3336 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
3338 obj->caps.global_data = 1;
3346 static int bpf_object__probe_btf_func(struct bpf_object *obj)
3348 static const char strs[] = "\0int\0x\0a";
3349 /* void x(int a) {} */
3352 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3353 /* FUNC_PROTO */ /* [2] */
3354 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3355 BTF_PARAM_ENC(7, 1),
3356 /* FUNC x */ /* [3] */
3357 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
3361 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
3362 strs, sizeof(strs));
3364 obj->caps.btf_func = 1;
3372 static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
3374 static const char strs[] = "\0int\0x\0a";
3375 /* void x(int a) {} encoded with BTF_FUNC_GLOBAL linkage */
3378 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3379 /* FUNC_PROTO */ /* [2] */
3380 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3381 BTF_PARAM_ENC(7, 1),
3382 /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
3383 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
3387 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
3388 strs, sizeof(strs));
3390 obj->caps.btf_func_global = 1;
3398 static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
3400 static const char strs[] = "\0x\0.data";
3404 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3405 /* VAR x */ /* [2] */
3406 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
3408 /* DATASEC val */ /* [3] */
3409 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
3410 BTF_VAR_SECINFO_ENC(2, 0, 4),
3414 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
3415 strs, sizeof(strs));
3417 obj->caps.btf_datasec = 1;
3425 static int bpf_object__probe_array_mmap(struct bpf_object *obj)
3427 struct bpf_create_map_attr attr = {
3428 .map_type = BPF_MAP_TYPE_ARRAY,
3429 .map_flags = BPF_F_MMAPABLE,
3430 .key_size = sizeof(int),
3431 .value_size = sizeof(int),
3436 fd = bpf_create_map_xattr(&attr);
3438 obj->caps.array_mmap = 1;
3447 bpf_object__probe_exp_attach_type(struct bpf_object *obj)
3449 struct bpf_load_program_attr attr;
3450 struct bpf_insn insns[] = {
3451 BPF_MOV64_IMM(BPF_REG_0, 0),
3456 memset(&attr, 0, sizeof(attr));
3457 /* use any valid combination of program type and (optional)
3458 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS)
3459 * to see if kernel supports expected_attach_type field for
3460 * BPF_PROG_LOAD command
3462 attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
3463 attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
3465 attr.insns_cnt = ARRAY_SIZE(insns);
3466 attr.license = "GPL";
3468 fd = bpf_load_program_xattr(&attr, NULL, 0);
3470 obj->caps.exp_attach_type = 1;
3478 bpf_object__probe_caps(struct bpf_object *obj)
3480 int (*probe_fn[])(struct bpf_object *obj) = {
3481 bpf_object__probe_name,
3482 bpf_object__probe_global_data,
3483 bpf_object__probe_btf_func,
3484 bpf_object__probe_btf_func_global,
3485 bpf_object__probe_btf_datasec,
3486 bpf_object__probe_array_mmap,
3487 bpf_object__probe_exp_attach_type,
3491 for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
3492 ret = probe_fn[i](obj);
3494 pr_debug("Probe #%d failed with %d.\n", i, ret);
3500 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
3502 struct bpf_map_info map_info = {};
3503 char msg[STRERR_BUFSIZE];
3506 map_info_len = sizeof(map_info);
3508 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
3509 pr_warn("failed to get map info for map FD %d: %s\n",
3510 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
3514 return (map_info.type == map->def.type &&
3515 map_info.key_size == map->def.key_size &&
3516 map_info.value_size == map->def.value_size &&
3517 map_info.max_entries == map->def.max_entries &&
3518 map_info.map_flags == map->def.map_flags);
3522 bpf_object__reuse_map(struct bpf_map *map)
3524 char *cp, errmsg[STRERR_BUFSIZE];
3527 pin_fd = bpf_obj_get(map->pin_path);
3530 if (err == -ENOENT) {
3531 pr_debug("found no pinned map to reuse at '%s'\n",
3536 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
3537 pr_warn("couldn't retrieve pinned map '%s': %s\n",
3542 if (!map_is_reuse_compat(map, pin_fd)) {
3543 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
3549 err = bpf_map__reuse_fd(map, pin_fd);
3555 pr_debug("reused pinned map at '%s'\n", map->pin_path);
3561 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
3563 enum libbpf_map_type map_type = map->libbpf_type;
3564 char *cp, errmsg[STRERR_BUFSIZE];
3567 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
3570 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
3571 pr_warn("Error setting initial map(%s) contents: %s\n",
3576 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
3577 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
3578 err = bpf_map_freeze(map->fd);
3581 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
3582 pr_warn("Error freezing map(%s) as read-only: %s\n",
3590 static void bpf_map__destroy(struct bpf_map *map);
3592 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
3594 struct bpf_create_map_attr create_attr;
3595 struct bpf_map_def *def = &map->def;
3597 memset(&create_attr, 0, sizeof(create_attr));
3600 create_attr.name = map->name;
3601 create_attr.map_ifindex = map->map_ifindex;
3602 create_attr.map_type = def->type;
3603 create_attr.map_flags = def->map_flags;
3604 create_attr.key_size = def->key_size;
3605 create_attr.value_size = def->value_size;
3607 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
3610 nr_cpus = libbpf_num_possible_cpus();
3612 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
3613 map->name, nr_cpus);
3616 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
3617 create_attr.max_entries = nr_cpus;
3619 create_attr.max_entries = def->max_entries;
3622 if (bpf_map__is_struct_ops(map))
3623 create_attr.btf_vmlinux_value_type_id =
3624 map->btf_vmlinux_value_type_id;
3626 create_attr.btf_fd = 0;
3627 create_attr.btf_key_type_id = 0;
3628 create_attr.btf_value_type_id = 0;
3629 if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
3630 create_attr.btf_fd = btf__fd(obj->btf);
3631 create_attr.btf_key_type_id = map->btf_key_type_id;
3632 create_attr.btf_value_type_id = map->btf_value_type_id;
3635 if (bpf_map_type__is_map_in_map(def->type)) {
3636 if (map->inner_map) {
3639 err = bpf_object__create_map(obj, map->inner_map);
3641 pr_warn("map '%s': failed to create inner map: %d\n",
3645 map->inner_map_fd = bpf_map__fd(map->inner_map);
3647 if (map->inner_map_fd >= 0)
3648 create_attr.inner_map_fd = map->inner_map_fd;
3651 map->fd = bpf_create_map_xattr(&create_attr);
3652 if (map->fd < 0 && (create_attr.btf_key_type_id ||
3653 create_attr.btf_value_type_id)) {
3654 char *cp, errmsg[STRERR_BUFSIZE];
3657 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
3658 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
3659 map->name, cp, err);
3660 create_attr.btf_fd = 0;
3661 create_attr.btf_key_type_id = 0;
3662 create_attr.btf_value_type_id = 0;
3663 map->btf_key_type_id = 0;
3664 map->btf_value_type_id = 0;
3665 map->fd = bpf_create_map_xattr(&create_attr);
3671 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
3672 bpf_map__destroy(map->inner_map);
3673 zfree(&map->inner_map);
3680 bpf_object__create_maps(struct bpf_object *obj)
3682 struct bpf_map *map;
3683 char *cp, errmsg[STRERR_BUFSIZE];
3687 for (i = 0; i < obj->nr_maps; i++) {
3688 map = &obj->maps[i];
3690 if (map->pin_path) {
3691 err = bpf_object__reuse_map(map);
3693 pr_warn("map '%s': error reusing pinned map\n",
3700 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
3701 map->name, map->fd);
3705 err = bpf_object__create_map(obj, map);
3709 pr_debug("map '%s': created successfully, fd=%d\n", map->name,
3712 if (bpf_map__is_internal(map)) {
3713 err = bpf_object__populate_internal_map(obj, map);
3720 if (map->init_slots_sz) {
3721 for (j = 0; j < map->init_slots_sz; j++) {
3722 const struct bpf_map *targ_map;
3725 if (!map->init_slots[j])
3728 targ_map = map->init_slots[j];
3729 fd = bpf_map__fd(targ_map);
3730 err = bpf_map_update_elem(map->fd, &j, &fd, 0);
3733 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
3734 map->name, j, targ_map->name,
3738 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
3739 map->name, j, targ_map->name, fd);
3741 zfree(&map->init_slots);
3742 map->init_slots_sz = 0;
3745 if (map->pin_path && !map->pinned) {
3746 err = bpf_map__pin(map, NULL);
3748 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
3749 map->name, map->pin_path, err);
3759 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
3760 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
3762 for (j = 0; j < i; j++)
3763 zclose(obj->maps[j].fd);
3768 check_btf_ext_reloc_err(struct bpf_program *prog, int err,
3769 void *btf_prog_info, const char *info_name)
3771 if (err != -ENOENT) {
3772 pr_warn("Error in loading %s for sec %s.\n",
3773 info_name, prog->section_name);
3777 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
3779 if (btf_prog_info) {
3781 * Some info has already been found, but there is a problem
3782 * with the last btf_ext reloc. We must error out.
3784 pr_warn("Error in relocating %s for sec %s.\n",
3785 info_name, prog->section_name);
3789 /* Had a problem loading the very first info. Ignore the rest. */
3790 pr_warn("Cannot find %s for main program sec %s. Ignoring all %s.\n",
3791 info_name, prog->section_name, info_name);
3796 bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
3797 const char *section_name, __u32 insn_offset)
3801 if (!insn_offset || prog->func_info) {
3803 * !insn_offset => main program
3805 * For sub prog, the main program's func_info has to
3806 * be loaded first (i.e. prog->func_info != NULL)
3808 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
3809 section_name, insn_offset,
3811 &prog->func_info_cnt);
3813 return check_btf_ext_reloc_err(prog, err,
3817 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
3820 if (!insn_offset || prog->line_info) {
3821 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
3822 section_name, insn_offset,
3824 &prog->line_info_cnt);
3826 return check_btf_ext_reloc_err(prog, err,
3830 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
3836 #define BPF_CORE_SPEC_MAX_LEN 64
3838 /* represents BPF CO-RE field or array element accessor */
3839 struct bpf_core_accessor {
3840 __u32 type_id; /* struct/union type or array element type */
3841 __u32 idx; /* field index or array index */
3842 const char *name; /* field name or NULL for array accessor */
3845 struct bpf_core_spec {
3846 const struct btf *btf;
3847 /* high-level spec: named fields and array indices only */
3848 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
3849 /* high-level spec length */
3851 /* raw, low-level spec: 1-to-1 with accessor spec string */
3852 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
3853 /* raw spec length */
3855 /* field bit offset represented by spec */
3859 static bool str_is_empty(const char *s)
3864 static bool is_flex_arr(const struct btf *btf,
3865 const struct bpf_core_accessor *acc,
3866 const struct btf_array *arr)
3868 const struct btf_type *t;
3870 /* not a flexible array if it's not inside a struct or has non-zero size */
3871 if (!acc->name || arr->nelems > 0)
3874 /* has to be the last member of enclosing struct */
3875 t = btf__type_by_id(btf, acc->type_id);
3876 return acc->idx == btf_vlen(t) - 1;
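/* Editor's sketch (illustrative): the check above matches the classic
 * flexible array member pattern, e.g.
 *
 *	struct record {
 *		int len;
 *		char data[];	// last member, nelems == 0 in BTF
 *	};
 */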
3880 * Turn bpf_field_reloc into a low- and high-level spec representation,
3881 * validating correctness along the way, as well as calculating resulting
3882 * field bit offset, specified by accessor string. Low-level spec captures
3883 * every single level of nestedness, including traversing anonymous
3884 * struct/union members. High-level one only captures semantically meaningful
3885 * "turning points": named fields and array indicies.
3886 * E.g., for this case:
3889 * int __unimportant;
3897 * struct sample *s = ...;
3899 * int x = &s->a[3]; // access string = '0:1:2:3'
3901 * Low-level spec has 1:1 mapping with each element of access string (it's
3902 * just a parsed access string representation): [0, 1, 2, 3].
3904 * High-level spec will capture only 3 points:
3905 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
3906 * - field 'a' access (corresponds to '2' in low-level spec);
3907 * - array element #3 access (corresponds to '3' in low-level spec).
3910 static int bpf_core_spec_parse(const struct btf *btf,
3912 const char *spec_str,
3913 struct bpf_core_spec *spec)
3915 int access_idx, parsed_len, i;
3916 struct bpf_core_accessor *acc;
3917 const struct btf_type *t;
3922 if (str_is_empty(spec_str) || *spec_str == ':')
3925 memset(spec, 0, sizeof(*spec));
3928 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
3930 if (*spec_str == ':')
3932 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
3934 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
3936 spec_str += parsed_len;
3937 spec->raw_spec[spec->raw_len++] = access_idx;
3940 if (spec->raw_len == 0)
3943 /* first spec value is always reloc type array index */
3944 t = skip_mods_and_typedefs(btf, type_id, &id);
3948 access_idx = spec->raw_spec[0];
3949 spec->spec[0].type_id = id;
3950 spec->spec[0].idx = access_idx;
3953 sz = btf__resolve_size(btf, id);
3956 spec->bit_offset = access_idx * sz * 8;
3958 for (i = 1; i < spec->raw_len; i++) {
3959 t = skip_mods_and_typedefs(btf, id, &id);
3963 access_idx = spec->raw_spec[i];
3964 acc = &spec->spec[spec->len];
3966 if (btf_is_composite(t)) {
3967 const struct btf_member *m;
3970 if (access_idx >= btf_vlen(t))
3973 bit_offset = btf_member_bit_offset(t, access_idx);
3974 spec->bit_offset += bit_offset;
3976 m = btf_members(t) + access_idx;
3978 name = btf__name_by_offset(btf, m->name_off);
3979 if (str_is_empty(name))
3983 acc->idx = access_idx;
3989 } else if (btf_is_array(t)) {
3990 const struct btf_array *a = btf_array(t);
3993 t = skip_mods_and_typedefs(btf, a->type, &id);
3997 flex = is_flex_arr(btf, acc - 1, a);
3998 if (!flex && access_idx >= a->nelems)
4001 spec->spec[spec->len].type_id = id;
4002 spec->spec[spec->len].idx = access_idx;
4005 sz = btf__resolve_size(btf, id);
4008 spec->bit_offset += access_idx * sz * 8;
4010 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
4011 type_id, spec_str, i, id, btf_kind(t));
4019 static bool bpf_core_is_flavor_sep(const char *s)
4021 /* check X___Y name pattern, where X and Y are not underscores */
4022 return s[0] != '_' && /* X */
4023 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
4024 s[4] != '_'; /* Y */
4027 /* Given 'some_struct_name___with_flavor' return the length of a name prefix
4028 * before last triple underscore. Struct name part after last triple
4029 * underscore is ignored by BPF CO-RE relocation during relocation matching.
4031 static size_t bpf_core_essential_name_len(const char *name)
4033 size_t n = strlen(name);
4036 for (i = n - 5; i >= 0; i--) {
4037 if (bpf_core_is_flavor_sep(name + i))
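/* Editor's worked example (illustrative):
 *	bpf_core_essential_name_len("task_struct")        == 11 (no flavor)
 *	bpf_core_essential_name_len("task_struct___v5_4") == 11
 * i.e. everything from the last "___" onward is ignored while matching.
 */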
4043 /* dynamically sized list of type IDs */
4049 static void bpf_core_free_cands(struct ids_vec *cand_ids)
4051 free(cand_ids->data);
4055 static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
4056 __u32 local_type_id,
4057 const struct btf *targ_btf)
4059 size_t local_essent_len, targ_essent_len;
4060 const char *local_name, *targ_name;
4061 const struct btf_type *t;
4062 struct ids_vec *cand_ids;
4066 t = btf__type_by_id(local_btf, local_type_id);
4068 return ERR_PTR(-EINVAL);
4070 local_name = btf__name_by_offset(local_btf, t->name_off);
4071 if (str_is_empty(local_name))
4072 return ERR_PTR(-EINVAL);
4073 local_essent_len = bpf_core_essential_name_len(local_name);
4075 cand_ids = calloc(1, sizeof(*cand_ids));
4077 return ERR_PTR(-ENOMEM);
4079 n = btf__get_nr_types(targ_btf);
4080 for (i = 1; i <= n; i++) {
4081 t = btf__type_by_id(targ_btf, i);
4082 targ_name = btf__name_by_offset(targ_btf, t->name_off);
4083 if (str_is_empty(targ_name))
4086 t = skip_mods_and_typedefs(targ_btf, i, NULL);
4087 if (!btf_is_composite(t) && !btf_is_array(t))
4090 targ_essent_len = bpf_core_essential_name_len(targ_name);
4091 if (targ_essent_len != local_essent_len)
4094 if (strncmp(local_name, targ_name, local_essent_len) == 0) {
4095 pr_debug("[%d] %s: found candidate [%d] %s\n",
4096 local_type_id, local_name, i, targ_name);
4097 new_ids = reallocarray(cand_ids->data,
4099 sizeof(*cand_ids->data));
4104 cand_ids->data = new_ids;
4105 cand_ids->data[cand_ids->len++] = i;
4110 bpf_core_free_cands(cand_ids);
4111 return ERR_PTR(err);
4114 /* Check two types for compatibility, skipping const/volatile/restrict and
4115 * typedefs, to ensure we are relocating compatible entities:
4116 * - any two STRUCTs/UNIONs are compatible and can be mixed;
4117 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
4118 * - any two PTRs are always compatible;
4119 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
4120 * least one of the enums should be anonymous;
4121 * - for ENUMs, check sizes, names are ignored;
4122 * - for INT, size and signedness are ignored;
4123 * - for ARRAY, dimensionality is ignored, element types are checked for
4124 * compatibility recursively;
4125 * - everything else shouldn't be ever a target of relocation.
4126 * These rules are not set in stone and probably will be adjusted as we get
4127 * more experience with using BPF CO-RE relocations.
4129 static int bpf_core_fields_are_compat(const struct btf *local_btf,
4131 const struct btf *targ_btf,
4134 const struct btf_type *local_type, *targ_type;
4137 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
4138 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4139 if (!local_type || !targ_type)
4142 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
4144 if (btf_kind(local_type) != btf_kind(targ_type))
4147 switch (btf_kind(local_type)) {
4151 case BTF_KIND_ENUM: {
4152 const char *local_name, *targ_name;
4153 size_t local_len, targ_len;
4155 local_name = btf__name_by_offset(local_btf,
4156 local_type->name_off);
4157 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
4158 local_len = bpf_core_essential_name_len(local_name);
4159 targ_len = bpf_core_essential_name_len(targ_name);
4160 /* one of them is anonymous or both w/ same flavor-less names */
4161 return local_len == 0 || targ_len == 0 ||
4162 (local_len == targ_len &&
4163 strncmp(local_name, targ_name, local_len) == 0);
4166 /* just reject deprecated bitfield-like integers; all other
4167 * integers are by default compatible between each other
4169 return btf_int_offset(local_type) == 0 &&
4170 btf_int_offset(targ_type) == 0;
4171 case BTF_KIND_ARRAY:
4172 local_id = btf_array(local_type)->type;
4173 targ_id = btf_array(targ_type)->type;
4176 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
4177 btf_kind(local_type), local_id, targ_id);
4183 * Given single high-level named field accessor in local type, find
4184 * corresponding high-level accessor for a target type. Along the way,
4185 * maintain low-level spec for target as well. Also keep updating target
4188 * Searching is performed through recursive exhaustive enumeration of all
4189 * fields of a struct/union. If there are any anonymous (embedded)
4190 * structs/unions, they are recursively searched as well. If field with
4191 * desired name is found, check compatibility between local and target types,
4192 * before returning result.
4194 * 1 is returned, if field is found.
4195 * 0 is returned if no compatible field is found.
4196 * <0 is returned on error.
4198 static int bpf_core_match_member(const struct btf *local_btf,
4199 const struct bpf_core_accessor *local_acc,
4200 const struct btf *targ_btf,
4202 struct bpf_core_spec *spec,
4203 __u32 *next_targ_id)
4205 const struct btf_type *local_type, *targ_type;
4206 const struct btf_member *local_member, *m;
4207 const char *local_name, *targ_name;
4211 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4214 if (!btf_is_composite(targ_type))
4217 local_id = local_acc->type_id;
4218 local_type = btf__type_by_id(local_btf, local_id);
4219 local_member = btf_members(local_type) + local_acc->idx;
4220 local_name = btf__name_by_offset(local_btf, local_member->name_off);
4222 n = btf_vlen(targ_type);
4223 m = btf_members(targ_type);
4224 for (i = 0; i < n; i++, m++) {
4227 bit_offset = btf_member_bit_offset(targ_type, i);
4229 /* too deep struct/union/array nesting */
4230 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4233 /* speculate this member will be the good one */
4234 spec->bit_offset += bit_offset;
4235 spec->raw_spec[spec->raw_len++] = i;
4237 targ_name = btf__name_by_offset(targ_btf, m->name_off);
4238 if (str_is_empty(targ_name)) {
4239 /* embedded struct/union, we need to go deeper */
4240 found = bpf_core_match_member(local_btf, local_acc,
4242 spec, next_targ_id);
4243 if (found) /* either found or error */
4245 } else if (strcmp(local_name, targ_name) == 0) {
4246 /* matching named field */
4247 struct bpf_core_accessor *targ_acc;
4249 targ_acc = &spec->spec[spec->len++];
4250 targ_acc->type_id = targ_id;
4252 targ_acc->name = targ_name;
4254 *next_targ_id = m->type;
4255 found = bpf_core_fields_are_compat(local_btf,
4259 spec->len--; /* pop accessor */
4262 /* member turned out not to be what we looked for */
4263 spec->bit_offset -= bit_offset;
4271 * Try to match local spec to a target type and, if successful, produce full
4272 * target spec (high-level, low-level + bit offset).
4274 static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
4275 const struct btf *targ_btf, __u32 targ_id,
4276 struct bpf_core_spec *targ_spec)
4278 const struct btf_type *targ_type;
4279 const struct bpf_core_accessor *local_acc;
4280 struct bpf_core_accessor *targ_acc;
4283 memset(targ_spec, 0, sizeof(*targ_spec));
4284 targ_spec->btf = targ_btf;
4286 local_acc = &local_spec->spec[0];
4287 targ_acc = &targ_spec->spec[0];
4289 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
4290 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
4295 if (local_acc->name) {
4296 matched = bpf_core_match_member(local_spec->btf,
4299 targ_spec, &targ_id);
4303 /* for i=0, targ_id is already treated as array element
4304 * type (because it's the original struct), for others
4305 * we should find array element type first
4308 const struct btf_array *a;
4311 if (!btf_is_array(targ_type))
4314 a = btf_array(targ_type);
4315 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
4316 if (!flex && local_acc->idx >= a->nelems)
4318 if (!skip_mods_and_typedefs(targ_btf, a->type,
4323 /* too deep struct/union/array nesting */
4324 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4327 targ_acc->type_id = targ_id;
4328 targ_acc->idx = local_acc->idx;
4329 targ_acc->name = NULL;
4331 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
4332 targ_spec->raw_len++;
4334 sz = btf__resolve_size(targ_btf, targ_id);
4337 targ_spec->bit_offset += local_acc->idx * sz * 8;
4344 static int bpf_core_calc_field_relo(const struct bpf_program *prog,
4345 const struct bpf_field_reloc *relo,
4346 const struct bpf_core_spec *spec,
4347 __u32 *val, bool *validate)
4349 const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
4350 const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
4351 __u32 byte_off, byte_sz, bit_off, bit_sz;
4352 const struct btf_member *m;
4353 const struct btf_type *mt;
4357 /* a[n] accessor needs special handling */
4359 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
4360 *val = spec->bit_offset / 8;
4361 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
4362 sz = btf__resolve_size(spec->btf, acc->type_id);
4367 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
4368 bpf_program__title(prog, false),
4369 relo->kind, relo->insn_off / 8);
4377 m = btf_members(t) + acc->idx;
4378 mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
4379 bit_off = spec->bit_offset;
4380 bit_sz = btf_member_bitfield_size(t, acc->idx);
4382 bitfield = bit_sz > 0;
4385 byte_off = bit_off / 8 / byte_sz * byte_sz;
4386 /* figure out smallest int size necessary for bitfield load */
4387 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
4389 /* bitfield can't be read with 64-bit read */
4390 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
4391 bpf_program__title(prog, false),
4392 relo->kind, relo->insn_off / 8);
4396 byte_off = bit_off / 8 / byte_sz * byte_sz;
4399 sz = btf__resolve_size(spec->btf, m->type);
4403 byte_off = spec->bit_offset / 8;
4404 bit_sz = byte_sz * 8;
4407 /* for bitfields, all the relocatable aspects are ambiguous and we
4408 * might disagree with compiler, so turn off validation of expected
4409 * value, except for signedness
4412 *validate = !bitfield;
4414 switch (relo->kind) {
4415 case BPF_FIELD_BYTE_OFFSET:
4418 case BPF_FIELD_BYTE_SIZE:
4421 case BPF_FIELD_SIGNED:
4422 /* enums will be assumed unsigned */
4423 *val = btf_is_enum(mt) ||
4424 (btf_int_encoding(mt) & BTF_INT_SIGNED);
4426 *validate = true; /* signedness is never ambiguous */
4428 case BPF_FIELD_LSHIFT_U64:
4429 #if __BYTE_ORDER == __LITTLE_ENDIAN
4430 *val = 64 - (bit_off + bit_sz - byte_off * 8);
4432 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
4435 case BPF_FIELD_RSHIFT_U64:
4438 *validate = true; /* right shift is never ambiguous */
4440 case BPF_FIELD_EXISTS:
4442 pr_warn("prog '%s': unknown relo %d at insn #%d\n",
4443 bpf_program__title(prog, false),
4444 relo->kind, relo->insn_off / 8);
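/* Editor's worked example (illustrative, little-endian): for a 3-bit
 * bitfield at bit_off = 13 within a load window starting at byte_off = 0:
 *	BPF_FIELD_LSHIFT_U64 = 64 - (13 + 3 - 0) = 48
 *	BPF_FIELD_RSHIFT_U64 = 64 - 3 = 61
 * so the generated code extracts the field as (v << 48) >> 61, using an
 * arithmetic right shift if the field is signed.
 */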
4452 * Patch relocatable BPF instruction.
4454 * Patched value is determined by relocation kind and target specification.
4455 * For field existence relocation target spec will be NULL if field is not
4457 * Expected insn->imm value is determined using relocation kind and local
4458 * spec, and is checked before patching instruction. If actual insn->imm value
4459 * is wrong, bail out with error.
4461 * Currently three kinds of BPF instructions are supported:
4462 * 1. rX = <imm> (assignment with immediate operand);
4463 * 2. rX += <imm> (arithmetic operations with immediate operand);
4465 static int bpf_core_reloc_insn(struct bpf_program *prog,
4466 const struct bpf_field_reloc *relo,
4468 const struct bpf_core_spec *local_spec,
4469 const struct bpf_core_spec *targ_spec)
4471 __u32 orig_val, new_val;
4472 struct bpf_insn *insn;
4473 bool validate = true;
4477 if (relo->insn_off % sizeof(struct bpf_insn))
4479 insn_idx = relo->insn_off / sizeof(struct bpf_insn);
4480 insn = &prog->insns[insn_idx];
4481 class = BPF_CLASS(insn->code);
4483 if (relo->kind == BPF_FIELD_EXISTS) {
4484 orig_val = 1; /* can't generate EXISTS relo w/o local field */
4485 new_val = targ_spec ? 1 : 0;
4486 } else if (!targ_spec) {
4487 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
4488 bpf_program__title(prog, false), relo_idx, insn_idx);
4489 insn->code = BPF_JMP | BPF_CALL;
4493 /* if this instruction is reachable (not dead code),
4494 * verifier will complain with the following message:
4495 * invalid func unknown#195896080
4497 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
4500 err = bpf_core_calc_field_relo(prog, relo, local_spec,
4501 &orig_val, &validate);
4504 err = bpf_core_calc_field_relo(prog, relo, targ_spec,
4513 if (BPF_SRC(insn->code) != BPF_K)
4515 if (validate && insn->imm != orig_val) {
4516 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
4517 bpf_program__title(prog, false), relo_idx,
4518 insn_idx, insn->imm, orig_val, new_val);
4521 orig_val = insn->imm;
4522 insn->imm = new_val;
4523 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
4524 bpf_program__title(prog, false), relo_idx, insn_idx,
4530 if (validate && insn->off != orig_val) {
4531 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LD/LDX/ST/STX) value: got %u, exp %u -> %u\n",
4532 bpf_program__title(prog, false), relo_idx,
4533 insn_idx, insn->off, orig_val, new_val);
4536 if (new_val > SHRT_MAX) {
4537 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
4538 bpf_program__title(prog, false), relo_idx,
4542 orig_val = insn->off;
4543 insn->off = new_val;
4544 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
4545 bpf_program__title(prog, false), relo_idx, insn_idx,
4549 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
4550 bpf_program__title(prog, false), relo_idx,
4551 insn_idx, insn->code, insn->src_reg, insn->dst_reg,
4552 insn->off, insn->imm);
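/* Editor's sketch (illustrative): for a BPF_FIELD_BYTE_OFFSET relocation
 * against a load like
 *	r0 = *(u32 *)(r1 + 8);	// 8 = field offset per local BTF
 * the code above first checks that insn->off matches the locally computed
 * 8, then rewrites it to the offset found in target BTF, say 16. Offsets
 * are made up for illustration.
 */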
4559 /* Output spec definition in the format:
4560 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
4561 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
4563 static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
4565 const struct btf_type *t;
4570 type_id = spec->spec[0].type_id;
4571 t = btf__type_by_id(spec->btf, type_id);
4572 s = btf__name_by_offset(spec->btf, t->name_off);
4573 libbpf_print(level, "[%u] %s + ", type_id, s);
4575 for (i = 0; i < spec->raw_len; i++)
4576 libbpf_print(level, "%d%s", spec->raw_spec[i],
4577 i == spec->raw_len - 1 ? " => " : ":");
4579 libbpf_print(level, "%u.%u @ &x",
4580 spec->bit_offset / 8, spec->bit_offset % 8);
4582 for (i = 0; i < spec->len; i++) {
4583 if (spec->spec[i].name)
4584 libbpf_print(level, ".%s", spec->spec[i].name);
4586 libbpf_print(level, "[%u]", spec->spec[i].idx);
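/* Editor's note: an illustrative (made-up) line in the format above:
 *	[25] sample + 0:1:2:3 => 4.0 @ &x[0].a[3]
 * i.e. type ID 25 named "sample", raw spec 0:1:2:3, byte offset 4,
 * bit offset 0, accessed as x[0].a[3].
 */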
4591 static size_t bpf_core_hash_fn(const void *key, void *ctx)
4596 static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
4601 static void *u32_as_hash_key(__u32 x)
4603 return (void *)(uintptr_t)x;
4607 * CO-RE relocate single instruction.
4609 * The outline and important points of the algorithm:
4610 * 1. For a given local type, find corresponding candidate target types.
4611 * Candidate type is a type with the same "essential" name, ignoring
4612 * everything after last triple underscore (___). E.g., `sample`,
4613 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
4614 * for each other. Names with triple underscore are referred to as
4615 * "flavors" and are useful, among other things, to allow to
4616 * specify/support incompatible variations of the same kernel struct, which
4617 * might differ between different kernel versions and/or build
4620 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
4621 * converter, when deduplicated BTF of a kernel still contains more than
4622 * one distinct type with the same name. In that case, ___2, ___3, etc.
4623 * are appended starting from the second name conflict. But flavors are
4624 * also useful when defined "locally", in a BPF program, to extract the same
4625 * data from incompatible changes between different kernel
4626 * versions/configurations. For instance, to handle field renames between
4627 * kernel versions, one can use two flavors of the struct name with the
4628 * same common name and use conditional relocations to extract that field,
4629 * depending on target kernel version.
4630 * 2. For each candidate type, try to match local specification to this
4631 * candidate target type. Matching involves finding corresponding
4632 * high-level spec accessors, meaning that all named fields should match,
4633 * as well as all array accesses should be within the actual bounds. Also,
4634 * types should be compatible (see bpf_core_fields_are_compat for details).
4635 * 3. It is supported and expected that there might be multiple flavors
4636 * matching the spec. As long as all the specs resolve to the same set of
4637 * offsets across all candidates, there is no error. If there is any
4638 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
4639 * imperfections of BTF deduplication, which can cause slight duplication of
4640 * the same BTF type, if some directly or indirectly referenced (by
4641 * pointer) type gets resolved to different actual types in different
4642 * object files. If such situation occurs, deduplicated BTF will end up
4643 * with two (or more) structurally identical types, which differ only in
4644 * types they refer to through pointer. This should be OK in most cases and
4646 * 4. The candidate type search is performed by linearly scanning through all
4647 * types in target BTF. It is anticipated that this is overall more
4648 * efficient memory-wise and not significantly worse (if not better)
4649 * CPU-wise compared to prebuilding a map from all local type names to
4650 * a list of candidate type names. It's also sped up by caching the resolved
4651 * list of matching candidates for each local "root" type ID that has at
4652 * least one bpf_field_reloc associated with it. This list is shared
4653 * between multiple relocations for the same type ID and is updated as some
4654 * of the candidates are pruned due to structural incompatibility.
4656 static int bpf_core_reloc_field(struct bpf_program *prog,
4657 const struct bpf_field_reloc *relo,
4659 const struct btf *local_btf,
4660 const struct btf *targ_btf,
4661 struct hashmap *cand_cache)
4663 const char *prog_name = bpf_program__title(prog, false);
4664 struct bpf_core_spec local_spec, cand_spec, targ_spec;
4665 const void *type_key = u32_as_hash_key(relo->type_id);
4666 const struct btf_type *local_type, *cand_type;
4667 const char *local_name, *cand_name;
4668 struct ids_vec *cand_ids;
4669 __u32 local_id, cand_id;
4670 const char *spec_str;
4673 local_id = relo->type_id;
4674 local_type = btf__type_by_id(local_btf, local_id);
4678 local_name = btf__name_by_offset(local_btf, local_type->name_off);
4679 if (str_is_empty(local_name))
4682 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
4683 if (str_is_empty(spec_str))
4686 err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
4688 pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
4689 prog_name, relo_idx, local_id, local_name, spec_str,
4694 pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
4696 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
4697 libbpf_print(LIBBPF_DEBUG, "\n");
4699 if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
4700 cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
4701 if (IS_ERR(cand_ids)) {
4702 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
4703 prog_name, relo_idx, local_id, local_name,
4705 return PTR_ERR(cand_ids);
4707 err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
4709 bpf_core_free_cands(cand_ids);
4714 for (i = 0, j = 0; i < cand_ids->len; i++) {
4715 cand_id = cand_ids->data[i];
4716 cand_type = btf__type_by_id(targ_btf, cand_id);
4717 cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
4719 err = bpf_core_spec_match(&local_spec, targ_btf,
4720 cand_id, &cand_spec);
4721 pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
4722 prog_name, relo_idx, i, cand_name);
4723 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
4724 libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
4726 pr_warn("prog '%s': relo #%d: matching error: %d\n",
4727 prog_name, relo_idx, err);
4734 targ_spec = cand_spec;
4735 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
4736 /* if there are many candidates, they should all
4737 * resolve to the same bit offset
4739 pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
4740 prog_name, relo_idx, cand_spec.bit_offset,
4741 targ_spec.bit_offset);
4745 cand_ids->data[j++] = cand_spec.spec[0].type_id;
4749 * For a BPF_FIELD_EXISTS relo, or when the BPF program has its own
4750 * field existence checks or kernel version/config checks, it's expected
4751 * that we might not find any candidates. In this case, if the field
4752 * wasn't found in any candidate, the list of candidates shouldn't
4753 * change at all; we'll just handle relocating appropriately,
4754 * depending on relo's kind.
4760 * If no candidates were found, it might be either a programmer error
4761 * or an expected case, depending on whether the instruction with the
4762 * relocation is guarded in some way that makes it unreachable (dead
4763 * code) if relocation can't be resolved. This is handled in
4764 * bpf_core_reloc_insn() uniformly by replacing that instruction with
4765 * BPF helper call insn (using invalid helper ID). If that instruction
4766 * is indeed unreachable, then it will be ignored and eliminated by
4767 * verifier. If it was an error, then verifier will complain and point
4768 * to a specific instruction number in its log.
4771 pr_debug("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
4772 prog_name, relo_idx, local_id, local_name, spec_str);
4774 /* bpf_core_reloc_insn should know how to handle missing targ_spec */
4775 err = bpf_core_reloc_insn(prog, relo, relo_idx, &local_spec,
4776 j ? &targ_spec : NULL);
4778 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
4779 prog_name, relo_idx, relo->insn_off, err);
4787 bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
4789 const struct btf_ext_info_sec *sec;
4790 const struct bpf_field_reloc *rec;
4791 const struct btf_ext_info *seg;
4792 struct hashmap_entry *entry;
4793 struct hashmap *cand_cache = NULL;
4794 struct bpf_program *prog;
4795 struct btf *targ_btf;
4796 const char *sec_name;
4800 targ_btf = btf__parse_elf(targ_btf_path, NULL);
4802 targ_btf = libbpf_find_kernel_btf();
4803 if (IS_ERR(targ_btf)) {
4804 pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
4805 return PTR_ERR(targ_btf);
4808 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
4809 if (IS_ERR(cand_cache)) {
4810 err = PTR_ERR(cand_cache);
4814 seg = &obj->btf_ext->field_reloc_info;
4815 for_each_btf_ext_sec(seg, sec) {
4816 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
4817 if (str_is_empty(sec_name)) {
4822 for (i = 0; i < obj->nr_programs; i++) {
4823 if (!strcmp(obj->programs[i].section_name, sec_name)) {
4824 prog = &obj->programs[i];
4829 pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
4835 pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
4836 sec_name, sec->num_info);
4838 for_each_btf_ext_rec(seg, sec, i, rec) {
4839 err = bpf_core_reloc_field(prog, rec, i, obj->btf,
4840 targ_btf, cand_cache);
4842 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
4850 btf__free(targ_btf);
4851 if (!IS_ERR_OR_NULL(cand_cache)) {
4852 hashmap__for_each_entry(cand_cache, entry, i) {
4853 bpf_core_free_cands(entry->value);
4855 hashmap__free(cand_cache);
4861 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
4865 if (obj->btf_ext->field_reloc_info.len)
4866 err = bpf_core_reloc_fields(obj, targ_btf_path);
4872 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
4873 struct reloc_desc *relo)
4875 struct bpf_insn *insn, *new_insn;
4876 struct bpf_program *text;
4880 if (prog->idx != obj->efile.text_shndx && prog->main_prog_cnt == 0) {
4881 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
4883 pr_warn("no .text section found yet relo into text exist\n");
4884 return -LIBBPF_ERRNO__RELOC;
4886 new_cnt = prog->insns_cnt + text->insns_cnt;
4887 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
4889 pr_warn("oom in prog realloc\n");
4892 prog->insns = new_insn;
4895 err = bpf_program_reloc_btf_ext(prog, obj,
4902 memcpy(new_insn + prog->insns_cnt, text->insns,
4903 text->insns_cnt * sizeof(*insn));
4904 prog->main_prog_cnt = prog->insns_cnt;
4905 prog->insns_cnt = new_cnt;
4906 pr_debug("added %zd insn from %s to prog %s\n",
4907 text->insns_cnt, text->section_name,
4908 prog->section_name);
4911 insn = &prog->insns[relo->insn_idx];
4912 insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
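/* Editor's worked example (illustrative): with the compiler-emitted
 * placeholder imm of -1, a call at insn_idx = 7 to a sub-prog at byte
 * offset sym_off = 24 within .text, where .text was appended at
 * main_prog_cnt = 100, ends up with
 *	imm = -1 + 24/8 + 100 - 7 = 95
 * and the runtime's target, insn_idx + 1 + imm = 103, is exactly the
 * sub-prog's first instruction (100 + 3). Numbers are made up.
 */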
4917 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
4925 err = bpf_program_reloc_btf_ext(prog, obj,
4926 prog->section_name, 0);
4931 if (!prog->reloc_desc)
4934 for (i = 0; i < prog->nr_reloc; i++) {
4935 struct reloc_desc *relo = &prog->reloc_desc[i];
4936 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
4938 if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
4939 pr_warn("relocation out of range: '%s'\n",
4940 prog->section_name);
4941 return -LIBBPF_ERRNO__RELOC;
4944 switch (relo->type) {
4946 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
4947 insn[0].imm = obj->maps[relo->map_idx].fd;
4950 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
4951 insn[1].imm = insn[0].imm + relo->sym_off;
4952 insn[0].imm = obj->maps[relo->map_idx].fd;
4955 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
4956 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
4957 insn[1].imm = relo->sym_off;
4960 err = bpf_program__reloc_text(prog, obj, relo);
4965 pr_warn("relo #%d: bad relo type %d\n", i, relo->type);
4970 zfree(&prog->reloc_desc);
4976 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
4978 struct bpf_program *prog;
4983 err = bpf_object__relocate_core(obj, targ_btf_path);
4985 pr_warn("failed to perform CO-RE relocations: %d\n",
4990 /* ensure .text is relocated first, as it's going to be copied as-is
4991 * later for sub-program calls
4993 for (i = 0; i < obj->nr_programs; i++) {
4994 prog = &obj->programs[i];
4995 if (prog->idx != obj->efile.text_shndx)
4998 err = bpf_program__relocate(prog, obj);
5000 pr_warn("failed to relocate '%s'\n", prog->section_name);
5005 /* now relocate everything but .text, which by now is relocated
5006 * properly, so we can safely copy raw sub-program instructions as-is
5008 for (i = 0; i < obj->nr_programs; i++) {
5009 prog = &obj->programs[i];
5010 if (prog->idx == obj->efile.text_shndx)
5013 err = bpf_program__relocate(prog, obj);
5015 pr_warn("failed to relocate '%s'\n", prog->section_name);
5022 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
5023 GElf_Shdr *shdr, Elf_Data *data);
5025 static int bpf_object__collect_map_relos(struct bpf_object *obj,
5026 GElf_Shdr *shdr, Elf_Data *data)
5028 int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
5029 const struct btf_var_secinfo *vi = NULL;
5030 const struct btf_type *sec, *var, *def;
5031 const struct btf_member *member;
5032 struct bpf_map *map, *targ_map;
5033 const char *name, *mname;
5040 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
5042 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
5046 symbols = obj->efile.symbols;
5047 nrels = shdr->sh_size / shdr->sh_entsize;
5048 for (i = 0; i < nrels; i++) {
5049 if (!gelf_getrel(data, i, &rel)) {
5050 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
5051 return -LIBBPF_ERRNO__FORMAT;
5053 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
5054 pr_warn(".maps relo #%d: symbol %zx not found\n",
5055 i, (size_t)GELF_R_SYM(rel.r_info));
5056 return -LIBBPF_ERRNO__FORMAT;
5058 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
5059 sym.st_name) ? : "<?>";
5060 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
5061 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
5063 return -LIBBPF_ERRNO__RELOC;
5066 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
5067 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
5068 (size_t)rel.r_offset, sym.st_name, name);
5070 for (j = 0; j < obj->nr_maps; j++) {
5071 map = &obj->maps[j];
5072 if (map->sec_idx != obj->efile.btf_maps_shndx)
5075 vi = btf_var_secinfos(sec) + map->btf_var_idx;
5076 if (vi->offset <= rel.r_offset &&
5077 rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
5080 if (j == obj->nr_maps) {
5081 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
5082 i, name, (size_t)rel.r_offset);
5086 if (!bpf_map_type__is_map_in_map(map->def.type))
5088 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
5089 map->def.key_size != sizeof(int)) {
5090 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
5091 i, map->name, sizeof(int));
5095 targ_map = bpf_object__find_map_by_name(obj, name);
5099 var = btf__type_by_id(obj->btf, vi->type);
5100 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
5101 if (btf_vlen(def) == 0)
5103 member = btf_members(def) + btf_vlen(def) - 1;
5104 mname = btf__name_by_offset(obj->btf, member->name_off);
5105 if (strcmp(mname, "values"))
5108 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
5109 if (rel.r_offset - vi->offset < moff)
5112 moff = rel.r_offset - vi->offset - moff;
5116 if (moff >= map->init_slots_sz) {
5118 tmp = realloc(map->init_slots, new_sz * ptr_sz);
5121 map->init_slots = tmp;
5122 memset(map->init_slots + map->init_slots_sz, 0,
5123 (new_sz - map->init_slots_sz) * ptr_sz);
5124 map->init_slots_sz = new_sz;
5126 map->init_slots[moff] = targ_map;
5128 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
5129 i, map->name, moff, name);
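/* For orientation, a sketch of the BPF-side declaration that produces
 * such .maps relocations, using the usual BTF-defined map macros from
 * bpf_helpers.h (all names illustrative):
 *
 *	struct inner_map {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner_a SEC(".maps"), inner_b SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 2);
 *		__array(values, struct inner_map);
 *	} outer SEC(".maps") = {
 *		.values = { &inner_a, &inner_b },
 *	};
 *
 * Each initialized "values" slot yields one relocation against the inner
 * map's symbol; the loop above resolves it and records the target in
 * map->init_slots[moff].
 */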
5135 static int bpf_object__collect_reloc(struct bpf_object *obj)
5139 if (!obj_elf_valid(obj)) {
5140 pr_warn("Internal error: elf object is closed\n");
5141 return -LIBBPF_ERRNO__INTERNAL;
5144 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
5145 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
5146 Elf_Data *data = obj->efile.reloc_sects[i].data;
5147 int idx = shdr->sh_info;
5148 struct bpf_program *prog;
5150 if (shdr->sh_type != SHT_REL) {
5151 pr_warn("internal error at %d\n", __LINE__);
5152 return -LIBBPF_ERRNO__INTERNAL;
5155 if (idx == obj->efile.st_ops_shndx) {
5156 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
5157 } else if (idx == obj->efile.btf_maps_shndx) {
5158 err = bpf_object__collect_map_relos(obj, shdr, data);
5160 prog = bpf_object__find_prog_by_idx(obj, idx);
5162 pr_warn("relocation failed: no prog in section(%d)\n", idx);
5163 return -LIBBPF_ERRNO__RELOC;
5165 err = bpf_program__collect_reloc(prog, shdr, data, obj);
5174 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
5175 char *license, __u32 kern_version, int *pfd)
5177 struct bpf_load_program_attr load_attr;
5178 char *cp, errmsg[STRERR_BUFSIZE];
5179 size_t log_buf_size = 0;
5180 char *log_buf = NULL;
5183 if (!insns || !insns_cnt)
5186 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
5187 load_attr.prog_type = prog->type;
5188 /* old kernels might not support specifying expected_attach_type */
5189 if (!prog->caps->exp_attach_type && prog->sec_def &&
5190 prog->sec_def->is_exp_attach_type_optional)
5191 load_attr.expected_attach_type = 0;
5193 load_attr.expected_attach_type = prog->expected_attach_type;
5194 if (prog->caps->name)
5195 load_attr.name = prog->name;
5196 load_attr.insns = insns;
5197 load_attr.insns_cnt = insns_cnt;
5198 load_attr.license = license;
5199 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
5200 prog->type == BPF_PROG_TYPE_LSM) {
5201 load_attr.attach_btf_id = prog->attach_btf_id;
5202 } else if (prog->type == BPF_PROG_TYPE_TRACING ||
5203 prog->type == BPF_PROG_TYPE_EXT) {
5204 load_attr.attach_prog_fd = prog->attach_prog_fd;
5205 load_attr.attach_btf_id = prog->attach_btf_id;
5207 load_attr.kern_version = kern_version;
5208 load_attr.prog_ifindex = prog->prog_ifindex;
5210 /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
5211 if (prog->obj->btf_ext)
5212 btf_fd = bpf_object__btf_fd(prog->obj);
5215 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
5216 load_attr.func_info = prog->func_info;
5217 load_attr.func_info_rec_size = prog->func_info_rec_size;
5218 load_attr.func_info_cnt = prog->func_info_cnt;
5219 load_attr.line_info = prog->line_info;
5220 load_attr.line_info_rec_size = prog->line_info_rec_size;
5221 load_attr.line_info_cnt = prog->line_info_cnt;
5222 load_attr.log_level = prog->log_level;
5223 load_attr.prog_flags = prog->prog_flags;
5227 log_buf = malloc(log_buf_size);
5234 ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
5237 if (log_buf && load_attr.log_level)
5238 pr_debug("verifier log:\n%s", log_buf);
5244 if (!log_buf || errno == ENOSPC) {
5245 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
5252 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
5253 pr_warn("load bpf program failed: %s\n", cp);
5256 if (log_buf && log_buf[0] != '\0') {
5257 ret = -LIBBPF_ERRNO__VERIFY;
5258 pr_warn("-- BEGIN DUMP LOG ---\n");
5259 pr_warn("\n%s\n", log_buf);
5260 pr_warn("-- END LOG --\n");
5261 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
5262 pr_warn("Program too large (%zu insns), at most %d insns\n",
5263 load_attr.insns_cnt, BPF_MAXINSNS);
5264 ret = -LIBBPF_ERRNO__PROG2BIG;
5265 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
5266 /* Wrong program type? */
5269 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
5270 load_attr.expected_attach_type = 0;
5271 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
5274 ret = -LIBBPF_ERRNO__PROGTYPE;
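/* Retry strategy in load_program(), condensed: the first attempt runs
 * without a verifier log buffer; on failure the buffer is allocated
 * (growing from BPF_LOG_BUF_SIZE when the kernel reports ENOSPC) and the
 * load is retried so a complete log can be reported. If a retry as
 * BPF_PROG_TYPE_KPROBE (the "wrong program type?" probe above) succeeds,
 * the error is classified as -LIBBPF_ERRNO__PROGTYPE.
 */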
5284 static int libbpf_find_attach_btf_id(struct bpf_program *prog);
5286 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
5288 int err = 0, fd, i, btf_id;
5290 if ((prog->type == BPF_PROG_TYPE_TRACING ||
5291 prog->type == BPF_PROG_TYPE_LSM ||
5292 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
5293 btf_id = libbpf_find_attach_btf_id(prog);
5296 prog->attach_btf_id = btf_id;
5299 if (prog->instances.nr < 0 || !prog->instances.fds) {
5300 if (prog->preprocessor) {
5301 pr_warn("Internal error: can't load program '%s'\n",
5302 prog->section_name);
5303 return -LIBBPF_ERRNO__INTERNAL;
5306 prog->instances.fds = malloc(sizeof(int));
5307 if (!prog->instances.fds) {
5308 pr_warn("Not enough memory for BPF fds\n");
5311 prog->instances.nr = 1;
5312 prog->instances.fds[0] = -1;
5315 if (!prog->preprocessor) {
5316 if (prog->instances.nr != 1) {
5317 pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
5318 prog->section_name, prog->instances.nr);
5320 err = load_program(prog, prog->insns, prog->insns_cnt,
5321 license, kern_ver, &fd);
5323 prog->instances.fds[0] = fd;
5327 for (i = 0; i < prog->instances.nr; i++) {
5328 struct bpf_prog_prep_result result;
5329 bpf_program_prep_t preprocessor = prog->preprocessor;
5331 memset(&result, 0, sizeof(result));
5332 err = preprocessor(prog, i, prog->insns,
5333 prog->insns_cnt, &result);
5335 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
5336 i, prog->section_name);
5340 if (!result.new_insn_ptr || !result.new_insn_cnt) {
5341 pr_debug("Skip loading the %dth instance of program '%s'\n",
5342 i, prog->section_name);
5343 prog->instances.fds[i] = -1;
5349 err = load_program(prog, result.new_insn_ptr,
5350 result.new_insn_cnt, license, kern_ver, &fd);
5352 pr_warn("Loading the %dth instance of program '%s' failed\n",
5353 i, prog->section_name);
5359 prog->instances.fds[i] = fd;
5363 pr_warn("failed to load program '%s'\n", prog->section_name);
5364 zfree(&prog->insns);
5365 prog->insns_cnt = 0;
5369 static bool bpf_program__is_function_storage(const struct bpf_program *prog,
5370 const struct bpf_object *obj)
5372 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
5376 bpf_object__load_progs(struct bpf_object *obj, int log_level)
5381 for (i = 0; i < obj->nr_programs; i++) {
5382 if (bpf_program__is_function_storage(&obj->programs[i], obj))
5384 obj->programs[i].log_level |= log_level;
5385 err = bpf_program__load(&obj->programs[i],
5394 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
5396 static struct bpf_object *
5397 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
5398 const struct bpf_object_open_opts *opts)
5400 const char *obj_name, *kconfig;
5401 struct bpf_program *prog;
5402 struct bpf_object *obj;
5406 if (elf_version(EV_CURRENT) == EV_NONE) {
5407 pr_warn("failed to init libelf for %s\n",
5408 path ? : "(mem buf)");
5409 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
5412 if (!OPTS_VALID(opts, bpf_object_open_opts))
5413 return ERR_PTR(-EINVAL);
5415 obj_name = OPTS_GET(opts, object_name, NULL);
5418 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
5419 (unsigned long)obj_buf,
5420 (unsigned long)obj_buf_sz);
5421 obj_name = tmp_name;
5424 pr_debug("loading object '%s' from buffer\n", obj_name);
5427 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
5431 kconfig = OPTS_GET(opts, kconfig, NULL);
5433 obj->kconfig = strdup(kconfig);
5435 return ERR_PTR(-ENOMEM);
5438 err = bpf_object__elf_init(obj);
5439 err = err ? : bpf_object__check_endianness(obj);
5440 err = err ? : bpf_object__elf_collect(obj);
5441 err = err ? : bpf_object__collect_externs(obj);
5442 err = err ? : bpf_object__finalize_btf(obj);
5443 err = err ? : bpf_object__init_maps(obj, opts);
5444 err = err ? : bpf_object__init_prog_names(obj);
5445 err = err ? : bpf_object__collect_reloc(obj);
5448 bpf_object__elf_finish(obj);
5450 bpf_object__for_each_program(prog, obj) {
5451 prog->sec_def = find_sec_def(prog->section_name);
5453 /* couldn't guess, but user might manually specify */
5456 bpf_program__set_type(prog, prog->sec_def->prog_type);
5457 bpf_program__set_expected_attach_type(prog,
5458 prog->sec_def->expected_attach_type);
5460 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
5461 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
5462 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
5467 bpf_object__close(obj);
5468 return ERR_PTR(err);
5471 static struct bpf_object *
5472 __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
5474 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
5475 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
5478 /* param validation */
5482 pr_debug("loading %s\n", attr->file);
5483 return __bpf_object__open(attr->file, NULL, 0, &opts);
5486 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
5488 return __bpf_object__open_xattr(attr, 0);
5491 struct bpf_object *bpf_object__open(const char *path)
5493 struct bpf_object_open_attr attr = {
5495 .prog_type = BPF_PROG_TYPE_UNSPEC,
5498 return bpf_object__open_xattr(&attr);
5502 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
5505 return ERR_PTR(-EINVAL);
5507 pr_debug("loading %s\n", path);
5509 return __bpf_object__open(path, NULL, 0, opts);
5513 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
5514 const struct bpf_object_open_opts *opts)
5516 if (!obj_buf || obj_buf_sz == 0)
5517 return ERR_PTR(-EINVAL);
5519 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
5523 bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
5526 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
5527 .object_name = name,
5528 /* wrong default, but backwards-compatible */
5529 .relaxed_maps = true,
5532 /* returning NULL is wrong, but backwards-compatible */
5533 if (!obj_buf || obj_buf_sz == 0)
5536 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
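/* Usage sketch for the open APIs above (path and buffer illustrative;
 * error handling abbreviated):
 *
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.o", NULL);
 *	if (libbpf_get_error(obj))
 *		return -1;
 *
 * or, from an in-memory image of the ELF file:
 *
 *	obj = bpf_object__open_mem(buf, buf_sz, NULL);
 */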
5539 int bpf_object__unload(struct bpf_object *obj)
5546 for (i = 0; i < obj->nr_maps; i++) {
5547 zclose(obj->maps[i].fd);
5548 if (obj->maps[i].st_ops)
5549 zfree(&obj->maps[i].st_ops->kern_vdata);
5552 for (i = 0; i < obj->nr_programs; i++)
5553 bpf_program__unload(&obj->programs[i]);
5558 static int bpf_object__sanitize_maps(struct bpf_object *obj)
5562 bpf_object__for_each_map(m, obj) {
5563 if (!bpf_map__is_internal(m))
5565 if (!obj->caps.global_data) {
5566 pr_warn("kernel doesn't support global data\n");
5569 if (!obj->caps.array_mmap)
5570 m->def.map_flags ^= BPF_F_MMAPABLE;
5576 static int bpf_object__resolve_externs(struct bpf_object *obj,
5577 const char *extra_kconfig)
5579 bool need_config = false;
5580 struct extern_desc *ext;
5584 if (obj->nr_extern == 0)
5587 data = obj->maps[obj->kconfig_map_idx].mmaped;
5589 for (i = 0; i < obj->nr_extern; i++) {
5590 ext = &obj->externs[i];
5592 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
5593 void *ext_val = data + ext->data_off;
5594 __u32 kver = get_kernel_version();
5597 pr_warn("failed to get kernel version\n");
5600 err = set_ext_value_num(ext, ext_val, kver);
5603 pr_debug("extern %s=0x%x\n", ext->name, kver);
5604 } else if (strncmp(ext->name, "CONFIG_", 7) == 0) {
5607 pr_warn("unrecognized extern '%s'\n", ext->name);
5611 if (need_config && extra_kconfig) {
5612 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, data);
5615 need_config = false;
5616 for (i = 0; i < obj->nr_extern; i++) {
5617 ext = &obj->externs[i];
5625 err = bpf_object__read_kconfig_file(obj, data);
5629 for (i = 0; i < obj->nr_extern; i++) {
5630 ext = &obj->externs[i];
5632 if (!ext->is_set && !ext->is_weak) {
5633 pr_warn("extern %s (strong) not resolved\n", ext->name);
5635 } else if (!ext->is_set) {
5636 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
5644 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
5646 struct bpf_object *obj;
5656 pr_warn("object should not be loaded twice\n");
5662 err = bpf_object__probe_loading(obj);
5663 err = err ? : bpf_object__probe_caps(obj);
5664 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
5665 err = err ? : bpf_object__sanitize_and_load_btf(obj);
5666 err = err ? : bpf_object__sanitize_maps(obj);
5667 err = err ? : bpf_object__load_vmlinux_btf(obj);
5668 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
5669 err = err ? : bpf_object__create_maps(obj);
5670 err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
5671 err = err ? : bpf_object__load_progs(obj, attr->log_level);
5673 btf__free(obj->btf_vmlinux);
5674 obj->btf_vmlinux = NULL;
5681 /* unpin any maps that were auto-pinned during load */
5682 for (i = 0; i < obj->nr_maps; i++)
5683 if (obj->maps[i].pinned && !obj->maps[i].reused)
5684 bpf_map__unpin(&obj->maps[i], NULL);
5686 bpf_object__unload(obj);
5687 pr_warn("failed to load object '%s'\n", obj->path);
5691 int bpf_object__load(struct bpf_object *obj)
5693 struct bpf_object_load_attr attr = {
5697 return bpf_object__load_xattr(&attr);
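/* Typical lifecycle built on the APIs above (a sketch; a real caller
 * checks every return value):
 *
 *	struct bpf_object *obj = bpf_object__open_file("prog.o", NULL);
 *
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -1;
 *	}
 *	... attach programs, use maps ...
 *	bpf_object__close(obj);
 */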
5700 static int make_parent_dir(const char *path)
5702 char *cp, errmsg[STRERR_BUFSIZE];
5706 dname = strdup(path);
5710 dir = dirname(dname);
5711 if (mkdir(dir, 0700) && errno != EEXIST)
5716 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
5717 pr_warn("failed to mkdir %s: %s\n", path, cp);
5722 static int check_path(const char *path)
5724 char *cp, errmsg[STRERR_BUFSIZE];
5725 struct statfs st_fs;
5732 dname = strdup(path);
5736 dir = dirname(dname);
5737 if (statfs(dir, &st_fs)) {
5738 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
5739 pr_warn("failed to statfs %s: %s\n", dir, cp);
5744 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
5745 pr_warn("specified path %s is not on BPF FS\n", path);
5752 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
5755 char *cp, errmsg[STRERR_BUFSIZE];
5758 err = make_parent_dir(path);
5762 err = check_path(path);
5767 pr_warn("invalid program pointer\n");
5771 if (instance < 0 || instance >= prog->instances.nr) {
5772 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
5773 instance, prog->section_name, prog->instances.nr);
5777 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
5778 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
5779 pr_warn("failed to pin program: %s\n", cp);
5782 pr_debug("pinned program '%s'\n", path);
5787 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
5792 err = check_path(path);
5797 pr_warn("invalid program pointer\n");
5801 if (instance < 0 || instance >= prog->instances.nr) {
5802 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
5803 instance, prog->section_name, prog->instances.nr);
5810 pr_debug("unpinned program '%s'\n", path);
5815 int bpf_program__pin(struct bpf_program *prog, const char *path)
5819 err = make_parent_dir(path);
5823 err = check_path(path);
5828 pr_warn("invalid program pointer\n");
5832 if (prog->instances.nr <= 0) {
5833 pr_warn("no instances of prog %s to pin\n",
5834 prog->section_name);
5838 if (prog->instances.nr == 1) {
5839 /* don't create subdirs when pinning single instance */
5840 return bpf_program__pin_instance(prog, path, 0);
5843 for (i = 0; i < prog->instances.nr; i++) {
5847 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
5851 } else if (len >= PATH_MAX) {
5852 err = -ENAMETOOLONG;
5856 err = bpf_program__pin_instance(prog, buf, i);
5864 for (i = i - 1; i >= 0; i--) {
5868 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
5871 else if (len >= PATH_MAX)
5874 bpf_program__unpin_instance(prog, buf, i);
5882 int bpf_program__unpin(struct bpf_program *prog, const char *path)
5886 err = check_path(path);
5891 pr_warn("invalid program pointer\n");
5895 if (prog->instances.nr <= 0) {
5896 		pr_warn("no instances of prog %s to unpin\n",
5897 prog->section_name);
5901 if (prog->instances.nr == 1) {
5902 		/* don't create subdirs when unpinning single instance */
5903 return bpf_program__unpin_instance(prog, path, 0);
5906 for (i = 0; i < prog->instances.nr; i++) {
5910 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
5913 else if (len >= PATH_MAX)
5914 return -ENAMETOOLONG;
5916 err = bpf_program__unpin_instance(prog, buf, i);
5928 int bpf_map__pin(struct bpf_map *map, const char *path)
5930 char *cp, errmsg[STRERR_BUFSIZE];
5934 pr_warn("invalid map pointer\n");
5938 if (map->pin_path) {
5939 if (path && strcmp(path, map->pin_path)) {
5940 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
5941 bpf_map__name(map), map->pin_path, path);
5943 } else if (map->pinned) {
5944 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
5945 bpf_map__name(map), map->pin_path);
5950 pr_warn("missing a path to pin map '%s' at\n",
5951 bpf_map__name(map));
5953 } else if (map->pinned) {
5954 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
5958 map->pin_path = strdup(path);
5959 if (!map->pin_path) {
5965 err = make_parent_dir(map->pin_path);
5969 err = check_path(map->pin_path);
5973 if (bpf_obj_pin(map->fd, map->pin_path)) {
5979 pr_debug("pinned map '%s'\n", map->pin_path);
5984 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
5985 pr_warn("failed to pin map: %s\n", cp);
5989 int bpf_map__unpin(struct bpf_map *map, const char *path)
5994 pr_warn("invalid map pointer\n");
5998 if (map->pin_path) {
5999 if (path && strcmp(path, map->pin_path)) {
6000 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
6001 bpf_map__name(map), map->pin_path, path);
6004 path = map->pin_path;
6006 pr_warn("no path to unpin map '%s' from\n",
6007 bpf_map__name(map));
6011 err = check_path(path);
6019 map->pinned = false;
6020 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
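/* Pinning sketch: the path must live on a mounted bpffs (check_path()
 * verifies BPF_FS_MAGIC); "/sys/fs/bpf" is the conventional mount point
 * assumed here:
 *
 *	err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 *	...
 *	err = bpf_map__unpin(map, NULL);   reuses the stored pin_path
 */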
6025 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
6035 free(map->pin_path);
6036 map->pin_path = new;
6040 const char *bpf_map__get_pin_path(const struct bpf_map *map)
6042 return map->pin_path;
6045 bool bpf_map__is_pinned(const struct bpf_map *map)
6050 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
6052 struct bpf_map *map;
6059 pr_warn("object not yet loaded; load it first\n");
6063 bpf_object__for_each_map(map, obj) {
6064 char *pin_path = NULL;
6070 len = snprintf(buf, PATH_MAX, "%s/%s", path,
6071 bpf_map__name(map));
6074 goto err_unpin_maps;
6075 } else if (len >= PATH_MAX) {
6076 err = -ENAMETOOLONG;
6077 goto err_unpin_maps;
6080 } else if (!map->pin_path) {
6084 err = bpf_map__pin(map, pin_path);
6086 goto err_unpin_maps;
6092 while ((map = bpf_map__prev(map, obj))) {
6096 bpf_map__unpin(map, NULL);
6102 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
6104 struct bpf_map *map;
6110 bpf_object__for_each_map(map, obj) {
6111 char *pin_path = NULL;
6117 len = snprintf(buf, PATH_MAX, "%s/%s", path,
6118 bpf_map__name(map));
6121 else if (len >= PATH_MAX)
6122 return -ENAMETOOLONG;
6124 } else if (!map->pin_path) {
6128 err = bpf_map__unpin(map, pin_path);
6136 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
6138 struct bpf_program *prog;
6145 pr_warn("object not yet loaded; load it first\n");
6149 bpf_object__for_each_program(prog, obj) {
6153 len = snprintf(buf, PATH_MAX, "%s/%s", path,
6157 goto err_unpin_programs;
6158 } else if (len >= PATH_MAX) {
6159 err = -ENAMETOOLONG;
6160 goto err_unpin_programs;
6163 err = bpf_program__pin(prog, buf);
6165 goto err_unpin_programs;
6171 while ((prog = bpf_program__prev(prog, obj))) {
6175 len = snprintf(buf, PATH_MAX, "%s/%s", path,
6179 else if (len >= PATH_MAX)
6182 bpf_program__unpin(prog, buf);
6188 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
6190 struct bpf_program *prog;
6196 bpf_object__for_each_program(prog, obj) {
6200 len = snprintf(buf, PATH_MAX, "%s/%s", path,
6204 else if (len >= PATH_MAX)
6205 return -ENAMETOOLONG;
6207 err = bpf_program__unpin(prog, buf);
6215 int bpf_object__pin(struct bpf_object *obj, const char *path)
6219 err = bpf_object__pin_maps(obj, path);
6223 err = bpf_object__pin_programs(obj, path);
6225 bpf_object__unpin_maps(obj, path);
6232 static void bpf_map__destroy(struct bpf_map *map)
6234 if (map->clear_priv)
6235 map->clear_priv(map, map->priv);
6237 map->clear_priv = NULL;
6239 if (map->inner_map) {
6240 bpf_map__destroy(map->inner_map);
6241 zfree(&map->inner_map);
6244 zfree(&map->init_slots);
6245 map->init_slots_sz = 0;
6248 munmap(map->mmaped, bpf_map_mmap_sz(map));
6253 zfree(&map->st_ops->data);
6254 zfree(&map->st_ops->progs);
6255 zfree(&map->st_ops->kern_func_off);
6256 zfree(&map->st_ops);
6260 zfree(&map->pin_path);
6266 void bpf_object__close(struct bpf_object *obj)
6273 if (obj->clear_priv)
6274 obj->clear_priv(obj, obj->priv);
6276 bpf_object__elf_finish(obj);
6277 bpf_object__unload(obj);
6278 btf__free(obj->btf);
6279 btf_ext__free(obj->btf_ext);
6281 for (i = 0; i < obj->nr_maps; i++)
6282 bpf_map__destroy(&obj->maps[i]);
6284 zfree(&obj->kconfig);
6285 zfree(&obj->externs);
6291 if (obj->programs && obj->nr_programs) {
6292 for (i = 0; i < obj->nr_programs; i++)
6293 bpf_program__exit(&obj->programs[i]);
6295 zfree(&obj->programs);
6297 list_del(&obj->list);
6302 bpf_object__next(struct bpf_object *prev)
6304 struct bpf_object *next;
6307 next = list_first_entry(&bpf_objects_list,
6311 next = list_next_entry(prev, list);
6313 	/* An empty list is detected here, so no check is needed on entry. */
6314 if (&next->list == &bpf_objects_list)
6320 const char *bpf_object__name(const struct bpf_object *obj)
6322 return obj ? obj->name : ERR_PTR(-EINVAL);
6325 unsigned int bpf_object__kversion(const struct bpf_object *obj)
6327 return obj ? obj->kern_version : 0;
6330 struct btf *bpf_object__btf(const struct bpf_object *obj)
6332 return obj ? obj->btf : NULL;
6335 int bpf_object__btf_fd(const struct bpf_object *obj)
6337 return obj->btf ? btf__fd(obj->btf) : -1;
6340 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
6341 bpf_object_clear_priv_t clear_priv)
6343 if (obj->priv && obj->clear_priv)
6344 obj->clear_priv(obj, obj->priv);
6347 obj->clear_priv = clear_priv;
6351 void *bpf_object__priv(const struct bpf_object *obj)
6353 return obj ? obj->priv : ERR_PTR(-EINVAL);
6356 static struct bpf_program *
6357 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
6360 size_t nr_programs = obj->nr_programs;
6367 /* Iter from the beginning */
6368 return forward ? &obj->programs[0] :
6369 &obj->programs[nr_programs - 1];
6371 if (p->obj != obj) {
6372 pr_warn("error: program handler doesn't match object\n");
6376 idx = (p - obj->programs) + (forward ? 1 : -1);
6377 if (idx >= obj->nr_programs || idx < 0)
6379 return &obj->programs[idx];
6382 struct bpf_program *
6383 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
6385 struct bpf_program *prog = prev;
6388 prog = __bpf_program__iter(prog, obj, true);
6389 } while (prog && bpf_program__is_function_storage(prog, obj));
6394 struct bpf_program *
6395 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
6397 struct bpf_program *prog = next;
6400 prog = __bpf_program__iter(prog, obj, false);
6401 } while (prog && bpf_program__is_function_storage(prog, obj));
6406 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
6407 bpf_program_clear_priv_t clear_priv)
6409 if (prog->priv && prog->clear_priv)
6410 prog->clear_priv(prog, prog->priv);
6413 prog->clear_priv = clear_priv;
6417 void *bpf_program__priv(const struct bpf_program *prog)
6419 return prog ? prog->priv : ERR_PTR(-EINVAL);
6422 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
6424 prog->prog_ifindex = ifindex;
6427 const char *bpf_program__name(const struct bpf_program *prog)
6432 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
6436 title = prog->section_name;
6438 title = strdup(title);
6440 pr_warn("failed to strdup program title\n");
6441 return ERR_PTR(-ENOMEM);
6448 int bpf_program__fd(const struct bpf_program *prog)
6450 return bpf_program__nth_fd(prog, 0);
6453 size_t bpf_program__size(const struct bpf_program *prog)
6455 return prog->insns_cnt * sizeof(struct bpf_insn);
6458 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
6459 bpf_program_prep_t prep)
6463 if (nr_instances <= 0 || !prep)
6466 if (prog->instances.nr > 0 || prog->instances.fds) {
6467 pr_warn("Can't set pre-processor after loading\n");
6471 instances_fds = malloc(sizeof(int) * nr_instances);
6472 if (!instances_fds) {
6473 		pr_warn("failed to allocate memory for fds\n");
6477 /* fill all fd with -1 */
6478 memset(instances_fds, -1, sizeof(int) * nr_instances);
6480 prog->instances.nr = nr_instances;
6481 prog->instances.fds = instances_fds;
6482 prog->preprocessor = prep;
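/* Usage sketch for the prep mechanism (hypothetical callback; the
 * signature follows bpf_program_prep_t and must be installed before the
 * object is loaded, as checked above):
 *
 *	static int my_prep(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;
 *		res->new_insn_cnt = insns_cnt;
 *		res->pfd = NULL;
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 4, my_prep);
 *
 * Loading then produces four instances, each retrievable with
 * bpf_program__nth_fd().
 */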
6486 int bpf_program__nth_fd(const struct bpf_program *prog, int n)
6493 if (n >= prog->instances.nr || n < 0) {
6494 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
6495 n, prog->section_name, prog->instances.nr);
6499 fd = prog->instances.fds[n];
6501 pr_warn("%dth instance of program '%s' is invalid\n",
6502 n, prog->section_name);
6509 enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
6514 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
6519 static bool bpf_program__is_type(const struct bpf_program *prog,
6520 enum bpf_prog_type type)
6522 return prog ? (prog->type == type) : false;
6525 #define BPF_PROG_TYPE_FNS(NAME, TYPE) \
6526 int bpf_program__set_##NAME(struct bpf_program *prog) \
6530 bpf_program__set_type(prog, TYPE); \
6534 bool bpf_program__is_##NAME(const struct bpf_program *prog) \
6536 return bpf_program__is_type(prog, TYPE); \
6539 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
6540 BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
6541 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
6542 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
6543 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
6544 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
6545 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
6546 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
6547 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
6548 BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
6549 BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
6550 BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
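/* The macro above expands, for each (NAME, TYPE) pair, into a
 * bpf_program__set_NAME() setter and a bpf_program__is_NAME() predicate,
 * e.g.:
 *
 *	bpf_program__set_xdp(prog);
 *	if (bpf_program__is_xdp(prog))
 *		...
 */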
6552 enum bpf_attach_type
6553 bpf_program__get_expected_attach_type(struct bpf_program *prog)
6555 return prog->expected_attach_type;
6558 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
6559 enum bpf_attach_type type)
6561 prog->expected_attach_type = type;
6564 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \
6565 attachable, attach_btf) \
6568 .len = sizeof(string) - 1, \
6569 .prog_type = ptype, \
6570 .expected_attach_type = eatype, \
6571 .is_exp_attach_type_optional = eatype_optional, \
6572 .is_attachable = attachable, \
6573 .is_attach_btf = attach_btf, \
6576 /* Programs that can NOT be attached. */
6577 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
6579 /* Programs that can be attached. */
6580 #define BPF_APROG_SEC(string, ptype, atype) \
6581 BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
6583 /* Programs that must specify expected attach type at load time. */
6584 #define BPF_EAPROG_SEC(string, ptype, eatype) \
6585 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
6587 /* Programs that use BTF to identify attach point */
6588 #define BPF_PROG_BTF(string, ptype, eatype) \
6589 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
6591 /* Programs that can be attached but attach type can't be identified by section
6592 * name. Kept for backward compatibility.
6594 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
6596 #define SEC_DEF(sec_pfx, ptype, ...) { \
6598 .len = sizeof(sec_pfx) - 1, \
6599 .prog_type = BPF_PROG_TYPE_##ptype, \
6603 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
6604 struct bpf_program *prog);
6605 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
6606 struct bpf_program *prog);
6607 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
6608 struct bpf_program *prog);
6609 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
6610 struct bpf_program *prog);
6611 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
6612 struct bpf_program *prog);
6613 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
6614 struct bpf_program *prog);
6616 static const struct bpf_sec_def section_defs[] = {
6617 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
6618 BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
6619 SEC_DEF("kprobe/", KPROBE,
6620 .attach_fn = attach_kprobe),
6621 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
6622 SEC_DEF("kretprobe/", KPROBE,
6623 .attach_fn = attach_kprobe),
6624 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
6625 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
6626 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
6627 SEC_DEF("tracepoint/", TRACEPOINT,
6628 .attach_fn = attach_tp),
6629 SEC_DEF("tp/", TRACEPOINT,
6630 .attach_fn = attach_tp),
6631 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
6632 .attach_fn = attach_raw_tp),
6633 SEC_DEF("raw_tp/", RAW_TRACEPOINT,
6634 .attach_fn = attach_raw_tp),
6635 SEC_DEF("tp_btf/", TRACING,
6636 .expected_attach_type = BPF_TRACE_RAW_TP,
6637 .is_attach_btf = true,
6638 .attach_fn = attach_trace),
6639 SEC_DEF("fentry/", TRACING,
6640 .expected_attach_type = BPF_TRACE_FENTRY,
6641 .is_attach_btf = true,
6642 .attach_fn = attach_trace),
6643 SEC_DEF("fmod_ret/", TRACING,
6644 .expected_attach_type = BPF_MODIFY_RETURN,
6645 .is_attach_btf = true,
6646 .attach_fn = attach_trace),
6647 SEC_DEF("fexit/", TRACING,
6648 .expected_attach_type = BPF_TRACE_FEXIT,
6649 .is_attach_btf = true,
6650 .attach_fn = attach_trace),
6651 SEC_DEF("freplace/", EXT,
6652 .is_attach_btf = true,
6653 .attach_fn = attach_trace),
6654 SEC_DEF("lsm/", LSM,
6655 .is_attach_btf = true,
6656 .expected_attach_type = BPF_LSM_MAC,
6657 .attach_fn = attach_lsm),
6658 SEC_DEF("iter/", TRACING,
6659 .expected_attach_type = BPF_TRACE_ITER,
6660 .is_attach_btf = true,
6661 .attach_fn = attach_iter),
6662 BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
6664 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
6665 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
6666 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
6667 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
6668 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
6669 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
6670 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
6671 BPF_CGROUP_INET_INGRESS),
6672 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
6673 BPF_CGROUP_INET_EGRESS),
6674 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
6675 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
6676 BPF_CGROUP_INET_SOCK_CREATE),
6677 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
6678 BPF_CGROUP_INET4_POST_BIND),
6679 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
6680 BPF_CGROUP_INET6_POST_BIND),
6681 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
6683 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
6684 BPF_CGROUP_SOCK_OPS),
6685 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
6686 BPF_SK_SKB_STREAM_PARSER),
6687 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
6688 BPF_SK_SKB_STREAM_VERDICT),
6689 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
6690 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
6691 BPF_SK_MSG_VERDICT),
6692 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
6694 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
6695 BPF_FLOW_DISSECTOR),
6696 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6697 BPF_CGROUP_INET4_BIND),
6698 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6699 BPF_CGROUP_INET6_BIND),
6700 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6701 BPF_CGROUP_INET4_CONNECT),
6702 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6703 BPF_CGROUP_INET6_CONNECT),
6704 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6705 BPF_CGROUP_UDP4_SENDMSG),
6706 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6707 BPF_CGROUP_UDP6_SENDMSG),
6708 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6709 BPF_CGROUP_UDP4_RECVMSG),
6710 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6711 BPF_CGROUP_UDP6_RECVMSG),
6712 BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6713 BPF_CGROUP_INET4_GETPEERNAME),
6714 BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6715 BPF_CGROUP_INET6_GETPEERNAME),
6716 BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6717 BPF_CGROUP_INET4_GETSOCKNAME),
6718 BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6719 BPF_CGROUP_INET6_GETSOCKNAME),
6720 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
6722 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
6723 BPF_CGROUP_GETSOCKOPT),
6724 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
6725 BPF_CGROUP_SETSOCKOPT),
6726 BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
6729 #undef BPF_PROG_SEC_IMPL
6731 #undef BPF_APROG_SEC
6732 #undef BPF_EAPROG_SEC
6733 #undef BPF_APROG_COMPAT
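/* Effect of the table above, by example: a program placed in ELF section
 * "kprobe/do_sys_open" (symbol name illustrative) matches the "kprobe/"
 * prefix, so bpf_object__open() sets BPF_PROG_TYPE_KPROBE and the
 * per-section attach_fn (attach_kprobe here) can attach it; likewise
 * "cgroup/connect4" fixes both the program type and the
 * BPF_CGROUP_INET4_CONNECT expected attach type at load time.
 */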
6736 #define MAX_TYPE_NAME_SIZE 32
6738 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
6740 int i, n = ARRAY_SIZE(section_defs);
6742 for (i = 0; i < n; i++) {
6743 if (strncmp(sec_name,
6744 section_defs[i].sec, section_defs[i].len))
6746 		return &section_defs[i];
6751 static char *libbpf_get_type_names(bool attach_type)
6753 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
6761 	/* Build a string buffer listing all available names */
6762 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
6763 if (attach_type && !section_defs[i].is_attachable)
6766 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
6771 strcat(buf, section_defs[i].sec);
6777 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
6778 enum bpf_attach_type *expected_attach_type)
6780 const struct bpf_sec_def *sec_def;
6786 sec_def = find_sec_def(name);
6788 *prog_type = sec_def->prog_type;
6789 *expected_attach_type = sec_def->expected_attach_type;
6793 pr_debug("failed to guess program type from ELF section '%s'\n", name);
6794 type_names = libbpf_get_type_names(false);
6795 if (type_names != NULL) {
6796 pr_debug("supported section(type) names are:%s\n", type_names);
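/* Usage sketch (section name illustrative; returns 0 on success):
 *
 *	enum bpf_prog_type ptype;
 *	enum bpf_attach_type atype;
 *
 *	if (libbpf_prog_type_by_name("tracepoint/sched/sched_switch",
 *				     &ptype, &atype))
 *		return -1;
 *	ptype is now BPF_PROG_TYPE_TRACEPOINT
 */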
6803 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
6806 struct bpf_map *map;
6809 for (i = 0; i < obj->nr_maps; i++) {
6810 map = &obj->maps[i];
6811 if (!bpf_map__is_struct_ops(map))
6813 if (map->sec_offset <= offset &&
6814 offset - map->sec_offset < map->def.value_size)
6821 /* Collect the reloc from ELF and populate the st_ops->progs[] */
6822 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6823 GElf_Shdr *shdr, Elf_Data *data)
6825 const struct btf_member *member;
6826 struct bpf_struct_ops *st_ops;
6827 struct bpf_program *prog;
6828 unsigned int shdr_idx;
6829 const struct btf *btf;
6830 struct bpf_map *map;
6839 symbols = obj->efile.symbols;
6841 nrels = shdr->sh_size / shdr->sh_entsize;
6842 for (i = 0; i < nrels; i++) {
6843 if (!gelf_getrel(data, i, &rel)) {
6844 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
6845 return -LIBBPF_ERRNO__FORMAT;
6848 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
6849 pr_warn("struct_ops reloc: symbol %zx not found\n",
6850 (size_t)GELF_R_SYM(rel.r_info));
6851 return -LIBBPF_ERRNO__FORMAT;
6854 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
6855 sym.st_name) ? : "<?>";
6856 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
6858 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
6859 (size_t)rel.r_offset);
6863 moff = rel.r_offset - map->sec_offset;
6864 shdr_idx = sym.st_shndx;
6865 st_ops = map->st_ops;
6866 		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d ('%s')\n",
6868 (long long)(rel.r_info >> 32),
6869 (long long)sym.st_value,
6870 shdr_idx, (size_t)rel.r_offset,
6871 map->sec_offset, sym.st_name, name);
6873 if (shdr_idx >= SHN_LORESERVE) {
6874 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
6875 map->name, (size_t)rel.r_offset, shdr_idx);
6876 return -LIBBPF_ERRNO__RELOC;
6879 member = find_member_by_offset(st_ops->type, moff * 8);
6881 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
6885 member_idx = member - btf_members(st_ops->type);
6886 name = btf__name_by_offset(btf, member->name_off);
6888 if (!resolve_func_ptr(btf, member->type, NULL)) {
6889 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
6894 prog = bpf_object__find_prog_by_idx(obj, shdr_idx);
6896 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
6897 map->name, shdr_idx, name);
6901 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6902 const struct bpf_sec_def *sec_def;
6904 sec_def = find_sec_def(prog->section_name);
6906 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
6908 prog->type = sec_def->prog_type;
6912 prog->type = BPF_PROG_TYPE_STRUCT_OPS;
6913 prog->attach_btf_id = st_ops->type_id;
6914 prog->expected_attach_type = member_idx;
6915 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
6916 prog->attach_btf_id != st_ops->type_id ||
6917 prog->expected_attach_type != member_idx) {
6920 st_ops->progs[member_idx] = prog;
6926 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
6927 map->name, prog->name, prog->section_name, prog->type,
6928 prog->attach_btf_id, prog->expected_attach_type, name);
6932 #define BTF_TRACE_PREFIX "btf_trace_"
6933 #define BTF_LSM_PREFIX "bpf_lsm_"
6934 #define BTF_ITER_PREFIX "bpf_iter_"
6935 #define BTF_MAX_NAME_SIZE 128
6937 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
6938 const char *name, __u32 kind)
6940 char btf_type_name[BTF_MAX_NAME_SIZE];
6943 ret = snprintf(btf_type_name, sizeof(btf_type_name),
6944 "%s%s", prefix, name);
6945 /* snprintf returns the number of characters written excluding the
6946 	 * terminating null. So, if >= BTF_MAX_NAME_SIZE characters are written, it
6947 * indicates truncation.
6949 if (ret < 0 || ret >= sizeof(btf_type_name))
6950 return -ENAMETOOLONG;
6951 return btf__find_by_name_kind(btf, btf_type_name, kind);
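/* Example of the prefix scheme (per the callers below): resolving a
 * SEC("tp_btf/sched_switch") program looks up "btf_trace_sched_switch",
 * an lsm/ program looks up "bpf_lsm_<hook>", an iter/ program looks up
 * "bpf_iter_<target>", and anything else is resolved as a plain
 * BTF_KIND_FUNC by its unprefixed name.
 */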
6954 static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
6955 enum bpf_attach_type attach_type)
6959 if (attach_type == BPF_TRACE_RAW_TP)
6960 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
6962 else if (attach_type == BPF_LSM_MAC)
6963 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
6965 else if (attach_type == BPF_TRACE_ITER)
6966 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
6969 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
6972 pr_warn("%s is not found in vmlinux BTF\n", name);
6977 int libbpf_find_vmlinux_btf_id(const char *name,
6978 enum bpf_attach_type attach_type)
6983 btf = libbpf_find_kernel_btf();
6985 pr_warn("vmlinux BTF is not found\n");
6989 err = __find_vmlinux_btf_id(btf, name, attach_type);
6994 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
6996 struct bpf_prog_info_linear *info_linear;
6997 struct bpf_prog_info *info;
6998 struct btf *btf = NULL;
7001 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
7002 if (IS_ERR_OR_NULL(info_linear)) {
7003 		pr_warn("failed to get prog_info_linear for FD %d\n",
7007 info = &info_linear->info;
7008 if (!info->btf_id) {
7009 pr_warn("The target program doesn't have BTF\n");
7012 if (btf__get_from_id(info->btf_id, &btf)) {
7013 pr_warn("Failed to get BTF of the program\n");
7016 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
7019 pr_warn("%s is not found in prog's BTF\n", name);
7027 static int libbpf_find_attach_btf_id(struct bpf_program *prog)
7029 enum bpf_attach_type attach_type = prog->expected_attach_type;
7030 __u32 attach_prog_fd = prog->attach_prog_fd;
7031 const char *name = prog->section_name;
7037 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
7038 if (!section_defs[i].is_attach_btf)
7040 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
7043 err = libbpf_find_prog_btf_id(name + section_defs[i].len,
7046 err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
7047 name + section_defs[i].len,
7051 pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
7055 int libbpf_attach_type_by_name(const char *name,
7056 enum bpf_attach_type *attach_type)
7064 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
7065 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
7067 if (!section_defs[i].is_attachable)
7069 *attach_type = section_defs[i].expected_attach_type;
7072 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
7073 type_names = libbpf_get_type_names(true);
7074 if (type_names != NULL) {
7075 pr_debug("attachable section(type) names are:%s\n", type_names);
7082 int bpf_map__fd(const struct bpf_map *map)
7084 return map ? map->fd : -EINVAL;
7087 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
7089 return map ? &map->def : ERR_PTR(-EINVAL);
7092 const char *bpf_map__name(const struct bpf_map *map)
7094 return map ? map->name : NULL;
7097 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
7099 return map ? map->btf_key_type_id : 0;
7102 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
7104 return map ? map->btf_value_type_id : 0;
7107 int bpf_map__set_priv(struct bpf_map *map, void *priv,
7108 bpf_map_clear_priv_t clear_priv)
7114 if (map->clear_priv)
7115 map->clear_priv(map, map->priv);
7119 map->clear_priv = clear_priv;
7123 void *bpf_map__priv(const struct bpf_map *map)
7125 return map ? map->priv : ERR_PTR(-EINVAL);
7128 int bpf_map__set_initial_value(struct bpf_map *map,
7129 const void *data, size_t size)
7131 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
7132 size != map->def.value_size || map->fd >= 0)
7135 memcpy(map->mmaped, data, size);
7139 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
7141 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
7144 bool bpf_map__is_internal(const struct bpf_map *map)
7146 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
7149 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
7151 map->map_ifindex = ifindex;
7154 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
7156 if (!bpf_map_type__is_map_in_map(map->def.type)) {
7157 pr_warn("error: unsupported map type\n");
7160 if (map->inner_map_fd != -1) {
7161 pr_warn("error: inner_map_fd already specified\n");
7164 map->inner_map_fd = fd;
7168 static struct bpf_map *
7169 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
7172 struct bpf_map *s, *e;
7174 if (!obj || !obj->maps)
7178 e = obj->maps + obj->nr_maps;
7180 if ((m < s) || (m >= e)) {
7181 pr_warn("error in %s: map handler doesn't belong to object\n",
7186 idx = (m - obj->maps) + i;
7187 if (idx >= obj->nr_maps || idx < 0)
7189 return &obj->maps[idx];
7193 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
7198 return __bpf_map__iter(prev, obj, 1);
7202 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
7207 return obj->maps + obj->nr_maps - 1;
7210 return __bpf_map__iter(next, obj, -1);
7214 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
7216 struct bpf_map *pos;
7218 bpf_object__for_each_map(pos, obj) {
7219 if (pos->name && !strcmp(pos->name, name))
7226 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
7228 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
7232 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
7234 return ERR_PTR(-ENOTSUP);
7237 long libbpf_get_error(const void *ptr)
7239 return PTR_ERR_OR_ZERO(ptr);
7242 int bpf_prog_load(const char *file, enum bpf_prog_type type,
7243 struct bpf_object **pobj, int *prog_fd)
7245 struct bpf_prog_load_attr attr;
7247 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
7249 attr.prog_type = type;
7250 attr.expected_attach_type = 0;
7252 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
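/* Usage sketch (file name illustrative):
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load("prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		return -1;
 *	prog_fd now refers to the first program in the object
 */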
7255 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
7256 struct bpf_object **pobj, int *prog_fd)
7258 struct bpf_object_open_attr open_attr = {};
7259 struct bpf_program *prog, *first_prog = NULL;
7260 struct bpf_object *obj;
7261 struct bpf_map *map;
7269 open_attr.file = attr->file;
7270 open_attr.prog_type = attr->prog_type;
7272 obj = bpf_object__open_xattr(&open_attr);
7273 if (IS_ERR_OR_NULL(obj))
7276 bpf_object__for_each_program(prog, obj) {
7277 enum bpf_attach_type attach_type = attr->expected_attach_type;
7279 * to preserve backwards compatibility, bpf_prog_load treats
7280 * attr->prog_type, if specified, as an override to whatever
7281 * bpf_object__open guessed
7283 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
7284 bpf_program__set_type(prog, attr->prog_type);
7285 bpf_program__set_expected_attach_type(prog,
7288 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
7290 * we haven't guessed from section name and user
7291 * didn't provide a fallback type, too bad...
7293 bpf_object__close(obj);
7297 prog->prog_ifindex = attr->ifindex;
7298 prog->log_level = attr->log_level;
7299 prog->prog_flags = attr->prog_flags;
7304 bpf_object__for_each_map(map, obj) {
7305 if (!bpf_map__is_offload_neutral(map))
7306 map->map_ifindex = attr->ifindex;
7310 		pr_warn("object file doesn't contain any bpf program\n");
7311 bpf_object__close(obj);
7315 err = bpf_object__load(obj);
7317 bpf_object__close(obj);
7322 *prog_fd = bpf_program__fd(first_prog);
7327 int (*detach)(struct bpf_link *link);
7328 int (*destroy)(struct bpf_link *link);
7329 char *pin_path; /* NULL, if not pinned */
7330 int fd; /* hook FD, -1 if not applicable */
7334 /* Replace link's underlying BPF program with the new one */
7335 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
7337 return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
7340 /* Release "ownership" of the underlying BPF resource (typically, a BPF program
7341  * attached to some BPF hook, e.g., a tracepoint or kprobe). A disconnected
7342  * link, when destroyed through a bpf_link__destroy() call, won't attempt to
7343  * detach/unregister that BPF resource. This is useful in situations where,
7344  * say, an attached BPF program has to outlive the userspace program that
7345  * attached it. Depending on the type of BPF program, though, there might be
7346  * additional steps (like pinning the BPF program in BPF FS) necessary to ensure
7347  * that exit of the userspace program doesn't trigger automatic detachment and
7348  * cleanup inside the kernel.
7350 void bpf_link__disconnect(struct bpf_link *link)
7352 link->disconnected = true;
7355 int bpf_link__destroy(struct bpf_link *link)
7362 if (!link->disconnected && link->detach)
7363 err = link->detach(link);
7365 link->destroy(link);
7367 free(link->pin_path);
7373 int bpf_link__fd(const struct bpf_link *link)
7378 const char *bpf_link__pin_path(const struct bpf_link *link)
7380 return link->pin_path;
7383 static int bpf_link__detach_fd(struct bpf_link *link)
7385 return close(link->fd);
7388 struct bpf_link *bpf_link__open(const char *path)
7390 struct bpf_link *link;
7393 fd = bpf_obj_get(path);
7396 pr_warn("failed to open link at %s: %d\n", path, fd);
7400 link = calloc(1, sizeof(*link));
7403 return ERR_PTR(-ENOMEM);
7405 link->detach = &bpf_link__detach_fd;
7408 link->pin_path = strdup(path);
7409 if (!link->pin_path) {
7410 bpf_link__destroy(link);
7411 return ERR_PTR(-ENOMEM);
7417 int bpf_link__pin(struct bpf_link *link, const char *path)
7423 err = make_parent_dir(path);
7426 err = check_path(path);
7430 link->pin_path = strdup(path);
7431 if (!link->pin_path)
7434 if (bpf_obj_pin(link->fd, link->pin_path)) {
7436 zfree(&link->pin_path);
7440 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
7444 int bpf_link__unpin(struct bpf_link *link)
7448 if (!link->pin_path)
7451 err = unlink(link->pin_path);
7455 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
7456 zfree(&link->pin_path);
7460 static int bpf_link__detach_perf_event(struct bpf_link *link)
7464 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
7472 struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
7475 char errmsg[STRERR_BUFSIZE];
7476 struct bpf_link *link;
7480 pr_warn("program '%s': invalid perf event FD %d\n",
7481 bpf_program__title(prog, false), pfd);
7482 return ERR_PTR(-EINVAL);
7484 prog_fd = bpf_program__fd(prog);
7486 pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
7487 bpf_program__title(prog, false));
7488 return ERR_PTR(-EINVAL);
7491 link = calloc(1, sizeof(*link));
7493 return ERR_PTR(-ENOMEM);
7494 link->detach = &bpf_link__detach_perf_event;
7497 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
7500 pr_warn("program '%s': failed to attach to pfd %d: %s\n",
7501 bpf_program__title(prog, false), pfd,
7502 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
7503 return ERR_PTR(err);
7505 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
7508 pr_warn("program '%s': failed to enable pfd %d: %s\n",
7509 bpf_program__title(prog, false), pfd,
7510 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
7511 return ERR_PTR(err);
7517  * this function is expected to parse an integer in the range of [0, 2^31-1]
7518  * from the given file using scanf format string fmt. If the actual parsed
7519  * value is negative, the result might be indistinguishable from an error
7521 static int parse_uint_from_file(const char *file, const char *fmt)
7523 char buf[STRERR_BUFSIZE];
7527 f = fopen(file, "r");
7530 pr_debug("failed to open '%s': %s\n", file,
7531 libbpf_strerror_r(err, buf, sizeof(buf)));
7534 err = fscanf(f, fmt, &ret);
7536 err = err == EOF ? -EIO : -errno;
7537 pr_debug("failed to parse '%s': %s\n", file,
7538 libbpf_strerror_r(err, buf, sizeof(buf)));
7546 static int determine_kprobe_perf_type(void)
7548 const char *file = "/sys/bus/event_source/devices/kprobe/type";
7550 return parse_uint_from_file(file, "%d\n");
7553 static int determine_uprobe_perf_type(void)
7555 const char *file = "/sys/bus/event_source/devices/uprobe/type";
7557 return parse_uint_from_file(file, "%d\n");
7560 static int determine_kprobe_retprobe_bit(void)
7562 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
7564 return parse_uint_from_file(file, "config:%d\n");
7567 static int determine_uprobe_retprobe_bit(void)
7569 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
7571 return parse_uint_from_file(file, "config:%d\n");
7574 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
7575 uint64_t offset, int pid)
7577 struct perf_event_attr attr = {};
7578 char errmsg[STRERR_BUFSIZE];
7581 type = uprobe ? determine_uprobe_perf_type()
7582 : determine_kprobe_perf_type();
7584 pr_warn("failed to determine %s perf type: %s\n",
7585 uprobe ? "uprobe" : "kprobe",
7586 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
7590 int bit = uprobe ? determine_uprobe_retprobe_bit()
7591 : determine_kprobe_retprobe_bit();
7594 pr_warn("failed to determine %s retprobe bit: %s\n",
7595 uprobe ? "uprobe" : "kprobe",
7596 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
7599 attr.config |= 1 << bit;
7601 attr.size = sizeof(attr);
7603 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
7604 attr.config2 = offset; /* kprobe_addr or probe_offset */
7606 /* pid filter is meaningful only for uprobes */
7607 pfd = syscall(__NR_perf_event_open, &attr,
7608 pid < 0 ? -1 : pid /* pid */,
7609 pid == -1 ? 0 : -1 /* cpu */,
7610 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
7613 pr_warn("%s perf_event_open() failed: %s\n",
7614 uprobe ? "uprobe" : "kprobe",
7615 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
7621 struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
7623 const char *func_name)
7625 char errmsg[STRERR_BUFSIZE];
7626 struct bpf_link *link;
7629 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
7630 0 /* offset */, -1 /* pid */);
7632 pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
7633 bpf_program__title(prog, false),
7634 retprobe ? "kretprobe" : "kprobe", func_name,
7635 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
7636 return ERR_PTR(pfd);
7638 link = bpf_program__attach_perf_event(prog, pfd);
7641 err = PTR_ERR(link);
7642 pr_warn("program '%s': failed to attach to %s '%s': %s\n",
7643 bpf_program__title(prog, false),
7644 retprobe ? "kretprobe" : "kprobe", func_name,
7645 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
7651 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
7652 struct bpf_program *prog)
7654 const char *func_name;
7657 func_name = bpf_program__title(prog, false) + sec->len;
7658 retprobe = strcmp(sec->sec, "kretprobe/") == 0;
7660 return bpf_program__attach_kprobe(prog, retprobe, func_name);
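/* Usage sketch (kernel symbol illustrative):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *	if (libbpf_get_error(link))
 *		return -1;
 *	...
 *	bpf_link__destroy(link);
 */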
7663 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
7664 bool retprobe, pid_t pid,
7665 const char *binary_path,
7668 char errmsg[STRERR_BUFSIZE];
7669 struct bpf_link *link;
7672 pfd = perf_event_open_probe(true /* uprobe */, retprobe,
7673 binary_path, func_offset, pid);
7675 pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
7676 bpf_program__title(prog, false),
7677 retprobe ? "uretprobe" : "uprobe",
7678 binary_path, func_offset,
7679 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
7680 return ERR_PTR(pfd);
7682 link = bpf_program__attach_perf_event(prog, pfd);
7685 err = PTR_ERR(link);
7686 pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
7687 bpf_program__title(prog, false),
7688 retprobe ? "uretprobe" : "uprobe",
7689 binary_path, func_offset,
7690 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
7696 static int determine_tracepoint_id(const char *tp_category,
7697 const char *tp_name)
7699 char file[PATH_MAX];
7702 ret = snprintf(file, sizeof(file),
7703 "/sys/kernel/debug/tracing/events/%s/%s/id",
7704 tp_category, tp_name);
7707 if (ret >= sizeof(file)) {
7708 pr_debug("tracepoint %s/%s path is too long\n",
7709 tp_category, tp_name);
7712 return parse_uint_from_file(file, "%d\n");
static int perf_event_open_tracepoint(const char *tp_category,
				      const char *tp_name)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int tp_id, pfd, err;

	tp_id = determine_tracepoint_id(tp_category, tp_name);
	if (tp_id < 0) {
		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
		return tp_id;
	}

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = tp_id;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_tracepoint(tp_category, tp_name);
	if (pfd < 0) {
		pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
			bpf_program__title(prog, false),
			tp_category, tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
			bpf_program__title(prog, false),
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}

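/* Example (illustrative sketch): the category/name pair mirrors the
 * directory layout under /sys/kernel/debug/tracing/events/:
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_write");
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);
 */
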
static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
				  struct bpf_program *prog)
{
	char *sec_name, *tp_cat, *tp_name;
	struct bpf_link *link;

	sec_name = strdup(bpf_program__title(prog, false));
	if (!sec_name)
		return ERR_PTR(-ENOMEM);

	/* extract "tp/<category>/<name>" */
	tp_cat = sec_name + sec->len;
	tp_name = strchr(tp_cat, '/');
	if (!tp_name) {
		link = ERR_PTR(-EINVAL);
		goto out;
	}
	*tp_name = '\0';
	tp_name++;

	link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
out:
	free(sec_name);
	return link;
}

struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
						    const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
			bpf_program__title(prog, false), tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return link;
}

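/* Example (illustrative sketch): raw tracepoints take only the tracepoint
 * name and hand the program raw arguments, bypassing the stable tracepoint
 * ABI:
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);
 */
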
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
				      struct bpf_program *prog)
{
	const char *tp_name = bpf_program__title(prog, false) + sec->len;

	return bpf_program__attach_raw_tracepoint(prog, tp_name);
}

/* Common logic for all BPF program types that attach to a btf_id */
static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	/* BTF target is already recorded in the program at load time, so a
	 * NULL name attaches to that btf_id
	 */
	pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach: %s\n",
			bpf_program__title(prog, false),
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return (struct bpf_link *)link;
}

struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog);
}

struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog);
}

static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
				     struct bpf_program *prog)
{
	return bpf_program__attach_trace(prog);
}

static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
				   struct bpf_program *prog)
{
	return bpf_program__attach_lsm(prog);
}

static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
				    struct bpf_program *prog)
{
	return bpf_program__attach_iter(prog, NULL);
}

static struct bpf_link *
bpf_program__attach_fd(struct bpf_program *prog, int target_fd,
		       const char *target_name)
{
	enum bpf_attach_type attach_type;
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	attach_type = bpf_program__get_expected_attach_type(prog);
	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, NULL);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to %s: %s\n",
			bpf_program__title(prog, false), target_name,
			libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return ERR_PTR(link_fd);
	}
	link->fd = link_fd;
	return link;
}

struct bpf_link *
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
{
	return bpf_program__attach_fd(prog, cgroup_fd, "cgroup");
}

struct bpf_link *
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
{
	return bpf_program__attach_fd(prog, netns_fd, "netns");
}

struct bpf_link *
bpf_program__attach_iter(struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;

	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
		return ERR_PTR(-EINVAL);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, NULL);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to iterator: %s\n",
			bpf_program__title(prog, false),
			libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return ERR_PTR(link_fd);
	}
	link->fd = link_fd;
	return link;
}

struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
	const struct bpf_sec_def *sec_def;

	sec_def = find_sec_def(bpf_program__title(prog, false));
	if (!sec_def || !sec_def->attach_fn)
		return ERR_PTR(-ESRCH);

	return sec_def->attach_fn(sec_def, prog);
}

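/* Example (illustrative sketch): with a recognized SEC() name on the BPF
 * side, e.g. SEC("kprobe/do_unlinkat"), user code can rely on the
 * section-based dispatch above instead of calling a type-specific attach
 * function:
 *
 *	link = bpf_program__attach(prog);
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);
 */
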
static int bpf_link__detach_struct_ops(struct bpf_link *link)
{
	__u32 zero = 0;

	if (bpf_map_delete_elem(link->fd, &zero))
		return -errno;

	return 0;
}

struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
{
	struct bpf_struct_ops *st_ops;
	struct bpf_link *link;
	__u32 i, zero = 0;
	int err;

	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
		return ERR_PTR(-EINVAL);

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);

	st_ops = map->st_ops;
	for (i = 0; i < btf_vlen(st_ops->type); i++) {
		struct bpf_program *prog = st_ops->progs[i];
		void *kern_data;
		int prog_fd;

		if (!prog)
			continue;

		prog_fd = bpf_program__fd(prog);
		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
		*(unsigned long *)kern_data = prog_fd;
	}

	err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
	if (err) {
		err = -errno;
		free(link);
		return ERR_PTR(err);
	}

	link->detach = bpf_link__detach_struct_ops;
	link->fd = map->fd;

	return link;
}

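/* Example (illustrative sketch): registering a struct_ops map; the map
 * name "my_ops" is a placeholder for a SEC(".struct_ops") map declared on
 * the BPF side:
 *
 *	struct bpf_map *map;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_ops");
 *	link = bpf_map__attach_struct_ops(map);
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);
 */
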
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return ret;
}

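/* Example (illustrative sketch): a minimal record callback for
 * bpf_perf_event_read_simple(). copy_mem/copy_size start out as NULL/0
 * and are grown by the function as needed to reassemble records that wrap
 * around the end of the ring:
 *
 * static enum bpf_perf_event_ret
 * handle_record(struct perf_event_header *hdr, void *private_data)
 * {
 *	... inspect hdr->type / hdr->size ...
 *	return LIBBPF_PERF_EVENT_CONT;
 * }
 *
 *	void *copy_mem = NULL;
 *	size_t copy_size = 0;
 *
 *	ret = bpf_perf_event_read_simple(mmap_mem, 8 * page_size, page_size,
 *					 &copy_mem, &copy_size,
 *					 handle_record, NULL);
 *	free(copy_mem);
 */
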
struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;
	int *cpus;
	int *map_keys;
};

struct perf_cpu_buf {
	struct perf_buffer *pb;
	void *base; /* mmap()'ed memory */
	void *buf; /* for reconstructing segmented data */
	size_t buf_size;
	int fd;
	int cpu;
	int map_key;
};

struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */

	size_t page_size;
	size_t mmap_size;
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt; /* number of allocated CPU buffers */
	int epoll_fd; /* epoll FD used to poll all per-CPU perf FDs */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
				      struct perf_cpu_buf *cpu_buf)
{
	if (!cpu_buf)
		return;
	if (cpu_buf->base &&
	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
	if (cpu_buf->fd >= 0) {
		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
		close(cpu_buf->fd);
	}
	free(cpu_buf->buf);
	free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
	int i;

	if (!pb)
		return;
	if (pb->cpu_bufs) {
		for (i = 0; i < pb->cpu_cnt; i++) {
			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

			if (!cpu_buf)
				continue;

			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
			perf_buffer__free_cpu_buf(pb, cpu_buf);
		}
		free(pb->cpu_bufs);
	}
	if (pb->epoll_fd >= 0)
		close(pb->epoll_fd);
	free(pb->events);
	free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     const struct perf_buffer_opts *opts)
{
	struct perf_buffer_params p = {};
	struct perf_event_attr attr = { 0, };

	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	p.attr = &attr;
	p.sample_cb = opts ? opts->sample_cb : NULL;
	p.lost_cb = opts ? opts->lost_cb : NULL;
	p.ctx = opts ? opts->ctx : NULL;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

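/* Example (illustrative sketch): typical high-level workflow around a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map; handle_sample/handle_lost and the
 * map variable are placeholders:
 *
 * static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 * {
 *	... consume one raw sample ...
 * }
 *
 * static void handle_lost(void *ctx, int cpu, __u64 cnt)
 * {
 *	... account for cnt dropped samples ...
 * }
 *
 *	struct perf_buffer_opts pb_opts = {
 *		.sample_cb = handle_sample,
 *		.lost_cb = handle_lost,
 *	};
 *	struct perf_buffer *pb;
 *	int err;
 *
 *	pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
 *	if (IS_ERR(pb))
 *		return PTR_ERR(pb);
 *	while ((err = perf_buffer__poll(pb, 100)) >= 0)
 *		;
 *	perf_buffer__free(pb);
 */
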
struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
		     const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	p.attr = opts->attr;
	p.event_cb = opts->event_cb;
	p.ctx = opts->ctx;
	p.cpu_cnt = opts->cpu_cnt;
	p.cpus = opts->cpus;
	p.map_keys = opts->map_keys;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	const char *online_cpus_file = "/sys/devices/system/cpu/online";
	struct bpf_map_info map = {};
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	bool *online = NULL;
	__u32 map_info_len;
	int err, i, j, n;

	if (page_cnt & (page_cnt - 1)) {
		pr_warn("page count should be power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
		return ERR_PTR(err);
	}

	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			map.name);
		return ERR_PTR(-EINVAL);
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
	if (err) {
		pr_warn("failed to get online CPU mask: %d\n", err);
		goto error;
	}

	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		/* if user didn't explicitly request particular CPUs to be
		 * attached to, skip offline/not present CPUs
		 */
		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
			continue;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[j] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[j].events = EPOLLIN;
		pb->events[j].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[j]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
		j++;
	}
	pb->cpu_cnt = j;
	free(online);

	return pb;

error:
	free(online);
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}

struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
					 pb->page_size, &cpu_buf->buf,
					 &cpu_buf->buf_size,
					 perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return err;
		}
	}
	return cnt < 0 ? -errno : cnt;
}

int perf_buffer__consume(struct perf_buffer *pb)
{
	int i, err;

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

		if (!cpu_buf)
			continue;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return err;
		}
	}
	return 0;
}

struct bpf_prog_info_array_desc {
	int array_offset;	/* e.g. offset of jited_prog_insns */
	int count_offset;	/* e.g. offset of jited_prog_len */
	int size_offset;	/* > 0: offset of rec size,
				 * < 0: fixed size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},
};

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
					   int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
					   int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}

void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}

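/* Example (illustrative sketch): fetching JITed instructions and map IDs
 * for a program FD in one self-contained allocation. Converting embedded
 * pointers to offsets with bpf_program__bpil_addr_to_offs() makes the
 * blob position-independent (e.g. for writing to disk); convert back with
 * bpf_program__bpil_offs_to_addr() after reading it back in:
 *
 *	__u64 arrays = (1UL << BPF_PROG_INFO_JITED_INSNS) |
 *		       (1UL << BPF_PROG_INFO_MAP_IDS);
 *	struct bpf_prog_info_linear *info;
 *
 *	info = bpf_program__get_prog_info_linear(prog_fd, arrays);
 *	if (IS_ERR(info))
 *		return PTR_ERR(info);
 *	... use info->info and info->data ...
 *	free(info);
 */
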
int bpf_program__set_attach_target(struct bpf_program *prog,
				   int attach_prog_fd,
				   const char *attach_func_name)
{
	int btf_id;

	if (!prog || attach_prog_fd < 0 || !attach_func_name)
		return -EINVAL;

	if (attach_prog_fd)
		btf_id = libbpf_find_prog_btf_id(attach_func_name,
						 attach_prog_fd);
	else
		btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
					       attach_func_name,
					       prog->expected_attach_type);

	if (btf_id < 0)
		return btf_id;

	prog->attach_btf_id = btf_id;
	prog->attach_prog_fd = attach_prog_fd;
	return 0;
}

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		tmp = realloc(*mask, end + 1);
		if (!tmp) {
			err = -ENOMEM;
			goto cleanup;
		}
		*mask = tmp;
		memset(tmp + *mask_sz, 0, start - *mask_sz);
		memset(tmp + start, 1, end - start + 1);
		*mask_sz = end + 1;
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}

int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	char buf[128];
	int fd, err = 0, len;

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		err = len ? -errno : -EINVAL;
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}
	buf[len] = '\0';

	return parse_cpu_mask_str(buf, mask, mask_sz);
}

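/* Example (illustrative sketch): parse_cpu_mask_str() turns the kernel's
 * CPU list format into a dense boolean array:
 *
 *	bool *mask;
 *	int n, err;
 *
 *	err = parse_cpu_mask_str("0-2,4\n", &mask, &n);
 *	on success: n == 5; mask[0..2] and mask[4] are true, mask[3] false
 *	free(mask);
 */
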
int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	int err, n, i, tmp_cpus;
	bool *mask;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return err;

	tmp_cpus = 0;
	for (i = 0; i < n; i++) {
		if (mask[i])
			tmp_cpus++;
	}
	free(mask);

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}

int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int i;

	/* Attempt to preserve opts->object_name, unless overridden by user
	 * explicitly. Overwriting object name for skeletons is discouraged,
	 * as it breaks global data maps, because they contain object name
	 * prefix as their own map name prefix. When skeleton is generated,
	 * bpftool is making an assumption that this name will stay the same.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	if (IS_ERR(obj)) {
		pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
			s->name, PTR_ERR(obj));
		return PTR_ERR(obj);
	}

	*s->obj = obj;

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map **map = s->maps[i].map;
		const char *name = s->maps[i].name;
		void **mmaped = s->maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return -ESRCH;
		}

		/* externs shouldn't be pre-setup from user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program **prog = s->progs[i].prog;
		const char *name = s->progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return -ESRCH;
		}
	}

	return 0;
}

int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return err;
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = bpf_map__fd(map);
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap anonymous mmap()-ed "map initialization image" as
		 * a BPF map-backed mmap()-ed memory, but preserving the same
		 * memory address. This will cause kernel to change process'
		 * page table to point to a different piece of kernel memory,
		 * but from userspace point of view memory address (and its
		 * contents, being identical at this point) will stay the
		 * same. This mapping will be released by bpf_object__close()
		 * as per normal clean up procedure, so we don't need to worry
		 * about it from skeleton's clean up perspective.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot,
			       MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return err;
		}
	}

	return 0;
}

int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program *prog = *s->progs[i].prog;
		struct bpf_link **link = s->progs[i].link;
		const struct bpf_sec_def *sec_def;
		const char *sec_name = bpf_program__title(prog, false);

		sec_def = find_sec_def(sec_name);
		if (!sec_def || !sec_def->attach_fn)
			continue;

		*link = sec_def->attach_fn(sec_def, prog);
		if (IS_ERR(*link)) {
			pr_warn("failed to auto-attach program '%s': %ld\n",
				bpf_program__name(prog), PTR_ERR(*link));
			return PTR_ERR(*link);
		}
	}

	return 0;
}

void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_link **link = s->progs[i].link;

		if (!IS_ERR_OR_NULL(*link))
			bpf_link__destroy(*link);
		*link = NULL;
	}
}

void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
	if (!s)
		return;

	if (s->progs)
		bpf_object__detach_skeleton(s);
	if (s->obj)
		bpf_object__close(*s->obj);
	free(s->maps);
	free(s->progs);
	free(s);
}

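/* Example (illustrative sketch): the skeleton functions above back the API
 * generated by "bpftool gen skeleton". For a hypothetical minimal.bpf.o,
 * the generated wrappers map onto them roughly as follows:
 *
 *	struct minimal_bpf *skel;
 *
 *	skel = minimal_bpf__open();	calls bpf_object__open_skeleton()
 *	skel->bss->my_pid = getpid();	pre-load setup via mmap-ed data view
 *	err = minimal_bpf__load(skel);	calls bpf_object__load_skeleton()
 *	err = minimal_bpf__attach(skel); calls bpf_object__attach_skeleton()
 *	... run ...
 *	minimal_bpf__destroy(skel);	calls bpf_object__destroy_skeleton()
 */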