1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
12 #include <sys/utsname.h>
13 #include <sys/param.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
22 #include "libbpf_internal.h"
26 #define BTF_MAX_NR_TYPES 0x7fffffffU
27 #define BTF_MAX_STR_OFFSET 0x7fffffffU
29 static struct btf_type btf_void;
32 /* raw BTF data in native endianness */
34 /* raw BTF data in non-native endianness */
35 void *raw_data_swapped;
37 /* whether target endianness differs from the native one */
41 * When BTF is loaded from an ELF or raw memory it is stored
42 * in a contiguous memory block. The hdr, types_data, and strs_data
43 * point inside that memory region to their respective parts of BTF
46 * +--------------------------------+
47 * | Header | Types | Strings |
48 * +--------------------------------+
53 * strs_data------------+
55 * If BTF data is later modified, e.g., due to types added or
56 * removed, BTF deduplication performed, etc., this contiguous
57 * representation is broken up into three independently allocated
58 * memory regions to be able to modify them independently.
59 * raw_data is nulled out at that point, but can be later allocated
60 * and cached again if user calls btf__raw_data(), at which point
61 * raw_data will contain a contiguous copy of header, types, and
64 * +----------+ +---------+ +-----------+
65 * | Header | | Types | | Strings |
66 * +----------+ +---------+ +-----------+
71 * strset__data(strs_set)-----+
73 * +----------+---------+-----------+
74 * | Header | Types | Strings |
75 * raw_data----->+----------+---------+-----------+
77 struct btf_header *hdr;
80 size_t types_data_cap; /* used size stored in hdr->type_len */
82 /* type ID to `struct btf_type *` lookup index
83 * type_offs[0] corresponds to the first non-VOID type:
84 * - for base BTF it's type [1];
85 * - for split BTF it's the first non-base BTF type.
89 /* number of types in this BTF instance:
90 * - doesn't include special [0] void type;
91 * - for split BTF counts number of types added on top of base BTF.
94 /* if not NULL, points to the base BTF on top of which the current
98 /* BTF type ID of the first type in this BTF instance:
99 * - for base BTF it's equal to 1;
100 * - for split BTF it's equal to biggest type ID of base BTF plus 1.
103 /* logical string offset of this BTF instance:
104 * - for base BTF it's equal to 0;
105 * - for split BTF it's equal to the total size of base BTF's string section.
109 /* only one of strs_data or strs_set can be non-NULL, depending on
110 * whether BTF is in a modifiable state (strs_set is used) or not
111 * (strs_data points inside raw_data)
114 /* a set of unique strings */
115 struct strset *strs_set;
116 /* whether strings are already deduplicated */
119 /* BTF object FD, if loaded into kernel */
122 /* Pointer size (in bytes) for a target architecture of this BTF */
126 static inline __u64 ptr_to_u64(const void *ptr)
128 return (__u64) (unsigned long) ptr;
131 /* Ensure given dynamically allocated memory region pointed to by *data* with
132 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
133 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
134 * are already used. At most *max_cnt* elements can ever be allocated.
135 * If necessary, memory is reallocated and all existing data is copied over;
136 * the new pointer to the memory region is stored at *data*, and the new memory
137 * region capacity (in number of elements) is stored in *cap_cnt*.
138 * On success, memory pointer to the beginning of unused memory is returned.
139 * On error, NULL is returned.
141 void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
142 size_t cur_cnt, size_t max_cnt, size_t add_cnt)
147 if (cur_cnt + add_cnt <= *cap_cnt)
148 return *data + cur_cnt * elem_sz;
150 /* requested more than the set limit */
151 if (cur_cnt + add_cnt > max_cnt)
155 new_cnt += new_cnt / 4; /* expand by 25% */
156 if (new_cnt < 16) /* but at least 16 elements */
158 if (new_cnt > max_cnt) /* but not exceeding a set limit */
160 if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */
161 new_cnt = cur_cnt + add_cnt;
163 new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
167 /* zero out newly allocated portion of memory */
168 memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);
172 return new_data + cur_cnt * elem_sz;
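/* Usage sketch (editorial illustration, not part of libbpf): growing a
 * dynamically sized array of __u32 by one element. All names below are
 * made up; error handling is the caller's responsibility.
 *
 *	__u32 *ids = NULL;
 *	size_t cap = 0, cnt = 0;	// capacity and used count, in elements
 *	__u32 *slot;
 *
 *	slot = libbpf_add_mem((void **)&ids, &cap, sizeof(*ids),
 *			      cnt, SIZE_MAX, 1);
 *	if (!slot)
 *		return -ENOMEM;		// *ids and cap stay valid on failure
 *	*slot = 42;
 *	cnt++;
 */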
175 /* Ensure given dynamically allocated memory region has enough allocated space
176 * to accommodate *need_cnt* elements of size *elem_sz* bytes each
178 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
182 if (need_cnt <= *cap_cnt)
185 p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
192 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
194 return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
195 btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
198 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
202 p = btf_add_type_offs_mem(btf, 1);
210 static void btf_bswap_hdr(struct btf_header *h)
212 h->magic = bswap_16(h->magic);
213 h->hdr_len = bswap_32(h->hdr_len);
214 h->type_off = bswap_32(h->type_off);
215 h->type_len = bswap_32(h->type_len);
216 h->str_off = bswap_32(h->str_off);
217 h->str_len = bswap_32(h->str_len);
220 static int btf_parse_hdr(struct btf *btf)
222 struct btf_header *hdr = btf->hdr;
225 if (btf->raw_size < sizeof(struct btf_header)) {
226 pr_debug("BTF header not found\n");
230 if (hdr->magic == bswap_16(BTF_MAGIC)) {
231 btf->swapped_endian = true;
232 if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
233 pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
234 bswap_32(hdr->hdr_len));
238 } else if (hdr->magic != BTF_MAGIC) {
239 pr_debug("Invalid BTF magic: %x\n", hdr->magic);
243 if (btf->raw_size < hdr->hdr_len) {
244 pr_debug("BTF header len %u larger than data size %u\n",
245 hdr->hdr_len, btf->raw_size);
249 meta_left = btf->raw_size - hdr->hdr_len;
250 if (meta_left < (long long)hdr->str_off + hdr->str_len) {
251 pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
255 if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
256 pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
257 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
261 if (hdr->type_off % 4) {
262 pr_debug("BTF type section is not aligned to 4 bytes\n");
269 static int btf_parse_str_sec(struct btf *btf)
271 const struct btf_header *hdr = btf->hdr;
272 const char *start = btf->strs_data;
273 const char *end = start + btf->hdr->str_len;
275 if (btf->base_btf && hdr->str_len == 0)
277 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
278 pr_debug("Invalid BTF string section\n");
281 if (!btf->base_btf && start[0]) {
282 pr_debug("Invalid BTF string section\n");
288 static int btf_type_size(const struct btf_type *t)
290 const int base_size = sizeof(struct btf_type);
291 __u16 vlen = btf_vlen(t);
293 switch (btf_kind(t)) {
296 case BTF_KIND_VOLATILE:
297 case BTF_KIND_RESTRICT:
299 case BTF_KIND_TYPEDEF:
302 case BTF_KIND_TYPE_TAG:
305 return base_size + sizeof(__u32);
307 return base_size + vlen * sizeof(struct btf_enum);
309 return base_size + sizeof(struct btf_array);
310 case BTF_KIND_STRUCT:
312 return base_size + vlen * sizeof(struct btf_member);
313 case BTF_KIND_FUNC_PROTO:
314 return base_size + vlen * sizeof(struct btf_param);
316 return base_size + sizeof(struct btf_var);
317 case BTF_KIND_DATASEC:
318 return base_size + vlen * sizeof(struct btf_var_secinfo);
319 case BTF_KIND_DECL_TAG:
320 return base_size + sizeof(struct btf_decl_tag);
322 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
327 static void btf_bswap_type_base(struct btf_type *t)
329 t->name_off = bswap_32(t->name_off);
330 t->info = bswap_32(t->info);
331 t->type = bswap_32(t->type);
334 static int btf_bswap_type_rest(struct btf_type *t)
336 struct btf_var_secinfo *v;
337 struct btf_member *m;
341 __u16 vlen = btf_vlen(t);
344 switch (btf_kind(t)) {
347 case BTF_KIND_VOLATILE:
348 case BTF_KIND_RESTRICT:
350 case BTF_KIND_TYPEDEF:
353 case BTF_KIND_TYPE_TAG:
356 *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
359 for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
360 e->name_off = bswap_32(e->name_off);
361 e->val = bswap_32(e->val);
366 a->type = bswap_32(a->type);
367 a->index_type = bswap_32(a->index_type);
368 a->nelems = bswap_32(a->nelems);
370 case BTF_KIND_STRUCT:
372 for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
373 m->name_off = bswap_32(m->name_off);
374 m->type = bswap_32(m->type);
375 m->offset = bswap_32(m->offset);
378 case BTF_KIND_FUNC_PROTO:
379 for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
380 p->name_off = bswap_32(p->name_off);
381 p->type = bswap_32(p->type);
385 btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
387 case BTF_KIND_DATASEC:
388 for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
389 v->type = bswap_32(v->type);
390 v->offset = bswap_32(v->offset);
391 v->size = bswap_32(v->size);
394 case BTF_KIND_DECL_TAG:
395 btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
398 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
403 static int btf_parse_type_sec(struct btf *btf)
405 struct btf_header *hdr = btf->hdr;
406 void *next_type = btf->types_data;
407 void *end_type = next_type + hdr->type_len;
410 while (next_type + sizeof(struct btf_type) <= end_type) {
411 if (btf->swapped_endian)
412 btf_bswap_type_base(next_type);
414 type_size = btf_type_size(next_type);
417 if (next_type + type_size > end_type) {
418 pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
422 if (btf->swapped_endian && btf_bswap_type_rest(next_type))
425 err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
429 next_type += type_size;
433 if (next_type != end_type) {
434 pr_warn("BTF types data is malformed\n");
441 __u32 btf__get_nr_types(const struct btf *btf)
443 return btf->start_id + btf->nr_types - 1;
446 __u32 btf__type_cnt(const struct btf *btf)
448 return btf->start_id + btf->nr_types;
451 const struct btf *btf__base_btf(const struct btf *btf)
453 return btf->base_btf;
456 /* internal helper returning non-const pointer to a type */
457 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
461 if (type_id < btf->start_id)
462 return btf_type_by_id(btf->base_btf, type_id);
463 return btf->types_data + btf->type_offs[type_id - btf->start_id];
466 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
468 if (type_id >= btf->start_id + btf->nr_types)
469 return errno = EINVAL, NULL;
470 return btf_type_by_id((struct btf *)btf, type_id);
473 static int determine_ptr_size(const struct btf *btf)
475 const struct btf_type *t;
479 if (btf->base_btf && btf->base_btf->ptr_sz > 0)
480 return btf->base_btf->ptr_sz;
482 n = btf__type_cnt(btf);
483 for (i = 1; i < n; i++) {
484 t = btf__type_by_id(btf, i);
488 name = btf__name_by_offset(btf, t->name_off);
492 if (strcmp(name, "long int") == 0 ||
493 strcmp(name, "long unsigned int") == 0) {
494 if (t->size != 4 && t->size != 8)
503 static size_t btf_ptr_sz(const struct btf *btf)
506 ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
507 return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
510 /* Return pointer size this BTF instance assumes. The size is heuristically
511 * determined by looking for 'long' or 'unsigned long' integer type and
512 * recording its size in bytes. If BTF type information doesn't have any such
513 * type, this function returns 0. In the latter case, native architecture's
514 * pointer size is assumed, so it will be either 4 or 8, depending on the
515 * architecture that libbpf was compiled for. It's possible to override the
516 * guessed value by using the btf__set_pointer_size() API.
518 size_t btf__pointer_size(const struct btf *btf)
521 ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
524 /* not enough BTF type info to guess */
530 /* Override or set pointer size in bytes. Only values of 4 and 8 are
533 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
535 if (ptr_sz != 4 && ptr_sz != 8)
536 return libbpf_err(-EINVAL);
537 btf->ptr_sz = ptr_sz;
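/* Usage sketch (editorial illustration): the heuristic above can be wrong
 * when, e.g., generating BTF for a 32-bit target on a 64-bit host, in which
 * case the pointer size would be pinned explicitly:
 *
 *	struct btf *btf = btf__new_empty();
 *	int err = btf__set_pointer_size(btf, 4);	// force 4-byte pointers
 *
 *	// err is 0 on success, -EINVAL for any size other than 4 or 8
 */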
541 static bool is_host_big_endian(void)
543 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
545 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
548 # error "Unrecognized __BYTE_ORDER__"
552 enum btf_endianness btf__endianness(const struct btf *btf)
554 if (is_host_big_endian())
555 return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
557 return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
560 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
562 if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
563 return libbpf_err(-EINVAL);
565 btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
566 if (!btf->swapped_endian) {
567 free(btf->raw_data_swapped);
568 btf->raw_data_swapped = NULL;
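/* Usage sketch (editorial illustration): to emit raw BTF for a big-endian
 * target from a little-endian host, flip the endianness before fetching the
 * raw bytes; btf__raw_data() will then return (and cache) a swapped copy:
 *
 *	__u32 sz;
 *	const void *raw;
 *
 *	btf__set_endianness(btf, BTF_BIG_ENDIAN);
 *	raw = btf__raw_data(btf, &sz);
 */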
573 static bool btf_type_is_void(const struct btf_type *t)
575 return t == &btf_void || btf_is_fwd(t);
578 static bool btf_type_is_void_or_null(const struct btf_type *t)
580 return !t || btf_type_is_void(t);
583 #define MAX_RESOLVE_DEPTH 32
585 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
587 const struct btf_array *array;
588 const struct btf_type *t;
593 t = btf__type_by_id(btf, type_id);
594 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
595 switch (btf_kind(t)) {
597 case BTF_KIND_STRUCT:
600 case BTF_KIND_DATASEC:
605 size = btf_ptr_sz(btf);
607 case BTF_KIND_TYPEDEF:
608 case BTF_KIND_VOLATILE:
610 case BTF_KIND_RESTRICT:
612 case BTF_KIND_DECL_TAG:
613 case BTF_KIND_TYPE_TAG:
617 array = btf_array(t);
618 if (nelems && array->nelems > UINT32_MAX / nelems)
619 return libbpf_err(-E2BIG);
620 nelems *= array->nelems;
621 type_id = array->type;
624 return libbpf_err(-EINVAL);
627 t = btf__type_by_id(btf, type_id);
632 return libbpf_err(-EINVAL);
633 if (nelems && size > UINT32_MAX / nelems)
634 return libbpf_err(-E2BIG);
636 return nelems * size;
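/* Usage sketch (editorial illustration): for a hypothetical 'int buf[16]'
 * ARRAY type with ID arr_id, the loop above follows the array to its element
 * type and multiplies the element size out:
 *
 *	__s64 sz = btf__resolve_size(btf, arr_id);
 *
 *	// sz == 64 (16 elements x 4-byte int), or a negative error code
 */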
639 int btf__align_of(const struct btf *btf, __u32 id)
641 const struct btf_type *t = btf__type_by_id(btf, id);
642 __u16 kind = btf_kind(t);
648 return min(btf_ptr_sz(btf), (size_t)t->size);
650 return btf_ptr_sz(btf);
651 case BTF_KIND_TYPEDEF:
652 case BTF_KIND_VOLATILE:
654 case BTF_KIND_RESTRICT:
655 case BTF_KIND_TYPE_TAG:
656 return btf__align_of(btf, t->type);
658 return btf__align_of(btf, btf_array(t)->type);
659 case BTF_KIND_STRUCT:
660 case BTF_KIND_UNION: {
661 const struct btf_member *m = btf_members(t);
662 __u16 vlen = btf_vlen(t);
663 int i, max_align = 1, align;
665 for (i = 0; i < vlen; i++, m++) {
666 align = btf__align_of(btf, m->type);
668 return libbpf_err(align);
669 max_align = max(max_align, align);
675 pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
676 return errno = EINVAL, 0;
680 int btf__resolve_type(const struct btf *btf, __u32 type_id)
682 const struct btf_type *t;
685 t = btf__type_by_id(btf, type_id);
686 while (depth < MAX_RESOLVE_DEPTH &&
687 !btf_type_is_void_or_null(t) &&
688 (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
690 t = btf__type_by_id(btf, type_id);
694 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
695 return libbpf_err(-EINVAL);
700 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
702 __u32 i, nr_types = btf__type_cnt(btf);
704 if (!strcmp(type_name, "void"))
707 for (i = 1; i < nr_types; i++) {
708 const struct btf_type *t = btf__type_by_id(btf, i);
709 const char *name = btf__name_by_offset(btf, t->name_off);
711 if (name && !strcmp(type_name, name))
715 return libbpf_err(-ENOENT);
718 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
719 const char *type_name, __u32 kind)
721 __u32 i, nr_types = btf__type_cnt(btf);
723 if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
726 for (i = start_id; i < nr_types; i++) {
727 const struct btf_type *t = btf__type_by_id(btf, i);
730 if (btf_kind(t) != kind)
732 name = btf__name_by_offset(btf, t->name_off);
733 if (name && !strcmp(type_name, name))
737 return libbpf_err(-ENOENT);
740 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
743 return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
746 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
749 return btf_find_by_name_kind(btf, 1, type_name, kind);
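/* Usage sketch (editorial illustration): looking up a struct by name;
 * "task_struct" is present in vmlinux BTF, but any name works the same:
 *
 *	__s32 id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 *	// id > 0 on success, -ENOENT if absent, other negatives on error
 */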
752 static bool btf_is_modifiable(const struct btf *btf)
754 return (void *)btf->hdr != btf->raw_data;
757 void btf__free(struct btf *btf)
759 if (IS_ERR_OR_NULL(btf))
765 if (btf_is_modifiable(btf)) {
766 /* if BTF was modified after loading, it will have a split
767 * in-memory representation for header, types, and strings
768 * sections, so we need to free all of them individually. It
769 * might still have a cached contiguous raw data present,
770 * which will be unconditionally freed below.
773 free(btf->types_data);
774 strset__free(btf->strs_set);
777 free(btf->raw_data_swapped);
778 free(btf->type_offs);
782 static struct btf *btf_new_empty(struct btf *base_btf)
786 btf = calloc(1, sizeof(*btf));
788 return ERR_PTR(-ENOMEM);
792 btf->start_str_off = 0;
794 btf->ptr_sz = sizeof(void *);
795 btf->swapped_endian = false;
798 btf->base_btf = base_btf;
799 btf->start_id = btf__type_cnt(base_btf);
800 btf->start_str_off = base_btf->hdr->str_len;
803 /* +1 for empty string at offset 0 */
804 btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
805 btf->raw_data = calloc(1, btf->raw_size);
806 if (!btf->raw_data) {
808 return ERR_PTR(-ENOMEM);
811 btf->hdr = btf->raw_data;
812 btf->hdr->hdr_len = sizeof(struct btf_header);
813 btf->hdr->magic = BTF_MAGIC;
814 btf->hdr->version = BTF_VERSION;
816 btf->types_data = btf->raw_data + btf->hdr->hdr_len;
817 btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
818 btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
823 struct btf *btf__new_empty(void)
825 return libbpf_ptr(btf_new_empty(NULL));
828 struct btf *btf__new_empty_split(struct btf *base_btf)
830 return libbpf_ptr(btf_new_empty(base_btf));
833 static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
838 btf = calloc(1, sizeof(struct btf));
840 return ERR_PTR(-ENOMEM);
844 btf->start_str_off = 0;
848 btf->base_btf = base_btf;
849 btf->start_id = btf__type_cnt(base_btf);
850 btf->start_str_off = base_btf->hdr->str_len;
853 btf->raw_data = malloc(size);
854 if (!btf->raw_data) {
858 memcpy(btf->raw_data, data, size);
859 btf->raw_size = size;
861 btf->hdr = btf->raw_data;
862 err = btf_parse_hdr(btf);
866 btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
867 btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
869 err = btf_parse_str_sec(btf);
870 err = err ?: btf_parse_type_sec(btf);
883 struct btf *btf__new(const void *data, __u32 size)
885 return libbpf_ptr(btf_new(data, size, NULL));
888 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
889 struct btf_ext **btf_ext)
891 Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
892 int err = 0, fd = -1, idx = 0;
893 struct btf *btf = NULL;
899 if (elf_version(EV_CURRENT) == EV_NONE) {
900 pr_warn("failed to init libelf for %s\n", path);
901 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
904 fd = open(path, O_RDONLY | O_CLOEXEC);
907 pr_warn("failed to open %s: %s\n", path, strerror(errno));
911 err = -LIBBPF_ERRNO__FORMAT;
913 elf = elf_begin(fd, ELF_C_READ, NULL);
915 pr_warn("failed to open %s as ELF file\n", path);
918 if (!gelf_getehdr(elf, &ehdr)) {
919 pr_warn("failed to get EHDR from %s\n", path);
923 if (elf_getshdrstrndx(elf, &shstrndx)) {
924 pr_warn("failed to get section names section index for %s\n",
929 if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
930 pr_warn("failed to get e_shstrndx from %s\n", path);
934 while ((scn = elf_nextscn(elf, scn)) != NULL) {
939 if (gelf_getshdr(scn, &sh) != &sh) {
940 pr_warn("failed to get section(%d) header from %s\n",
944 name = elf_strptr(elf, shstrndx, sh.sh_name);
946 pr_warn("failed to get section(%d) name from %s\n",
950 if (strcmp(name, BTF_ELF_SEC) == 0) {
951 btf_data = elf_getdata(scn, 0);
953 pr_warn("failed to get section(%d, %s) data from %s\n",
958 } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
959 btf_ext_data = elf_getdata(scn, 0);
961 pr_warn("failed to get section(%d, %s) data from %s\n",
975 btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
976 err = libbpf_get_error(btf);
980 switch (gelf_getclass(elf)) {
982 btf__set_pointer_size(btf, 4);
985 btf__set_pointer_size(btf, 8);
988 pr_warn("failed to get ELF class (bitness) for %s\n", path);
992 if (btf_ext && btf_ext_data) {
993 *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
994 err = libbpf_get_error(*btf_ext);
997 } else if (btf_ext) {
1009 btf_ext__free(*btf_ext);
1012 return ERR_PTR(err);
1015 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
1017 return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
1020 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
1022 return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
1025 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
1027 struct btf *btf = NULL;
1034 f = fopen(path, "rb");
1040 /* check BTF magic */
1041 if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1045 if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1046 /* definitely not a raw BTF */
1052 if (fseek(f, 0, SEEK_END)) {
1061 /* rewind to the start */
1062 if (fseek(f, 0, SEEK_SET)) {
1067 /* pre-alloc memory and read all of BTF data */
1073 if (fread(data, 1, sz, f) < sz) {
1078 /* finally parse BTF data */
1079 btf = btf_new(data, sz, base_btf);
1085 return err ? ERR_PTR(err) : btf;
1088 struct btf *btf__parse_raw(const char *path)
1090 return libbpf_ptr(btf_parse_raw(path, NULL));
1093 struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
1095 return libbpf_ptr(btf_parse_raw(path, base_btf));
1098 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1106 btf = btf_parse_raw(path, base_btf);
1107 err = libbpf_get_error(btf);
1111 return ERR_PTR(err);
1112 return btf_parse_elf(path, base_btf, btf_ext);
1115 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1117 return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1120 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1122 return libbpf_ptr(btf_parse(path, base_btf, NULL));
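/* Usage sketch (editorial illustration): btf__parse() tries raw BTF first
 * and falls back to ELF, so the same call handles both the kernel's raw BTF
 * image and compiled object files:
 *
 *	struct btf *vmlinux_btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
 *
 *	if (!vmlinux_btf)
 *		// NULL is returned with errno set, e.g. ENOENT
 */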
1125 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1127 int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
1129 LIBBPF_OPTS(bpf_btf_load_opts, opts);
1130 __u32 buf_sz = 0, raw_size;
1131 char *buf = NULL, *tmp;
1136 return libbpf_err(-EEXIST);
1137 if (log_sz && !log_buf)
1138 return libbpf_err(-EINVAL);
1140 /* cache native raw data representation */
1141 raw_data = btf_get_raw_data(btf, &raw_size, false);
1146 btf->raw_size = raw_size;
1147 btf->raw_data = raw_data;
1150 /* if log_level is 0, we won't provide log_buf/log_size to the kernel,
1151 * initially. Only if BTF loading fails, we bump log_level to 1 and
1152 * retry, using either auto-allocated or custom log_buf. This way
1153 * non-NULL custom log_buf provides a buffer just in case, but hopes
1154 * for successful load and no need for log_buf.
1157 /* if caller didn't provide custom log_buf, we'll keep
1158 * allocating our own progressively bigger buffers for BTF
1162 buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
1163 tmp = realloc(buf, buf_sz);
1172 opts.log_buf = log_buf ? log_buf : buf;
1173 opts.log_size = log_buf ? log_sz : buf_sz;
1174 opts.log_level = log_level;
1177 btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
1179 /* time to turn on verbose mode and try again */
1180 if (log_level == 0) {
1184 /* only retry if caller didn't provide custom log_buf, but
1185 * make sure we can never overflow buf_sz
1187 if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
1191 pr_warn("BTF loading error: %d\n", err);
1192 /* don't print out contents of custom log_buf */
1193 if (!log_buf && buf[0])
1194 pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
1199 return libbpf_err(err);
1202 int btf__load_into_kernel(struct btf *btf)
1204 return btf_load_into_kernel(btf, NULL, 0, 0);
1207 int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel")));
1209 int btf__fd(const struct btf *btf)
1214 void btf__set_fd(struct btf *btf, int fd)
1219 static const void *btf_strs_data(const struct btf *btf)
1221 return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1224 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
1226 struct btf_header *hdr = btf->hdr;
1232 data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
1234 *size = btf->raw_size;
1238 data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
1239 data = calloc(1, data_sz);
1244 memcpy(p, hdr, hdr->hdr_len);
1249 memcpy(p, btf->types_data, hdr->type_len);
1251 for (i = 0; i < btf->nr_types; i++) {
1252 t = p + btf->type_offs[i];
1253 /* btf_bswap_type_rest() relies on native t->info, so
1254 * we swap base type info after we swapped all the
1255 * additional information
1257 if (btf_bswap_type_rest(t))
1259 btf_bswap_type_base(t);
1264 memcpy(p, btf_strs_data(btf), hdr->str_len);
1274 const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
1276 struct btf *btf = (struct btf *)btf_ro;
1280 data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1282 return errno = ENOMEM, NULL;
1284 btf->raw_size = data_sz;
1285 if (btf->swapped_endian)
1286 btf->raw_data_swapped = data;
1288 btf->raw_data = data;
1293 __attribute__((alias("btf__raw_data")))
1294 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1296 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1298 if (offset < btf->start_str_off)
1299 return btf__str_by_offset(btf->base_btf, offset);
1300 else if (offset - btf->start_str_off < btf->hdr->str_len)
1301 return btf_strs_data(btf) + (offset - btf->start_str_off);
1303 return errno = EINVAL, NULL;
1306 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
1308 return btf__str_by_offset(btf, offset);
1311 struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
1313 struct bpf_btf_info btf_info;
1314 __u32 len = sizeof(btf_info);
1320 /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). So
1321 * let's start with a sane default - 4KiB here - and resize it only if
1322 * bpf_obj_get_info_by_fd() needs a bigger buffer.
1325 ptr = malloc(last_size);
1327 return ERR_PTR(-ENOMEM);
1329 memset(&btf_info, 0, sizeof(btf_info));
1330 btf_info.btf = ptr_to_u64(ptr);
1331 btf_info.btf_size = last_size;
1332 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
1334 if (!err && btf_info.btf_size > last_size) {
1337 last_size = btf_info.btf_size;
1338 temp_ptr = realloc(ptr, last_size);
1340 btf = ERR_PTR(-ENOMEM);
1345 len = sizeof(btf_info);
1346 memset(&btf_info, 0, sizeof(btf_info));
1347 btf_info.btf = ptr_to_u64(ptr);
1348 btf_info.btf_size = last_size;
1350 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
1353 if (err || btf_info.btf_size > last_size) {
1354 btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
1358 btf = btf_new(ptr, btf_info.btf_size, base_btf);
1365 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1370 btf_fd = bpf_btf_get_fd_by_id(id);
1372 return libbpf_err_ptr(-errno);
1374 btf = btf_get_from_fd(btf_fd, base_btf);
1377 return libbpf_ptr(btf);
1380 struct btf *btf__load_from_kernel_by_id(__u32 id)
1382 return btf__load_from_kernel_by_id_split(id, NULL);
1385 int btf__get_from_id(__u32 id, struct btf **btf)
1391 res = btf__load_from_kernel_by_id(id);
1392 err = libbpf_get_error(res);
1395 return libbpf_err(err);
1401 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
1402 __u32 expected_key_size, __u32 expected_value_size,
1403 __u32 *key_type_id, __u32 *value_type_id)
1405 const struct btf_type *container_type;
1406 const struct btf_member *key, *value;
1407 const size_t max_name = 256;
1408 char container_name[max_name];
1409 __s64 key_size, value_size;
1412 if (snprintf(container_name, max_name, "____btf_map_%s", map_name) >= max_name) {
1413 pr_warn("map:%s length of '____btf_map_%s' is too long\n",
1414 map_name, map_name);
1415 return libbpf_err(-EINVAL);
1418 container_id = btf__find_by_name(btf, container_name);
1419 if (container_id < 0) {
1420 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1421 map_name, container_name);
1422 return libbpf_err(container_id);
1425 container_type = btf__type_by_id(btf, container_id);
1426 if (!container_type) {
1427 pr_warn("map:%s cannot find BTF type for container_id:%u\n",
1428 map_name, container_id);
1429 return libbpf_err(-EINVAL);
1432 if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
1433 pr_warn("map:%s container_name:%s is an invalid container struct\n",
1434 map_name, container_name);
1435 return libbpf_err(-EINVAL);
1438 key = btf_members(container_type);
1441 key_size = btf__resolve_size(btf, key->type);
1443 pr_warn("map:%s invalid BTF key_type_size\n", map_name);
1444 return libbpf_err(key_size);
1447 if (expected_key_size != key_size) {
1448 pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1449 map_name, (__u32)key_size, expected_key_size);
1450 return libbpf_err(-EINVAL);
1453 value_size = btf__resolve_size(btf, value->type);
1454 if (value_size < 0) {
1455 pr_warn("map:%s invalid BTF value_type_size\n", map_name);
1456 return libbpf_err(value_size);
1459 if (expected_value_size != value_size) {
1460 pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1461 map_name, (__u32)value_size, expected_value_size);
1462 return libbpf_err(-EINVAL);
1465 *key_type_id = key->type;
1466 *value_type_id = value->type;
1471 static void btf_invalidate_raw_data(struct btf *btf)
1473 if (btf->raw_data) {
1474 free(btf->raw_data);
1475 btf->raw_data = NULL;
1477 if (btf->raw_data_swapped) {
1478 free(btf->raw_data_swapped);
1479 btf->raw_data_swapped = NULL;
1483 /* Ensure BTF is ready to be modified (by splitting it into three memory
1484 * regions for header, types, and strings). Also invalidate cached
1487 static int btf_ensure_modifiable(struct btf *btf)
1490 struct strset *set = NULL;
1493 if (btf_is_modifiable(btf)) {
1494 /* any BTF modification invalidates raw_data */
1495 btf_invalidate_raw_data(btf);
1499 /* split raw data into three memory regions */
1500 hdr = malloc(btf->hdr->hdr_len);
1501 types = malloc(btf->hdr->type_len);
1505 memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
1506 memcpy(types, btf->types_data, btf->hdr->type_len);
1508 /* build lookup index for all strings */
1509 set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
1515 /* only when everything was successful, update internal state */
1517 btf->types_data = types;
1518 btf->types_data_cap = btf->hdr->type_len;
1519 btf->strs_data = NULL;
1520 btf->strs_set = set;
1521 /* if BTF was created from scratch, all strings are guaranteed to be
1522 * unique and deduplicated
1524 if (btf->hdr->str_len == 0)
1525 btf->strs_deduped = true;
1526 if (!btf->base_btf && btf->hdr->str_len == 1)
1527 btf->strs_deduped = true;
1529 /* invalidate raw_data representation */
1530 btf_invalidate_raw_data(btf);
1541 /* Find an offset in BTF string section that corresponds to a given string *s*.
1543 * - >0 offset into string section, if string is found;
1544 * - -ENOENT, if string is not in the string section;
1545 * - <0, on any other error.
1547 int btf__find_str(struct btf *btf, const char *s)
1551 if (btf->base_btf) {
1552 off = btf__find_str(btf->base_btf, s);
1557 /* BTF needs to be in a modifiable state to build string lookup index */
1558 if (btf_ensure_modifiable(btf))
1559 return libbpf_err(-ENOMEM);
1561 off = strset__find_str(btf->strs_set, s);
1563 return libbpf_err(off);
1565 return btf->start_str_off + off;
1568 /* Add a string s to the BTF string section.
1570 * - > 0 offset into string section, on success;
1573 int btf__add_str(struct btf *btf, const char *s)
1577 if (btf->base_btf) {
1578 off = btf__find_str(btf->base_btf, s);
1583 if (btf_ensure_modifiable(btf))
1584 return libbpf_err(-ENOMEM);
1586 off = strset__add_str(btf->strs_set, s);
1588 return libbpf_err(off);
1590 btf->hdr->str_len = strset__data_size(btf->strs_set);
1592 return btf->start_str_off + off;
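/* Usage sketch (editorial illustration): strings are deduplicated, so
 * re-adding an existing string returns the same offset, and btf__find_str()
 * looks it up without adding anything:
 *
 *	int off1 = btf__add_str(btf, "my_type");
 *	int off2 = btf__add_str(btf, "my_type");	// off2 == off1
 *	int off3 = btf__find_str(btf, "my_type");	// off3 == off1
 */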
1595 static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
1597 return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
1598 btf->hdr->type_len, UINT_MAX, add_sz);
1601 static void btf_type_inc_vlen(struct btf_type *t)
1603 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
1606 static int btf_commit_type(struct btf *btf, int data_sz)
1610 err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
1612 return libbpf_err(err);
1614 btf->hdr->type_len += data_sz;
1615 btf->hdr->str_off += data_sz;
1617 return btf->start_id + btf->nr_types - 1;
1621 const struct btf *src;
1623 struct hashmap *str_off_map; /* map string offsets from src to dst */
1626 static int btf_rewrite_str(__u32 *str_off, void *ctx)
1628 struct btf_pipe *p = ctx;
1632 if (!*str_off) /* nothing to do for empty strings */
1635 if (p->str_off_map &&
1636 hashmap__find(p->str_off_map, (void *)(long)*str_off, &mapped_off)) {
1637 *str_off = (__u32)(long)mapped_off;
1641 off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1645 /* Remember string mapping from src to dst. It avoids
1646 * performing expensive string comparisons.
1648 if (p->str_off_map) {
1649 err = hashmap__append(p->str_off_map, (void *)(long)*str_off, (void *)(long)off);
1658 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
1660 struct btf_pipe p = { .src = src_btf, .dst = btf };
1664 sz = btf_type_size(src_type);
1666 return libbpf_err(sz);
1668 /* deconstruct BTF, if necessary, and invalidate raw_data */
1669 if (btf_ensure_modifiable(btf))
1670 return libbpf_err(-ENOMEM);
1672 t = btf_add_type_mem(btf, sz);
1674 return libbpf_err(-ENOMEM);
1676 memcpy(t, src_type, sz);
1678 err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
1680 return libbpf_err(err);
1682 return btf_commit_type(btf, sz);
1685 static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
1687 struct btf *btf = ctx;
1689 if (!*type_id) /* nothing to do for VOID references */
1692 /* we haven't updated btf's type count yet, so
1693 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
1694 * add to all newly added BTF types
1696 *type_id += btf->start_id + btf->nr_types - 1;
1700 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx);
1701 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx);
1703 int btf__add_btf(struct btf *btf, const struct btf *src_btf)
1705 struct btf_pipe p = { .src = src_btf, .dst = btf };
1706 int data_sz, sz, cnt, i, err, old_strs_len;
1710 /* appending split BTF isn't supported yet */
1711 if (src_btf->base_btf)
1712 return libbpf_err(-ENOTSUP);
1714 /* deconstruct BTF, if necessary, and invalidate raw_data */
1715 if (btf_ensure_modifiable(btf))
1716 return libbpf_err(-ENOMEM);
1718 /* remember original strings section size if we have to roll back
1719 * partial strings section changes
1721 old_strs_len = btf->hdr->str_len;
1723 data_sz = src_btf->hdr->type_len;
1724 cnt = btf__type_cnt(src_btf) - 1;
1726 /* pre-allocate enough memory for new types */
1727 t = btf_add_type_mem(btf, data_sz);
1729 return libbpf_err(-ENOMEM);
1731 /* pre-allocate enough memory for type offset index for new types */
1732 off = btf_add_type_offs_mem(btf, cnt);
1734 return libbpf_err(-ENOMEM);
1736 /* Map the string offsets from src_btf to the offsets from btf to improve performance */
1737 p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
1738 if (IS_ERR(p.str_off_map))
1739 return libbpf_err(-ENOMEM);
1741 /* bulk copy types data for all types from src_btf */
1742 memcpy(t, src_btf->types_data, data_sz);
1744 for (i = 0; i < cnt; i++) {
1745 sz = btf_type_size(t);
1747 /* unlikely, has to be corrupted src_btf */
1752 /* fill out type ID to type offset mapping for lookups by type ID */
1753 *off = t - btf->types_data;
1755 /* add, dedup, and remap strings referenced by this BTF type */
1756 err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
1760 /* remap all type IDs referenced from this BTF type */
1761 err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
1765 /* go to next type data and type offset index entry */
1770 /* Up until now any of the copied type data was effectively invisible,
1771 * so if we exited early before this point due to error, BTF would be
1772 * effectively unmodified. There would be extra internal memory
1773 * pre-allocated, but it would not be available for querying. But now
1774 * that we've copied and rewritten all the data successfully, we can
1775 * update type count and various internal offsets and sizes to
1776 * "commit" the changes and made them visible to the outside world.
1778 btf->hdr->type_len += data_sz;
1779 btf->hdr->str_off += data_sz;
1780 btf->nr_types += cnt;
1782 hashmap__free(p.str_off_map);
1784 /* return type ID of the first added BTF type */
1785 return btf->start_id + btf->nr_types - cnt;
1787 /* zero out preallocated memory as if it was just allocated with
1790 memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
1791 memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
1793 /* and now restore original strings section size; types data size
1794 * wasn't modified, so it doesn't need restoring; see the big comment above */
1795 btf->hdr->str_len = old_strs_len;
1797 hashmap__free(p.str_off_map);
1799 return libbpf_err(err);
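/* Usage sketch (editorial illustration): merging the whole contents of one
 * BTF object into another, e.g., during static linking; both objects are
 * assumed to be ordinary (non-split) BTF:
 *
 *	int first_id = btf__add_btf(dst_btf, src_btf);
 *
 *	// on success, types [1..N] of src_btf now live at IDs
 *	// [first_id..first_id + N - 1] in dst_btf, with all type IDs and
 *	// string offsets inside them rewritten accordingly
 */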
1803 * Append new BTF_KIND_INT type with:
1804 * - *name* - non-empty, non-NULL type name;
1805 * - *byte_sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
1806 * - *encoding* - a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1808 * - >0, type ID of newly added BTF type;
1811 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1816 /* non-empty name */
1817 if (!name || !name[0])
1818 return libbpf_err(-EINVAL);
1819 /* byte_sz must be power of 2 */
1820 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
1821 return libbpf_err(-EINVAL);
1822 if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
1823 return libbpf_err(-EINVAL);
1825 /* deconstruct BTF, if necessary, and invalidate raw_data */
1826 if (btf_ensure_modifiable(btf))
1827 return libbpf_err(-ENOMEM);
1829 sz = sizeof(struct btf_type) + sizeof(int);
1830 t = btf_add_type_mem(btf, sz);
1832 return libbpf_err(-ENOMEM);
1834 /* if something goes wrong later, we might end up with an extra string,
1835 * but that shouldn't be a problem, because BTF can't be constructed
1836 * completely anyway and will most probably be just discarded
1838 name_off = btf__add_str(btf, name);
1842 t->name_off = name_off;
1843 t->info = btf_type_info(BTF_KIND_INT, 0, 0);
1845 /* set INT info, we don't allow setting legacy bit offset/size */
1846 *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
1848 return btf_commit_type(btf, sz);
1852 * Append new BTF_KIND_FLOAT type with:
1853 * - *name* - non-empty, non-NULL type name;
1854 * - *byte_sz* - size of the type, in bytes;
1856 * - >0, type ID of newly added BTF type;
1859 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
1864 /* non-empty name */
1865 if (!name || !name[0])
1866 return libbpf_err(-EINVAL);
1868 /* byte_sz must be one of the explicitly allowed values */
1869 if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
1871 return libbpf_err(-EINVAL);
1873 if (btf_ensure_modifiable(btf))
1874 return libbpf_err(-ENOMEM);
1876 sz = sizeof(struct btf_type);
1877 t = btf_add_type_mem(btf, sz);
1879 return libbpf_err(-ENOMEM);
1881 name_off = btf__add_str(btf, name);
1885 t->name_off = name_off;
1886 t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
1889 return btf_commit_type(btf, sz);
1892 /* it's completely legal to append BTF types with type IDs pointing forward to
1893 * types that haven't been appended yet, so we only make sure that id looks
1894 * sane; we can't guarantee that the ID will always be valid
1896 static int validate_type_id(int id)
1898 if (id < 0 || id > BTF_MAX_NR_TYPES)
1903 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
1904 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
1907 int sz, name_off = 0;
1909 if (validate_type_id(ref_type_id))
1910 return libbpf_err(-EINVAL);
1912 if (btf_ensure_modifiable(btf))
1913 return libbpf_err(-ENOMEM);
1915 sz = sizeof(struct btf_type);
1916 t = btf_add_type_mem(btf, sz);
1918 return libbpf_err(-ENOMEM);
1920 if (name && name[0]) {
1921 name_off = btf__add_str(btf, name);
1926 t->name_off = name_off;
1927 t->info = btf_type_info(kind, 0, 0);
1928 t->type = ref_type_id;
1930 return btf_commit_type(btf, sz);
1934 * Append new BTF_KIND_PTR type with:
1935 * - *ref_type_id* - referenced type ID, it might not exist yet;
1937 * - >0, type ID of newly added BTF type;
1940 int btf__add_ptr(struct btf *btf, int ref_type_id)
1942 return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
1946 * Append new BTF_KIND_ARRAY type with:
1947 * - *index_type_id* - type ID of the type describing array index;
1948 * - *elem_type_id* - type ID of the type describing array element;
1949 * - *nr_elems* - the size of the array;
1951 * - >0, type ID of newly added BTF type;
1954 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
1957 struct btf_array *a;
1960 if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
1961 return libbpf_err(-EINVAL);
1963 if (btf_ensure_modifiable(btf))
1964 return libbpf_err(-ENOMEM);
1966 sz = sizeof(struct btf_type) + sizeof(struct btf_array);
1967 t = btf_add_type_mem(btf, sz);
1969 return libbpf_err(-ENOMEM);
1972 t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
1976 a->type = elem_type_id;
1977 a->index_type = index_type_id;
1978 a->nelems = nr_elems;
1980 return btf_commit_type(btf, sz);
1983 /* generic STRUCT/UNION append function */
1984 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
1987 int sz, name_off = 0;
1989 if (btf_ensure_modifiable(btf))
1990 return libbpf_err(-ENOMEM);
1992 sz = sizeof(struct btf_type);
1993 t = btf_add_type_mem(btf, sz);
1995 return libbpf_err(-ENOMEM);
1997 if (name && name[0]) {
1998 name_off = btf__add_str(btf, name);
2003 /* start out with vlen=0 and no kflag; this will be adjusted when
2004 * adding each member
2006 t->name_off = name_off;
2007 t->info = btf_type_info(kind, 0, 0);
2010 return btf_commit_type(btf, sz);
2014 * Append new BTF_KIND_STRUCT type with:
2015 * - *name* - name of the struct, can be NULL or empty for anonymous structs;
2016 * - *byte_sz* - size of the struct, in bytes;
2018 * Struct initially has no fields in it. Fields can be added by
2019 * btf__add_field() right after btf__add_struct() succeeds.
2022 * - >0, type ID of newly added BTF type;
2025 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2027 return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2031 * Append new BTF_KIND_UNION type with:
2032 * - *name* - name of the union, can be NULL or empty for anonymous union;
2033 * - *byte_sz* - size of the union, in bytes;
2035 * Union initially has no fields in it. Fields can be added by
2036 * btf__add_field() right after btf__add_union() succeeds. All fields
2037 * should have *bit_offset* of 0.
2040 * - >0, type ID of newly added BTF type;
2043 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2045 return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2048 static struct btf_type *btf_last_type(struct btf *btf)
2050 return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2054 * Append new field for the current STRUCT/UNION type with:
2055 * - *name* - name of the field, can be NULL or empty for anonymous field;
2056 * - *type_id* - type ID for the type describing field type;
2057 * - *bit_offset* - bit offset of the start of the field within struct/union;
2058 * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2063 int btf__add_field(struct btf *btf, const char *name, int type_id,
2064 __u32 bit_offset, __u32 bit_size)
2067 struct btf_member *m;
2069 int sz, name_off = 0;
2071 /* last type should be union/struct */
2072 if (btf->nr_types == 0)
2073 return libbpf_err(-EINVAL);
2074 t = btf_last_type(btf);
2075 if (!btf_is_composite(t))
2076 return libbpf_err(-EINVAL);
2078 if (validate_type_id(type_id))
2079 return libbpf_err(-EINVAL);
2080 /* best-effort bit field offset/size enforcement */
2081 is_bitfield = bit_size || (bit_offset % 8 != 0);
2082 if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2083 return libbpf_err(-EINVAL);
2085 /* only offset 0 is allowed for unions */
2086 if (btf_is_union(t) && bit_offset)
2087 return libbpf_err(-EINVAL);
2089 /* decompose and invalidate raw data */
2090 if (btf_ensure_modifiable(btf))
2091 return libbpf_err(-ENOMEM);
2093 sz = sizeof(struct btf_member);
2094 m = btf_add_type_mem(btf, sz);
2096 return libbpf_err(-ENOMEM);
2098 if (name && name[0]) {
2099 name_off = btf__add_str(btf, name);
2104 m->name_off = name_off;
2106 m->offset = bit_offset | (bit_size << 24);
2108 /* btf_add_type_mem can invalidate t pointer */
2109 t = btf_last_type(btf);
2110 /* update parent type's vlen and kflag */
2111 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2113 btf->hdr->type_len += sz;
2114 btf->hdr->str_off += sz;
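/* Usage sketch (editorial illustration): building
 * 'struct point { int x; int y; };' from scratch; error handling omitted:
 *
 *	struct btf *btf = btf__new_empty();
 *	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *	int pt_id = btf__add_struct(btf, "point", 8);
 *
 *	btf__add_field(btf, "x", int_id, 0, 0);		// bits 0..31
 *	btf__add_field(btf, "y", int_id, 32, 0);	// bits 32..63
 */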
2119 * Append new BTF_KIND_ENUM type with:
2120 * - *name* - name of the enum, can be NULL or empty for anonymous enums;
2121 * - *byte_sz* - size of the enum, in bytes.
2123 * Enum initially has no enum values in it (and corresponds to enum forward
2124 * declaration). Enumerator values can be added by btf__add_enum_value()
2125 * immediately after btf__add_enum() succeeds.
2128 * - >0, type ID of newly added BTF type;
2131 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2134 int sz, name_off = 0;
2136 /* byte_sz must be power of 2 */
2137 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2138 return libbpf_err(-EINVAL);
2140 if (btf_ensure_modifiable(btf))
2141 return libbpf_err(-ENOMEM);
2143 sz = sizeof(struct btf_type);
2144 t = btf_add_type_mem(btf, sz);
2146 return libbpf_err(-ENOMEM);
2148 if (name && name[0]) {
2149 name_off = btf__add_str(btf, name);
2154 /* start out with vlen=0; it will be adjusted when adding enum values */
2155 t->name_off = name_off;
2156 t->info = btf_type_info(BTF_KIND_ENUM, 0, 0);
2159 return btf_commit_type(btf, sz);
2163 * Append new enum value for the current ENUM type with:
2164 * - *name* - name of the enumerator value, can't be NULL or empty;
2165 * - *value* - integer value corresponding to enum value *name*;
2170 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2176 /* last type should be BTF_KIND_ENUM */
2177 if (btf->nr_types == 0)
2178 return libbpf_err(-EINVAL);
2179 t = btf_last_type(btf);
2180 if (!btf_is_enum(t))
2181 return libbpf_err(-EINVAL);
2183 /* non-empty name */
2184 if (!name || !name[0])
2185 return libbpf_err(-EINVAL);
2186 if (value < INT_MIN || value > UINT_MAX)
2187 return libbpf_err(-E2BIG);
2189 /* decompose and invalidate raw data */
2190 if (btf_ensure_modifiable(btf))
2191 return libbpf_err(-ENOMEM);
2193 sz = sizeof(struct btf_enum);
2194 v = btf_add_type_mem(btf, sz);
2196 return libbpf_err(-ENOMEM);
2198 name_off = btf__add_str(btf, name);
2202 v->name_off = name_off;
2205 /* update parent type's vlen */
2206 t = btf_last_type(btf);
2207 btf_type_inc_vlen(t);
2209 btf->hdr->type_len += sz;
2210 btf->hdr->str_off += sz;
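/* Usage sketch (editorial illustration): 'enum state { OFF = 0, ON = 1 };'
 * is appended as an ENUM type followed by its enumerators:
 *
 *	int id = btf__add_enum(btf, "state", 4);
 *
 *	btf__add_enum_value(btf, "OFF", 0);
 *	btf__add_enum_value(btf, "ON", 1);
 */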
2215 * Append new BTF_KIND_FWD type with:
2216 * - *name*, non-empty/non-NULL name;
2217 * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2218 * BTF_FWD_UNION, or BTF_FWD_ENUM;
2220 * - >0, type ID of newly added BTF type;
2223 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2225 if (!name || !name[0])
2226 return libbpf_err(-EINVAL);
2229 case BTF_FWD_STRUCT:
2230 case BTF_FWD_UNION: {
2234 id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2237 t = btf_type_by_id(btf, id);
2238 t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2242 /* enum forward in BTF currently is just an enum with no enum
2243 * values; we also assume a standard 4-byte size for it
2245 return btf__add_enum(btf, name, sizeof(int));
2247 return libbpf_err(-EINVAL);
2252 * Append new BTF_KIND_TYPEDEF type with:
2253 * - *name*, non-empty/non-NULL name;
2254 * - *ref_type_id* - referenced type ID, it might not exist yet;
2256 * - >0, type ID of newly added BTF type;
2259 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2261 if (!name || !name[0])
2262 return libbpf_err(-EINVAL);
2264 return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2268 * Append new BTF_KIND_VOLATILE type with:
2269 * - *ref_type_id* - referenced type ID, it might not exist yet;
2271 * - >0, type ID of newly added BTF type;
2274 int btf__add_volatile(struct btf *btf, int ref_type_id)
2276 return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2280 * Append new BTF_KIND_CONST type with:
2281 * - *ref_type_id* - referenced type ID, it might not exist yet;
2283 * - >0, type ID of newly added BTF type;
2286 int btf__add_const(struct btf *btf, int ref_type_id)
2288 return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2292 * Append new BTF_KIND_RESTRICT type with:
2293 * - *ref_type_id* - referenced type ID, it might not exist yet;
2295 * - >0, type ID of newly added BTF type;
2298 int btf__add_restrict(struct btf *btf, int ref_type_id)
2300 return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2304 * Append new BTF_KIND_TYPE_TAG type with:
2305 * - *value*, non-empty/non-NULL tag value;
2306 * - *ref_type_id* - referenced type ID, it might not exist yet;
2308 * - >0, type ID of newly added BTF type;
2311 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2313 if (!value || !value[0])
2314 return libbpf_err(-EINVAL);
2316 return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2320 * Append new BTF_KIND_FUNC type with:
2321 * - *name*, non-empty/non-NULL name;
2322 * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2324 * - >0, type ID of newly added BTF type;
2327 int btf__add_func(struct btf *btf, const char *name,
2328 enum btf_func_linkage linkage, int proto_type_id)
2332 if (!name || !name[0])
2333 return libbpf_err(-EINVAL);
2334 if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2335 linkage != BTF_FUNC_EXTERN)
2336 return libbpf_err(-EINVAL);
2338 id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2340 struct btf_type *t = btf_type_by_id(btf, id);
2342 t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2344 return libbpf_err(id);
2348 * Append new BTF_KIND_FUNC_PROTO with:
2349 * - *ret_type_id* - type ID for return result of a function.
2351 * Function prototype initially has no arguments, but they can be added by
2352 * btf__add_func_param() one by one, immediately after
2353 * btf__add_func_proto() succeeded.
2356 * - >0, type ID of newly added BTF type;
2359 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2364 if (validate_type_id(ret_type_id))
2365 return libbpf_err(-EINVAL);
2367 if (btf_ensure_modifiable(btf))
2368 return libbpf_err(-ENOMEM);
2370 sz = sizeof(struct btf_type);
2371 t = btf_add_type_mem(btf, sz);
2373 return libbpf_err(-ENOMEM);
2375 /* start out with vlen=0; this will be adjusted when adding function
2376 * parameters, if necessary
2379 t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2380 t->type = ret_type_id;
2382 return btf_commit_type(btf, sz);
2386 * Append new function parameter for current FUNC_PROTO type with:
2387 * - *name* - parameter name, can be NULL or empty;
2388 * - *type_id* - type ID describing the type of the parameter.
2393 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2396 struct btf_param *p;
2397 int sz, name_off = 0;
2399 if (validate_type_id(type_id))
2400 return libbpf_err(-EINVAL);
2402 /* last type should be BTF_KIND_FUNC_PROTO */
2403 if (btf->nr_types == 0)
2404 return libbpf_err(-EINVAL);
2405 t = btf_last_type(btf);
2406 if (!btf_is_func_proto(t))
2407 return libbpf_err(-EINVAL);
2409 /* decompose and invalidate raw data */
2410 if (btf_ensure_modifiable(btf))
2411 return libbpf_err(-ENOMEM);
2413 sz = sizeof(struct btf_param);
2414 p = btf_add_type_mem(btf, sz);
2416 return libbpf_err(-ENOMEM);
2418 if (name && name[0]) {
2419 name_off = btf__add_str(btf, name);
2424 p->name_off = name_off;
2427 /* update parent type's vlen */
2428 t = btf_last_type(btf);
2429 btf_type_inc_vlen(t);
2431 btf->hdr->type_len += sz;
2432 btf->hdr->str_off += sz;
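/* Usage sketch (editorial illustration): describing 'int sum(int a, int b)'
 * takes a FUNC_PROTO plus a FUNC referencing it; int_id is assumed to be a
 * valid BTF_KIND_INT type ID:
 *
 *	int proto_id = btf__add_func_proto(btf, int_id);
 *
 *	btf__add_func_param(btf, "a", int_id);
 *	btf__add_func_param(btf, "b", int_id);
 *	btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);
 */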
2437 * Append new BTF_KIND_VAR type with:
2438 * - *name* - non-empty/non-NULL name;
2439 * - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2440 * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2441 * - *type_id* - type ID of the type describing the type of the variable.
2443 * - >0, type ID of newly added BTF type;
2446 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2452 /* non-empty name */
2453 if (!name || !name[0])
2454 return libbpf_err(-EINVAL);
2455 if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2456 linkage != BTF_VAR_GLOBAL_EXTERN)
2457 return libbpf_err(-EINVAL);
2458 if (validate_type_id(type_id))
2459 return libbpf_err(-EINVAL);
2461 /* deconstruct BTF, if necessary, and invalidate raw_data */
2462 if (btf_ensure_modifiable(btf))
2463 return libbpf_err(-ENOMEM);
2465 sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2466 t = btf_add_type_mem(btf, sz);
2468 return libbpf_err(-ENOMEM);
2470 name_off = btf__add_str(btf, name);
2474 t->name_off = name_off;
2475 t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2479 v->linkage = linkage;
2481 return btf_commit_type(btf, sz);
2485 * Append new BTF_KIND_DATASEC type with:
2486 * - *name* - non-empty/non-NULL name;
2487 * - *byte_sz* - data section size, in bytes.
2489 * Data section is initially empty. Variables info can be added with
2490 * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2493 * - >0, type ID of newly added BTF type;
2496 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2501 /* non-empty name */
2502 if (!name || !name[0])
2503 return libbpf_err(-EINVAL);
2505 if (btf_ensure_modifiable(btf))
2506 return libbpf_err(-ENOMEM);
2508 sz = sizeof(struct btf_type);
2509 t = btf_add_type_mem(btf, sz);
2511 return libbpf_err(-ENOMEM);
2513 name_off = btf__add_str(btf, name);
2517 /* start with vlen=0, which will be updated as var_secinfos are added */
2518 t->name_off = name_off;
2519 t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2522 return btf_commit_type(btf, sz);
2526 * Append new data section variable information entry for current DATASEC type:
2527 * - *var_type_id* - type ID, describing type of the variable;
2528 * - *offset* - variable offset within data section, in bytes;
2529 * - *byte_sz* - variable size, in bytes.
2535 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2538 struct btf_var_secinfo *v;
2541 /* last type should be BTF_KIND_DATASEC */
2542 if (btf->nr_types == 0)
2543 return libbpf_err(-EINVAL);
2544 t = btf_last_type(btf);
2545 if (!btf_is_datasec(t))
2546 return libbpf_err(-EINVAL);
2548 if (validate_type_id(var_type_id))
2549 return libbpf_err(-EINVAL);
2551 /* decompose and invalidate raw data */
2552 if (btf_ensure_modifiable(btf))
2553 return libbpf_err(-ENOMEM);
2555 sz = sizeof(struct btf_var_secinfo);
2556 v = btf_add_type_mem(btf, sz);
2558 return libbpf_err(-ENOMEM);
2560 v->type = var_type_id;
2564 /* update parent type's vlen */
2565 t = btf_last_type(btf);
2566 btf_type_inc_vlen(t);
2568 btf->hdr->type_len += sz;
2569 btf->hdr->str_off += sz;
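/* Usage sketch (editorial illustration): a 4-byte ".data" section containing
 * a single variable; var_id is assumed to be a valid BTF_KIND_VAR type ID:
 *
 *	int sec_id = btf__add_datasec(btf, ".data", 4);
 *
 *	btf__add_datasec_var_info(btf, var_id, 0, 4);	// offset 0, size 4
 */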
2574 * Append new BTF_KIND_DECL_TAG type with:
2575 * - *value* - non-empty/non-NULL string;
2576 * - *ref_type_id* - referenced type ID, it might not exist yet;
2577 * - *component_idx* - -1 for tagging reference type, otherwise struct/union
2578 * member or function argument index;
2580 * - >0, type ID of newly added BTF type;
2583 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2589 if (!value || !value[0] || component_idx < -1)
2590 return libbpf_err(-EINVAL);
2592 if (validate_type_id(ref_type_id))
2593 return libbpf_err(-EINVAL);
2595 if (btf_ensure_modifiable(btf))
2596 return libbpf_err(-ENOMEM);
2598 sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
2599 t = btf_add_type_mem(btf, sz);
2601 return libbpf_err(-ENOMEM);
2603 value_off = btf__add_str(btf, value);
2607 t->name_off = value_off;
2608 t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
2609 t->type = ref_type_id;
2610 btf_decl_tag(t)->component_idx = component_idx;
2612 return btf_commit_type(btf, sz);
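/*
 * Example (an illustrative sketch; "struct_id" is assumed to be the ID of a
 * previously added STRUCT): tag the struct itself (component_idx == -1) and
 * its first member (component_idx == 0):
 *
 *   int tag1 = btf__add_decl_tag(btf, "my_tag", struct_id, -1);
 *   int tag2 = btf__add_decl_tag(btf, "my_tag", struct_id, 0);
 */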
2615 struct btf_ext_sec_setup_param {
2619 struct btf_ext_info *ext_info;
2623 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2624 struct btf_ext_sec_setup_param *ext_sec)
2626 const struct btf_ext_info_sec *sinfo;
2627 struct btf_ext_info *ext_info;
2628 __u32 info_left, record_size;
2630 /* The start of the info sec (including the __u32 record_size). */
2633 if (ext_sec->len == 0)
2636 if (ext_sec->off & 0x03) {
2637 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2642 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2643 info_left = ext_sec->len;
2645 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2646 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2647 ext_sec->desc, ext_sec->off, ext_sec->len);
2651 /* At least a record size */
2652 if (info_left < sizeof(__u32)) {
2653 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2657 /* The record size needs to meet the minimum standard */
2658 record_size = *(__u32 *)info;
2659 if (record_size < ext_sec->min_rec_size ||
2660 record_size & 0x03) {
2661 pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2662 ext_sec->desc, record_size);
2666 sinfo = info + sizeof(__u32);
2667 info_left -= sizeof(__u32);
2669 /* If no records, return failure now so .BTF.ext won't be used. */
2671 pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
2676 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2677 __u64 total_record_size;
2680 if (info_left < sec_hdrlen) {
2681 pr_debug("%s section header is not found in .BTF.ext\n",
2686 num_records = sinfo->num_info;
2687 if (num_records == 0) {
2688 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2693 total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2694 if (info_left < total_record_size) {
2695 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2700 info_left -= total_record_size;
2701 sinfo = (void *)sinfo + total_record_size;
2705 ext_info = ext_sec->ext_info;
2706 ext_info->len = ext_sec->len - sizeof(__u32);
2707 ext_info->rec_size = record_size;
2708 ext_info->info = info + sizeof(__u32);
2709 ext_info->sec_cnt = sec_cnt;
2714 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2716 struct btf_ext_sec_setup_param param = {
2717 .off = btf_ext->hdr->func_info_off,
2718 .len = btf_ext->hdr->func_info_len,
2719 .min_rec_size = sizeof(struct bpf_func_info_min),
2720 .ext_info = &btf_ext->func_info,
2724 return btf_ext_setup_info(btf_ext, &param);
2727 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
2729 struct btf_ext_sec_setup_param param = {
2730 .off = btf_ext->hdr->line_info_off,
2731 .len = btf_ext->hdr->line_info_len,
2732 .min_rec_size = sizeof(struct bpf_line_info_min),
2733 .ext_info = &btf_ext->line_info,
2734 .desc = "line_info",
2737 return btf_ext_setup_info(btf_ext, &param);
2740 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
2742 struct btf_ext_sec_setup_param param = {
2743 .off = btf_ext->hdr->core_relo_off,
2744 .len = btf_ext->hdr->core_relo_len,
2745 .min_rec_size = sizeof(struct bpf_core_relo),
2746 .ext_info = &btf_ext->core_relo_info,
2747 .desc = "core_relo",
2750 return btf_ext_setup_info(btf_ext, &param);
2753 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
2755 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
2757 if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
2758 data_size < hdr->hdr_len) {
2759 pr_debug("BTF.ext header not found\n");
2763 if (hdr->magic == bswap_16(BTF_MAGIC)) {
2764 pr_warn("BTF.ext in non-native endianness is not supported\n");
2766 } else if (hdr->magic != BTF_MAGIC) {
2767 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
2771 if (hdr->version != BTF_VERSION) {
2772 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
2777 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
2781 if (data_size == hdr->hdr_len) {
2782 pr_debug("BTF.ext has no data\n");
2789 void btf_ext__free(struct btf_ext *btf_ext)
2791 if (IS_ERR_OR_NULL(btf_ext))
2793 free(btf_ext->func_info.sec_idxs);
2794 free(btf_ext->line_info.sec_idxs);
2795 free(btf_ext->core_relo_info.sec_idxs);
2796 free(btf_ext->data);
2800 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
2802 struct btf_ext *btf_ext;
2805 btf_ext = calloc(1, sizeof(struct btf_ext));
2807 return libbpf_err_ptr(-ENOMEM);
2809 btf_ext->data_size = size;
2810 btf_ext->data = malloc(size);
2811 if (!btf_ext->data) {
2815 memcpy(btf_ext->data, data, size);
2817 err = btf_ext_parse_hdr(btf_ext->data, size);
2821 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
2826 err = btf_ext_setup_func_info(btf_ext);
2830 err = btf_ext_setup_line_info(btf_ext);
2834 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
2835 goto done; /* skip core relos parsing */
2837 err = btf_ext_setup_core_relos(btf_ext);
2843 btf_ext__free(btf_ext);
2844 return libbpf_err_ptr(err);
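/*
 * Example (an illustrative sketch; "data" and "size" are assumed to hold the
 * raw contents of an ELF .BTF.ext section, e.g. read out with libelf):
 *
 *   struct btf_ext *ext = btf_ext__new(data, size);
 *   long err = libbpf_get_error(ext);
 *
 *   if (err)
 *           return err;
 *   ...
 *   btf_ext__free(ext);
 */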
2850 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
2852 *size = btf_ext->data_size;
2853 return btf_ext->data;
2856 static int btf_ext_reloc_info(const struct btf *btf,
2857 const struct btf_ext_info *ext_info,
2858 const char *sec_name, __u32 insns_cnt,
2859 void **info, __u32 *cnt)
2861 __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
2862 __u32 i, record_size, existing_len, records_len;
2863 struct btf_ext_info_sec *sinfo;
2864 const char *info_sec_name;
2868 record_size = ext_info->rec_size;
2869 sinfo = ext_info->info;
2870 remain_len = ext_info->len;
2871 while (remain_len > 0) {
2872 records_len = sinfo->num_info * record_size;
2873 info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
2874 if (strcmp(info_sec_name, sec_name)) {
2875 remain_len -= sec_hdrlen + records_len;
2876 sinfo = (void *)sinfo + sec_hdrlen + records_len;
2880 existing_len = (*cnt) * record_size;
2881 data = realloc(*info, existing_len + records_len);
2883 return libbpf_err(-ENOMEM);
2885 memcpy(data + existing_len, sinfo->data, records_len);
2886 /* adjust insn_off only, the rest of the data will be passed
2889 for (i = 0; i < sinfo->num_info; i++) {
2892 insn_off = data + existing_len + (i * record_size);
2893 *insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt;
2896 *cnt += sinfo->num_info;
2900 return libbpf_err(-ENOENT);
2903 int btf_ext__reloc_func_info(const struct btf *btf,
2904 const struct btf_ext *btf_ext,
2905 const char *sec_name, __u32 insns_cnt,
2906 void **func_info, __u32 *cnt)
2908 return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
2909 insns_cnt, func_info, cnt);
2912 int btf_ext__reloc_line_info(const struct btf *btf,
2913 const struct btf_ext *btf_ext,
2914 const char *sec_name, __u32 insns_cnt,
2915 void **line_info, __u32 *cnt)
2917 return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
2918 insns_cnt, line_info, cnt);
2921 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
2923 return btf_ext->func_info.rec_size;
2926 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
2928 return btf_ext->line_info.rec_size;
2933 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
2934 static void btf_dedup_free(struct btf_dedup *d);
2935 static int btf_dedup_prep(struct btf_dedup *d);
2936 static int btf_dedup_strings(struct btf_dedup *d);
2937 static int btf_dedup_prim_types(struct btf_dedup *d);
2938 static int btf_dedup_struct_types(struct btf_dedup *d);
2939 static int btf_dedup_ref_types(struct btf_dedup *d);
2940 static int btf_dedup_compact_types(struct btf_dedup *d);
2941 static int btf_dedup_remap_types(struct btf_dedup *d);
2944 * Deduplicate BTF types and strings.
2946 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
2947 * section with all BTF type descriptors and string data. It overwrites that
2948 * memory in-place with deduplicated types and strings without any loss of
2949 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
2950 * is provided, all the strings referenced from .BTF.ext section are honored
2951 * and updated to point to the right offsets after deduplication.
2953 * If the function returns an error, type/string data might be garbled and should
2954 * be discarded.
2956 * A more verbose and detailed description of both the problem btf_dedup is
2957 * solving and its solution can be found at:
2958 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
2960 * Problem description and justification
2961 * =====================================
2963 * BTF type information is typically emitted either as a result of conversion
2964 * from DWARF to BTF or directly by the compiler. In both cases, each compilation
2965 * unit contains information about a subset of all the types that are used
2966 * in an application. These subsets frequently overlap and contain a lot
2967 * of duplicated information when later concatenated together into a single
2968 * binary. This algorithm ensures that each unique type is represented by a single
2969 * BTF type descriptor, greatly reducing the resulting size of BTF data.
2971 * Compilation unit isolation and subsequent duplication of data is not the only
2972 * problem. The same type hierarchy (e.g., a struct and all the types that struct
2973 * references) in different compilation units can be represented in BTF to
2974 * various degrees of completeness (or, rather, incompleteness) due to
2975 * struct/union forward declarations.
2977 * Let's take a look at an example that we'll use to better understand the
2978 * problem (and solution). Suppose we have two compilation units, each using
2979 * the same `struct S`, but each of them having incomplete type information about
3008 * In case of CU #1, BTF data will know only that `struct B` exists (but no
3009 * more), but will know the complete type information about `struct A`. While
3010 * for CU #2, it will know full type information about `struct B`, but will
3011 * only know about the forward declaration of `struct A` (in BTF terms, it will
3012 * have a `BTF_KIND_FWD` type descriptor with name `A`).
3014 * This compilation unit isolation means that it's possible that there is no
3015 * single CU with complete type information describing structs `S`, `A`, and
3016 * `B`. Also, we might get tons of duplicated and redundant type information.
3018 * An additional complication we need to keep in mind comes from the fact that
3019 * types, in general, can form graphs containing cycles, not just DAGs.
3021 * While the algorithm does deduplication, it also merges and resolves type
3022 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
3023 * E.g., in the example above with two compilation units having partial type
3024 * information for structs `A` and `B`, the algorithm will emit
3025 * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3026 * (as well as type information for `int` and pointers), as if they were defined
3027 * in a single compilation unit as:
3047 * The algorithm completes its work in 6 separate passes:
3049 * 1. Strings deduplication.
3050 * 2. Primitive types deduplication (int, enum, fwd).
3051 * 3. Struct/union types deduplication.
3052 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3053 * protos, and const/volatile/restrict modifiers).
3054 * 5. Types compaction.
3055 * 6. Types remapping.
3057 * The algorithm determines a canonical type descriptor, which is a single
3058 * representative type for each truly unique type. This canonical type is the
3059 * one that will go into the final deduplicated BTF type information. For
3060 * structs/unions, it is also the type that the algorithm will merge additional
3061 * type information into (while resolving FWDs), as it discovers it from data in
3062 * other CUs. Each input BTF type eventually gets either mapped to itself, if
3063 * that type is canonical, or to some other type, if that type is equivalent
3064 * and was chosen as the canonical representative. This mapping is stored in
3065 * the `btf_dedup->map` array. This map is also used to record the STRUCT/UNION
3066 * that a FWD type got resolved to.
3068 * To facilitate fast discovery of canonical types, we also maintain a canonical
3069 * index (`btf_dedup->dedup_table`), which maps a type descriptor's signature hash
3070 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3071 * that match that signature. With a sufficiently good choice of type signature
3072 * hashing function, we can limit the number of canonical types for each unique
3073 * type signature to a very small number, allowing us to find the canonical type
3074 * for any duplicated type very quickly.
3076 * Struct/union deduplication is the most critical part, and the algorithm for
3077 * deduplicating structs/unions is described in greater detail in the comments
3078 * for the `btf_dedup_is_equiv` function.
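 *
 * Example invocation (an illustrative sketch; "ext" is an optional
 * struct btf_ext parsed from the matching .BTF.ext section, or NULL):
 *
 *   LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);
 *   int err = btf__dedup(btf, &opts);
 *
 * A non-zero return means type/string data may have been left garbled and
 * the btf object should be discarded.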
3081 DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0)
3082 int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts)
3084 struct btf_dedup *d;
3087 if (!OPTS_VALID(opts, btf_dedup_opts))
3088 return libbpf_err(-EINVAL);
3090 d = btf_dedup_new(btf, opts);
3092 pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
3093 return libbpf_err(-EINVAL);
3096 if (btf_ensure_modifiable(btf)) {
3101 err = btf_dedup_prep(d);
3103 pr_debug("btf_dedup_prep failed:%d\n", err);
3106 err = btf_dedup_strings(d);
3108 pr_debug("btf_dedup_strings failed:%d\n", err);
3111 err = btf_dedup_prim_types(d);
3113 pr_debug("btf_dedup_prim_types failed:%d\n", err);
3116 err = btf_dedup_struct_types(d);
3118 pr_debug("btf_dedup_struct_types failed:%d\n", err);
3121 err = btf_dedup_ref_types(d);
3123 pr_debug("btf_dedup_ref_types failed:%d\n", err);
3126 err = btf_dedup_compact_types(d);
3128 pr_debug("btf_dedup_compact_types failed:%d\n", err);
3131 err = btf_dedup_remap_types(d);
3133 pr_debug("btf_dedup_remap_types failed:%d\n", err);
3139 return libbpf_err(err);
3142 COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
3143 int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
3145 LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
3148 pr_warn("please use new version of btf__dedup() that supports options\n");
3149 return libbpf_err(-ENOTSUP);
3152 return btf__dedup(btf, &opts);
3155 #define BTF_UNPROCESSED_ID ((__u32)-1)
3156 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3159 /* .BTF section to be deduped in-place */
3162 * Optional .BTF.ext section. When provided, any strings referenced
3163 * from it will be taken into account when deduping strings
3165 struct btf_ext *btf_ext;
3167 * This is a map from any type's signature hash to a list of possible
3168 * canonical representative type candidates. Hash collisions are
3169 * ignored, so even types of various kinds can share the same list of
3170 * candidates, which is fine because we rely on subsequent
3171 * btf_xxx_equal() checks to authoritatively verify type equality.
3173 struct hashmap *dedup_table;
3174 /* Canonical types map */
3176 /* Hypothetical mapping, used during type graph equivalence checks */
3181 /* Whether hypothetical mapping, if successful, would need to adjust
3182 * already canonicalized types (due to a new forward declaration to
3183 * concrete type resolution). In such a case, during split BTF dedup the
3184 * candidate type would still be considered different, because base
3185 * BTF is considered to be immutable.
3187 bool hypot_adjust_canon;
3188 /* Various options modifying the behavior of the algorithm */
3189 struct btf_dedup_opts opts;
3190 /* temporary strings deduplication state */
3191 struct strset *strs_set;
3194 static long hash_combine(long h, long value)
3196 return h * 31 + value;
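/*
 * For illustration, chaining two combines expands to the classic polynomial
 * hash step:
 *
 *   hash_combine(hash_combine(0, a), b) == (0 * 31 + a) * 31 + b
 *                                       == a * 31 + b
 */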
3199 #define for_each_dedup_cand(d, node, hash) \
3200 hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
3202 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3204 return hashmap__append(d->dedup_table,
3205 (void *)hash, (void *)(long)type_id);
3208 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3209 __u32 from_id, __u32 to_id)
3211 if (d->hypot_cnt == d->hypot_cap) {
3214 d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3215 new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3218 d->hypot_list = new_list;
3220 d->hypot_list[d->hypot_cnt++] = from_id;
3221 d->hypot_map[from_id] = to_id;
3225 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3229 for (i = 0; i < d->hypot_cnt; i++)
3230 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3232 d->hypot_adjust_canon = false;
3235 static void btf_dedup_free(struct btf_dedup *d)
3237 hashmap__free(d->dedup_table);
3238 d->dedup_table = NULL;
3244 d->hypot_map = NULL;
3246 free(d->hypot_list);
3247 d->hypot_list = NULL;
3252 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
3257 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
3262 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
3267 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
3269 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3270 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3271 int i, err = 0, type_cnt;
3274 return ERR_PTR(-ENOMEM);
3276 if (OPTS_GET(opts, force_collisions, false))
3277 hash_fn = btf_dedup_collision_hash_fn;
3280 d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
3282 d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3283 if (IS_ERR(d->dedup_table)) {
3284 err = PTR_ERR(d->dedup_table);
3285 d->dedup_table = NULL;
3289 type_cnt = btf__type_cnt(btf);
3290 d->map = malloc(sizeof(__u32) * type_cnt);
3295 /* special BTF "void" type is made canonical immediately */
3297 for (i = 1; i < type_cnt; i++) {
3298 struct btf_type *t = btf_type_by_id(d->btf, i);
3300 /* VAR and DATASEC are never deduped and are self-canonical */
3301 if (btf_is_var(t) || btf_is_datasec(t))
3304 d->map[i] = BTF_UNPROCESSED_ID;
3307 d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3308 if (!d->hypot_map) {
3312 for (i = 0; i < type_cnt; i++)
3313 d->hypot_map[i] = BTF_UNPROCESSED_ID;
3318 return ERR_PTR(err);
3325 * Iterate over all possible places in .BTF and .BTF.ext that can reference
3326 * a string and pass a pointer to it to the provided callback `fn`.
3328 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3332 for (i = 0; i < d->btf->nr_types; i++) {
3333 struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3335 r = btf_type_visit_str_offs(t, fn, ctx);
3343 r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3350 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3352 struct btf_dedup *d = ctx;
3353 __u32 str_off = *str_off_ptr;
3357 /* don't touch empty string or a string from the base BTF */
3358 if (str_off == 0 || str_off < d->btf->start_str_off)
3361 s = btf__str_by_offset(d->btf, str_off);
3362 if (d->btf->base_btf) {
3363 err = btf__find_str(d->btf->base_btf, s);
3372 off = strset__add_str(d->strs_set, s);
3376 *str_off_ptr = d->btf->start_str_off + off;
3381 * Dedup strings and filter out those that are not referenced from either .BTF
3382 * or .BTF.ext (if provided) sections.
3384 * This is done by building an index of all strings in BTF's string section,
3385 * then iterating over all entities that can reference strings (e.g., type
3386 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3387 * strings as used. After that all used strings are deduped and compacted into
3388 * a sequential blob of memory and new offsets are calculated. Then all the string
3389 * references are iterated again and rewritten using new offsets.
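 *
 * For example (an illustrative scenario): if a type name and a .BTF.ext
 * line_info record both reference "foo", after dedup both of their __u32
 * offset fields are rewritten in place to point at the single copy of "foo"
 * in the rebuilt string section.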
3391 static int btf_dedup_strings(struct btf_dedup *d)
3395 if (d->btf->strs_deduped)
3398 d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3399 if (IS_ERR(d->strs_set)) {
3400 err = PTR_ERR(d->strs_set);
3404 if (!d->btf->base_btf) {
3405 /* insert empty string; we won't be looking it up during strings
3406 * dedup, but it's good to have it for generic BTF string lookups
3408 err = strset__add_str(d->strs_set, "");
3413 /* remap string offsets */
3414 err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3418 /* replace BTF string data and hash with deduped ones */
3419 strset__free(d->btf->strs_set);
3420 d->btf->hdr->str_len = strset__data_size(d->strs_set);
3421 d->btf->strs_set = d->strs_set;
3423 d->btf->strs_deduped = true;
3427 strset__free(d->strs_set);
3433 static long btf_hash_common(struct btf_type *t)
3437 h = hash_combine(0, t->name_off);
3438 h = hash_combine(h, t->info);
3439 h = hash_combine(h, t->size);
3443 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3445 return t1->name_off == t2->name_off &&
3446 t1->info == t2->info &&
3447 t1->size == t2->size;
3450 /* Calculate type signature hash of INT or TAG. */
3451 static long btf_hash_int_decl_tag(struct btf_type *t)
3453 __u32 info = *(__u32 *)(t + 1);
3456 h = btf_hash_common(t);
3457 h = hash_combine(h, info);
3461 /* Check structural equality of two INTs or TAGs. */
3462 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3466 if (!btf_equal_common(t1, t2))
3468 info1 = *(__u32 *)(t1 + 1);
3469 info2 = *(__u32 *)(t2 + 1);
3470 return info1 == info2;
3473 /* Calculate type signature hash of ENUM. */
3474 static long btf_hash_enum(struct btf_type *t)
3478 /* don't hash vlen and enum members to support enum fwd resolving */
3479 h = hash_combine(0, t->name_off);
3480 h = hash_combine(h, t->info & ~0xffff);
3481 h = hash_combine(h, t->size);
3485 /* Check structural equality of two ENUMs. */
3486 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3488 const struct btf_enum *m1, *m2;
3492 if (!btf_equal_common(t1, t2))
3495 vlen = btf_vlen(t1);
3498 for (i = 0; i < vlen; i++) {
3499 if (m1->name_off != m2->name_off || m1->val != m2->val)
3507 static inline bool btf_is_enum_fwd(struct btf_type *t)
3509 return btf_is_enum(t) && btf_vlen(t) == 0;
3512 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3514 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3515 return btf_equal_enum(t1, t2);
3516 /* ignore vlen when comparing */
3517 return t1->name_off == t2->name_off &&
3518 (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
3519 t1->size == t2->size;
3523 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3524 * as referenced type ID equivalence is established separately by the type
3525 * graph equivalence check algorithm.
3527 static long btf_hash_struct(struct btf_type *t)
3529 const struct btf_member *member = btf_members(t);
3530 __u32 vlen = btf_vlen(t);
3531 long h = btf_hash_common(t);
3534 for (i = 0; i < vlen; i++) {
3535 h = hash_combine(h, member->name_off);
3536 h = hash_combine(h, member->offset);
3537 /* no hashing of referenced type ID, it can be unresolved yet */
3544 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3545 * type IDs. This check is performed during type graph equivalence check and
3546 * referenced types equivalence is checked separately.
3548 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3550 const struct btf_member *m1, *m2;
3554 if (!btf_equal_common(t1, t2))
3557 vlen = btf_vlen(t1);
3558 m1 = btf_members(t1);
3559 m2 = btf_members(t2);
3560 for (i = 0; i < vlen; i++) {
3561 if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3570 * Calculate type signature hash of ARRAY, including referenced type IDs,
3571 * under the assumption that they were already resolved to canonical type IDs and
3572 * are not going to change.
3574 static long btf_hash_array(struct btf_type *t)
3576 const struct btf_array *info = btf_array(t);
3577 long h = btf_hash_common(t);
3579 h = hash_combine(h, info->type);
3580 h = hash_combine(h, info->index_type);
3581 h = hash_combine(h, info->nelems);
3586 * Check exact equality of two ARRAYs, taking into account referenced
3587 * type IDs, under the assumption that they were already resolved to canonical
3588 * type IDs and are not going to change.
3589 * This function is called during reference types deduplication to compare
3590 * ARRAY to potential canonical representative.
3592 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3594 const struct btf_array *info1, *info2;
3596 if (!btf_equal_common(t1, t2))
3599 info1 = btf_array(t1);
3600 info2 = btf_array(t2);
3601 return info1->type == info2->type &&
3602 info1->index_type == info2->index_type &&
3603 info1->nelems == info2->nelems;
3607 * Check structural compatibility of two ARRAYs, ignoring referenced type
3608 * IDs. This check is performed during type graph equivalence check and
3609 * referenced types equivalence is checked separately.
3611 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3613 if (!btf_equal_common(t1, t2))
3616 return btf_array(t1)->nelems == btf_array(t2)->nelems;
3620 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3621 * under the assumption that they were already resolved to canonical type IDs and
3622 * are not going to change.
3624 static long btf_hash_fnproto(struct btf_type *t)
3626 const struct btf_param *member = btf_params(t);
3627 __u16 vlen = btf_vlen(t);
3628 long h = btf_hash_common(t);
3631 for (i = 0; i < vlen; i++) {
3632 h = hash_combine(h, member->name_off);
3633 h = hash_combine(h, member->type);
3640 * Check exact equality of two FUNC_PROTOs, taking into account referenced
3641 * type IDs, under the assumption that they were already resolved to canonical
3642 * type IDs and are not going to change.
3643 * This function is called during reference types deduplication to compare
3644 * FUNC_PROTO to potential canonical representative.
3646 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3648 const struct btf_param *m1, *m2;
3652 if (!btf_equal_common(t1, t2))
3655 vlen = btf_vlen(t1);
3656 m1 = btf_params(t1);
3657 m2 = btf_params(t2);
3658 for (i = 0; i < vlen; i++) {
3659 if (m1->name_off != m2->name_off || m1->type != m2->type)
3668 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3669 * IDs. This check is performed during type graph equivalence check and
3670 * referenced types equivalence is checked separately.
3672 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3674 const struct btf_param *m1, *m2;
3678 /* skip return type ID */
3679 if (t1->name_off != t2->name_off || t1->info != t2->info)
3682 vlen = btf_vlen(t1);
3683 m1 = btf_params(t1);
3684 m2 = btf_params(t2);
3685 for (i = 0; i < vlen; i++) {
3686 if (m1->name_off != m2->name_off)
3694 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3695 * types and initializing the rest of the state (canonical type mapping) for
3696 * the fixed base BTF part.
3698 static int btf_dedup_prep(struct btf_dedup *d)
3704 if (!d->btf->base_btf)
3707 for (type_id = 1; type_id < d->btf->start_id; type_id++) {
3708 t = btf_type_by_id(d->btf, type_id);
3710 /* all base BTF types are self-canonical by definition */
3711 d->map[type_id] = type_id;
3713 switch (btf_kind(t)) {
3715 case BTF_KIND_DATASEC:
3716 /* VAR and DATASEC are never hash/deduplicated */
3718 case BTF_KIND_CONST:
3719 case BTF_KIND_VOLATILE:
3720 case BTF_KIND_RESTRICT:
3723 case BTF_KIND_TYPEDEF:
3725 case BTF_KIND_FLOAT:
3726 case BTF_KIND_TYPE_TAG:
3727 h = btf_hash_common(t);
3730 case BTF_KIND_DECL_TAG:
3731 h = btf_hash_int_decl_tag(t);
3734 h = btf_hash_enum(t);
3736 case BTF_KIND_STRUCT:
3737 case BTF_KIND_UNION:
3738 h = btf_hash_struct(t);
3740 case BTF_KIND_ARRAY:
3741 h = btf_hash_array(t);
3743 case BTF_KIND_FUNC_PROTO:
3744 h = btf_hash_fnproto(t);
3747 pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
3750 if (btf_dedup_table_add(d, h, type_id))
3758 * Deduplicate primitive types, which can't reference other types, by calculating
3759 * their type signature hash and comparing them with any possible canonical
3760 * candidate. If no canonical candidate matches, the type itself is marked as
3761 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3763 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
3765 struct btf_type *t = btf_type_by_id(d->btf, type_id);
3766 struct hashmap_entry *hash_entry;
3767 struct btf_type *cand;
3768 /* if we don't find equivalent type, then we are canonical */
3769 __u32 new_id = type_id;
3773 switch (btf_kind(t)) {
3774 case BTF_KIND_CONST:
3775 case BTF_KIND_VOLATILE:
3776 case BTF_KIND_RESTRICT:
3778 case BTF_KIND_TYPEDEF:
3779 case BTF_KIND_ARRAY:
3780 case BTF_KIND_STRUCT:
3781 case BTF_KIND_UNION:
3783 case BTF_KIND_FUNC_PROTO:
3785 case BTF_KIND_DATASEC:
3786 case BTF_KIND_DECL_TAG:
3787 case BTF_KIND_TYPE_TAG:
3791 h = btf_hash_int_decl_tag(t);
3792 for_each_dedup_cand(d, hash_entry, h) {
3793 cand_id = (__u32)(long)hash_entry->value;
3794 cand = btf_type_by_id(d->btf, cand_id);
3795 if (btf_equal_int_tag(t, cand)) {
3803 h = btf_hash_enum(t);
3804 for_each_dedup_cand(d, hash_entry, h) {
3805 cand_id = (__u32)(long)hash_entry->value;
3806 cand = btf_type_by_id(d->btf, cand_id);
3807 if (btf_equal_enum(t, cand)) {
3811 if (btf_compat_enum(t, cand)) {
3812 if (btf_is_enum_fwd(t)) {
3813 /* resolve fwd to full enum */
3817 /* resolve canonical enum fwd to full enum */
3818 d->map[cand_id] = type_id;
3824 case BTF_KIND_FLOAT:
3825 h = btf_hash_common(t);
3826 for_each_dedup_cand(d, hash_entry, h) {
3827 cand_id = (__u32)(long)hash_entry->value;
3828 cand = btf_type_by_id(d->btf, cand_id);
3829 if (btf_equal_common(t, cand)) {
3840 d->map[type_id] = new_id;
3841 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
3847 static int btf_dedup_prim_types(struct btf_dedup *d)
3851 for (i = 0; i < d->btf->nr_types; i++) {
3852 err = btf_dedup_prim_type(d, d->btf->start_id + i);
3860 * Check whether type is already mapped into a canonical one (could be to itself).
3862 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
3864 return d->map[type_id] <= BTF_MAX_NR_TYPES;
3868 * Resolve type ID into its canonical type ID, if any; otherwise return original
3869 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
3870 * STRUCT/UNION link and resolve it into canonical type ID as well.
3872 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
3874 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
3875 type_id = d->map[type_id];
3880 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
3883 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
3885 __u32 orig_type_id = type_id;
3887 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
3890 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
3891 type_id = d->map[type_id];
3893 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
3896 return orig_type_id;
3900 static inline __u16 btf_fwd_kind(struct btf_type *t)
3902 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
3905 /* Check if given two types are identical ARRAY definitions */
3906 static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
3908 struct btf_type *t1, *t2;
3910 t1 = btf_type_by_id(d->btf, id1);
3911 t2 = btf_type_by_id(d->btf, id2);
3912 if (!btf_is_array(t1) || !btf_is_array(t2))
3915 return btf_equal_array(t1, t2);
3918 /* Check if given two types are identical STRUCT/UNION definitions */
3919 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
3921 const struct btf_member *m1, *m2;
3922 struct btf_type *t1, *t2;
3925 t1 = btf_type_by_id(d->btf, id1);
3926 t2 = btf_type_by_id(d->btf, id2);
3928 if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
3931 if (!btf_shallow_equal_struct(t1, t2))
3934 m1 = btf_members(t1);
3935 m2 = btf_members(t2);
3936 for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
3937 if (m1->type != m2->type)
3944 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
3945 * call it "candidate graph" in this description for brevity) to a type graph
3946 * formed by (potential) canonical struct/union ("canonical graph" for brevity
3947 * here, though keep in mind that not all types in canonical graph are
3948 * necessarily canonical representatives themselves, some of them might be
3949 * duplicates or their uniqueness might not have been established yet).
3951 * - >0, if type graphs are equivalent;
3952 * - 0, if not equivalent;
3955 * The algorithm performs side-by-side DFS traversal of both type graphs and
3956 * checks equivalence of BTF types at each step. If at any point BTF types in
3957 * candidate and canonical graphs are not structurally compatible, the whole
3958 * graphs are incompatible. If types are structurally equivalent (i.e., all
3959 * information except referenced type IDs is exactly the same), a mapping from
3960 * `canon_id` to a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
3961 * If a type references other types, then those referenced types are checked
3962 * for equivalence recursively.
3964 * During DFS traversal, if we find that for current `canon_id` type we
3965 * already have some mapping in the hypothetical map, we check for two possible situations:
3967 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
3968 * happen when type graphs have cycles. In this case we assume those two
3969 * types are equivalent.
3970 * - `canon_id` is mapped to a different type. This is a contradiction in our
3971 * hypothetical mapping, because the same node in the canonical graph corresponds
3972 * to two different types in the candidate graph, which for equivalent type
3973 * graphs shouldn't happen. This condition terminates equivalence check
3974 * with negative result.
3976 * If the type graph traversal exhausts types to check and finds no contradiction,
3977 * then the type graphs are equivalent.
3979 * When checking types for equivalence, there is one special case: FWD types.
3980 * If FWD type resolution is allowed and one of the types (either from canonical
3981 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
3982 * flag) and their names match, hypothetical mapping is updated to point from
3983 * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
3984 * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
3986 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
3987 * if there are two identically named (or anonymous) structs/unions that are
3988 * structurally compatible, one of which has a FWD field, while the other is a
3989 * concrete STRUCT/UNION, but according to the C sources they are different
3990 * structs/unions referencing different types with the same name. This is extremely
3991 * unlikely to happen, but the btf_dedup API allows disabling FWD resolution if
3992 * this logic is causing problems.
3994 * Doing FWD resolution means that both candidate and/or canonical graphs can
3995 * consist of portions of the graph that come from multiple compilation units.
3996 * This is due to the fact that types within a single compilation unit are always
3997 * deduplicated and FWDs are already resolved, if the referenced struct/union
3998 * definition is available. So, if we had an unresolved FWD and found a corresponding
3999 * STRUCT/UNION, they will be from different compilation units. This
4000 * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4001 * type graph will likely have at least two different BTF types that describe
4002 * same type (e.g., most probably there will be two different BTF types for the
4003 * same 'int' primitive type) and could even have "overlapping" parts of type
4004 * graph that describe same subset of types.
4006 * This in turn means that our assumption that each type in canonical graph
4007 * must correspond to exactly one type in candidate graph might not hold
4008 * anymore and will make it harder to detect contradictions using hypothetical
4009 * map. To handle this problem, we allow following FWD -> STRUCT/UNION
4010 * resolution only in the canonical graph. FWDs in candidate graphs are never
4011 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4013 * - Both types in canonical and candidate graphs are FWDs. If they are
4014 * structurally equivalent, then they can either be both resolved to the
4015 * same STRUCT/UNION or not resolved at all. In both cases they are
4016 * equivalent and there is no need to resolve FWD on candidate side.
4017 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4018 * so nothing to resolve as well, algorithm will check equivalence anyway.
4019 * - Type in canonical graph is FWD, while type in candidate is concrete
4020 * STRUCT/UNION. In this case candidate graph comes from single compilation
4021 * unit, so there is exactly one BTF type for each unique C type. After
4022 * resolving FWD into STRUCT/UNION, there might be more than one BTF type
4023 * in canonical graph mapping to single BTF type in candidate graph, but
4024 * because hypothetical mapping maps from canonical to candidate types, it's
4025 * alright, and we still maintain the property of having single `canon_id`
4026 * mapping to single `cand_id` (there could be two different `canon_id`
4027 * mapped to the same `cand_id`, but it's not contradictory).
4028 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4029 * graph is FWD. In this case we are just going to check compatibility of
4030 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4031 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4032 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4033 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4036 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
4039 struct btf_type *cand_type;
4040 struct btf_type *canon_type;
4041 __u32 hypot_type_id;
4046 /* if both resolve to the same canonical, they must be equivalent */
4047 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
4050 canon_id = resolve_fwd_id(d, canon_id);
4052 hypot_type_id = d->hypot_map[canon_id];
4053 if (hypot_type_id <= BTF_MAX_NR_TYPES) {
4054 if (hypot_type_id == cand_id)
4056 /* In some cases compiler will generate different DWARF types
4057 * for *identical* array type definitions and use them for
4058 * different fields within the *same* struct. This breaks type
4059 * equivalence check, which makes an assumption that candidate
4060 * types sub-graph has a consistent and deduped-by-compiler
4061 * types within a single CU. So work around that by explicitly
4062 * allowing identical array types here.
4064 if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
4066 /* It turns out that similar situation can happen with
4067 * struct/union sometimes, sigh... Handle the case where
4068 * structs/unions are exactly the same, down to the referenced
4069 * type IDs. Anything more complicated (e.g., if referenced
4070 * types are different, but equivalent) is *way more*
4071 * complicated and requires a many-to-many equivalence mapping.
4073 if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
4078 if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
4081 cand_type = btf_type_by_id(d->btf, cand_id);
4082 canon_type = btf_type_by_id(d->btf, canon_id);
4083 cand_kind = btf_kind(cand_type);
4084 canon_kind = btf_kind(canon_type);
4086 if (cand_type->name_off != canon_type->name_off)
4089 /* FWD <--> STRUCT/UNION equivalence check, if enabled */
4090 if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
4091 && cand_kind != canon_kind) {
4095 if (cand_kind == BTF_KIND_FWD) {
4096 real_kind = canon_kind;
4097 fwd_kind = btf_fwd_kind(cand_type);
4099 real_kind = cand_kind;
4100 fwd_kind = btf_fwd_kind(canon_type);
4101 /* we'd need to resolve base FWD to STRUCT/UNION */
4102 if (fwd_kind == real_kind && canon_id < d->btf->start_id)
4103 d->hypot_adjust_canon = true;
4105 return fwd_kind == real_kind;
4108 if (cand_kind != canon_kind)
4111 switch (cand_kind) {
4113 return btf_equal_int_tag(cand_type, canon_type);
4116 return btf_compat_enum(cand_type, canon_type);
4119 case BTF_KIND_FLOAT:
4120 return btf_equal_common(cand_type, canon_type);
4122 case BTF_KIND_CONST:
4123 case BTF_KIND_VOLATILE:
4124 case BTF_KIND_RESTRICT:
4126 case BTF_KIND_TYPEDEF:
4128 case BTF_KIND_TYPE_TAG:
4129 if (cand_type->info != canon_type->info)
4131 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4133 case BTF_KIND_ARRAY: {
4134 const struct btf_array *cand_arr, *canon_arr;
4136 if (!btf_compat_array(cand_type, canon_type))
4138 cand_arr = btf_array(cand_type);
4139 canon_arr = btf_array(canon_type);
4140 eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
4143 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
4146 case BTF_KIND_STRUCT:
4147 case BTF_KIND_UNION: {
4148 const struct btf_member *cand_m, *canon_m;
4151 if (!btf_shallow_equal_struct(cand_type, canon_type))
4153 vlen = btf_vlen(cand_type);
4154 cand_m = btf_members(cand_type);
4155 canon_m = btf_members(canon_type);
4156 for (i = 0; i < vlen; i++) {
4157 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
4167 case BTF_KIND_FUNC_PROTO: {
4168 const struct btf_param *cand_p, *canon_p;
4171 if (!btf_compat_fnproto(cand_type, canon_type))
4173 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4176 vlen = btf_vlen(cand_type);
4177 cand_p = btf_params(cand_type);
4178 canon_p = btf_params(canon_type);
4179 for (i = 0; i < vlen; i++) {
4180 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
4196 * Use hypothetical mapping, produced by successful type graph equivalence
4197 * check, to augment existing struct/union canonical mapping, where possible.
4199 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4200 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4201 * it doesn't matter if FWD type was part of canonical graph or candidate one,
4202 * we are recording the mapping anyway. As opposed to the carefulness required
4203 * for struct/union correspondence mapping (described below), for FWD resolution
4204 * it's not important, as by the time that FWD type (a reference type) is
4205 * deduplicated, all structs/unions will already be deduped anyway.
4207 * Recording STRUCT/UNION mapping is purely a performance optimization and is
4208 * not required for correctness. It needs to be done carefully to ensure that
4209 * struct/union from candidate's type graph is not mapped into corresponding
4210 * struct/union from canonical type graph that itself hasn't been resolved into
4211 * canonical representative. The only guarantee we have is that canonical
4212 * struct/union was determined as canonical and that won't change. But any
4213 * types referenced through that struct/union's fields might not have been
4214 * resolved yet, so in a case like that it's too early to establish any kind of
4215 * correspondence between structs/unions.
4217 * No canonical correspondence is derived for primitive types (they are
4218 * already completely deduplicated anyway) or reference types (they rely on
4219 * stability of the struct/union canonical relationship for equivalence checks).
4221 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4223 __u32 canon_type_id, targ_type_id;
4224 __u16 t_kind, c_kind;
4228 for (i = 0; i < d->hypot_cnt; i++) {
4229 canon_type_id = d->hypot_list[i];
4230 targ_type_id = d->hypot_map[canon_type_id];
4231 t_id = resolve_type_id(d, targ_type_id);
4232 c_id = resolve_type_id(d, canon_type_id);
4233 t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4234 c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4236 * Resolve FWD into STRUCT/UNION.
4237 * It's ok to resolve FWD into STRUCT/UNION that's not yet
4238 * mapped to canonical representative (as opposed to
4239 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4240 * eventually that struct is going to be mapped and all resolved
4241 * FWDs will automatically resolve to correct canonical
4242 * representative. This will happen before ref type deduping,
4243 * which critically depends on the stability of these mappings. This
4244 * stability is not a requirement for STRUCT/UNION equivalence
4248 /* if it's the split BTF case, we still need to point base FWD
4249 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4250 * will be resolved against base FWD. If we don't point base
4251 * canonical FWD to the resolved STRUCT/UNION, then all the
4252 * FWDs in split BTF won't be correctly resolved to a proper
4255 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4256 d->map[c_id] = t_id;
4258 /* if graph equivalence determined that we'd need to adjust
4259 * base canonical types, then we need to only point base FWDs
4260 * to STRUCTs/UNIONs and do no more modifications. For all
4261 * other purposes the type graphs were not equivalent.
4263 if (d->hypot_adjust_canon)
4266 if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4267 d->map[t_id] = c_id;
4269 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4270 c_kind != BTF_KIND_FWD &&
4271 is_type_mapped(d, c_id) &&
4272 !is_type_mapped(d, t_id)) {
4274 * as a perf optimization, we can map struct/union
4275 * that's part of type graph we just verified for
4276 * equivalence. We can do that for struct/union that has
4277 * canonical representative only, though.
4279 d->map[t_id] = c_id;
4285 * Deduplicate struct/union types.
4287 * For each struct/union type its type signature hash is calculated, taking
4288 * into account the type's name, size, and the number, order, and names of its
4289 * fields, but ignoring type IDs referenced from fields, since they might not be
4290 * deduped completely until after the reference types deduplication phase. This
4291 * type hash is used to iterate over all potential canonical types sharing the same hash.
4292 * For each canonical candidate we check whether type graphs that they form
4293 * (through referenced types in fields and so on) are equivalent using algorithm
4294 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4295 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4296 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4297 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4298 * potentially map other structs/unions to their canonical representatives,
4299 * if such relationship hasn't yet been established. This speeds up algorithm
4300 * by eliminating some of the duplicate work.
4302 * If no matching canonical representative was found, struct/union is marked
4303 * as canonical for itself and is added into btf_dedup->dedup_table hash map
4304 * for further lookups.
4306 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4308 struct btf_type *cand_type, *t;
4309 struct hashmap_entry *hash_entry;
4310 /* if we don't find equivalent type, then we are canonical */
4311 __u32 new_id = type_id;
4315 /* already deduped or is in process of deduping (loop detected) */
4316 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4319 t = btf_type_by_id(d->btf, type_id);
4322 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4325 h = btf_hash_struct(t);
4326 for_each_dedup_cand(d, hash_entry, h) {
4327 __u32 cand_id = (__u32)(long)hash_entry->value;
4331 * Even though btf_dedup_is_equiv() checks for
4332 * btf_shallow_equal_struct() internally when checking two
4333 * structs (unions) for equivalence, we need to guard here
4334 * from picking matching FWD type as a dedup candidate.
4335 * This can happen due to hash collision. In such case just
4336 * relying on btf_dedup_is_equiv() would lead to potentially
4337 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
4338 * FWD and compatible STRUCT/UNION are considered equivalent.
4340 cand_type = btf_type_by_id(d->btf, cand_id);
4341 if (!btf_shallow_equal_struct(t, cand_type))
4344 btf_dedup_clear_hypot_map(d);
4345 eq = btf_dedup_is_equiv(d, type_id, cand_id);
4350 btf_dedup_merge_hypot_map(d);
4351 if (d->hypot_adjust_canon) /* not really equivalent */
4357 d->map[type_id] = new_id;
4358 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4364 static int btf_dedup_struct_types(struct btf_dedup *d)
4368 for (i = 0; i < d->btf->nr_types; i++) {
4369 err = btf_dedup_struct_type(d, d->btf->start_id + i);
4377 * Deduplicate reference type.
4379 * Once all primitive and struct/union types have been deduplicated, we can easily
4380 * deduplicate all other (reference) BTF types. This is done in two steps:
4382 * 1. Resolve all referenced type IDs into their canonical type IDs. This
4383 * resolution can be done either immediately for primitive or struct/union types
4384 * (because they were deduped in previous two phases) or recursively for
4385 * reference types. Recursion will always terminate at either primitive or
4386 * struct/union type, at which point we can "unwind" chain of reference types
4387 * one by one. There is no danger of encountering cycles because in the C type
4388 * system the only way to form a type cycle is through a struct/union, so any chain
4389 * of reference types, even those taking part in a type cycle, will inevitably
4390 * reach struct/union at some point.
4392 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4393 * becomes "stable", in the sense that no further deduplication will cause
4394 * any changes to it. With that, it's now possible to calculate type's signature
4395 * hash (this time taking into account referenced type IDs) and loop over all
4396 * potential canonical representatives. If no match was found, current type
4397 * will become canonical representative of itself and will be added into
4398 * btf_dedup->dedup_table as another possible canonical representative.
4400 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4402 struct hashmap_entry *hash_entry;
4403 __u32 new_id = type_id, cand_id;
4404 struct btf_type *t, *cand;
4405 /* if we don't find equivalent type, then we are representative type */
4409 if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4411 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4412 return resolve_type_id(d, type_id);
4414 t = btf_type_by_id(d->btf, type_id);
4415 d->map[type_id] = BTF_IN_PROGRESS_ID;
4417 switch (btf_kind(t)) {
4418 case BTF_KIND_CONST:
4419 case BTF_KIND_VOLATILE:
4420 case BTF_KIND_RESTRICT:
4422 case BTF_KIND_TYPEDEF:
4424 case BTF_KIND_TYPE_TAG:
4425 ref_type_id = btf_dedup_ref_type(d, t->type);
4426 if (ref_type_id < 0)
4428 t->type = ref_type_id;
4430 h = btf_hash_common(t);
4431 for_each_dedup_cand(d, hash_entry, h) {
4432 cand_id = (__u32)(long)hash_entry->value;
4433 cand = btf_type_by_id(d->btf, cand_id);
4434 if (btf_equal_common(t, cand)) {
4441 case BTF_KIND_DECL_TAG:
4442 ref_type_id = btf_dedup_ref_type(d, t->type);
4443 if (ref_type_id < 0)
4445 t->type = ref_type_id;
4447 h = btf_hash_int_decl_tag(t);
4448 for_each_dedup_cand(d, hash_entry, h) {
4449 cand_id = (__u32)(long)hash_entry->value;
4450 cand = btf_type_by_id(d->btf, cand_id);
4451 if (btf_equal_int_tag(t, cand)) {
4458 case BTF_KIND_ARRAY: {
4459 struct btf_array *info = btf_array(t);
4461 ref_type_id = btf_dedup_ref_type(d, info->type);
4462 if (ref_type_id < 0)
4464 info->type = ref_type_id;
4466 ref_type_id = btf_dedup_ref_type(d, info->index_type);
4467 if (ref_type_id < 0)
4469 info->index_type = ref_type_id;
4471 h = btf_hash_array(t);
4472 for_each_dedup_cand(d, hash_entry, h) {
4473 cand_id = (__u32)(long)hash_entry->value;
4474 cand = btf_type_by_id(d->btf, cand_id);
4475 if (btf_equal_array(t, cand)) {
4483 case BTF_KIND_FUNC_PROTO: {
4484 struct btf_param *param;
4488 ref_type_id = btf_dedup_ref_type(d, t->type);
4489 if (ref_type_id < 0)
4491 t->type = ref_type_id;
4494 param = btf_params(t);
4495 for (i = 0; i < vlen; i++) {
4496 ref_type_id = btf_dedup_ref_type(d, param->type);
4497 if (ref_type_id < 0)
4499 param->type = ref_type_id;
4503 h = btf_hash_fnproto(t);
4504 for_each_dedup_cand(d, hash_entry, h) {
4505 cand_id = (__u32)(long)hash_entry->value;
4506 cand = btf_type_by_id(d->btf, cand_id);
4507 if (btf_equal_fnproto(t, cand)) {
4519 d->map[type_id] = new_id;
4520 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4526 static int btf_dedup_ref_types(struct btf_dedup *d)
4530 for (i = 0; i < d->btf->nr_types; i++) {
4531 err = btf_dedup_ref_type(d, d->btf->start_id + i);
4535 /* we won't need d->dedup_table anymore */
4536 hashmap__free(d->dedup_table);
4537 d->dedup_table = NULL;
4544 * After we established for each type its corresponding canonical representative
4545 * type, we can now eliminate types that are not canonical and leave only
4546 * canonical ones laid out sequentially in memory by copying them over
4547 * duplicates. During compaction the btf_dedup->hypot_map array is reused to store
4548 * a map from original type ID to a new compacted type ID, which will be used
4549 * during the next phase to "fix up" type IDs referenced from struct/union and
4552 static int btf_dedup_compact_types(struct btf_dedup *d)
4555 __u32 next_type_id = d->btf->start_id;
4556 const struct btf_type *t;
4560 /* we are going to reuse hypot_map to store compaction remapping */
4561 d->hypot_map[0] = 0;
4562 /* base BTF types are not renumbered */
4563 for (id = 1; id < d->btf->start_id; id++)
4564 d->hypot_map[id] = id;
4565 for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
4566 d->hypot_map[id] = BTF_UNPROCESSED_ID;
4568 p = d->btf->types_data;
4570 for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
4571 if (d->map[id] != id)
4574 t = btf__type_by_id(d->btf, id);
4575 len = btf_type_size(t);
4580 d->hypot_map[id] = next_type_id;
4581 d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
4586 /* shrink struct btf's internal types index and update btf_header */
4587 d->btf->nr_types = next_type_id - d->btf->start_id;
4588 d->btf->type_offs_cap = d->btf->nr_types;
4589 d->btf->hdr->type_len = p - d->btf->types_data;
4590 new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
4592 if (d->btf->type_offs_cap && !new_offs)
4594 d->btf->type_offs = new_offs;
4595 d->btf->hdr->str_off = d->btf->hdr->type_len;
4596 d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
4601 * Figure out the final (deduplicated and compacted) type ID for the provided
4602 * original `type_id` by first resolving it into the corresponding canonical type ID and
4603 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4604 * which is populated during compaction phase.
4606 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4608 struct btf_dedup *d = ctx;
4609 __u32 resolved_type_id, new_type_id;
4611 resolved_type_id = resolve_type_id(d, *type_id);
4612 new_type_id = d->hypot_map[resolved_type_id];
4613 if (new_type_id > BTF_MAX_NR_TYPES)
4616 *type_id = new_type_id;
4621 * Remap referenced type IDs into deduped type IDs.
4623 * After BTF types are deduplicated and compacted, their final type IDs may
4624 * differ from original ones. The map from original to a corresponding
4625 * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4626 * compaction phase. During remapping phase we are rewriting all type IDs
4627 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4628 * their final deduped type IDs.
static int btf_dedup_remap_types(struct btf_dedup *d)
{
	int i, r;

	for (i = 0; i < d->btf->nr_types; i++) {
		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);

		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
		if (r < 0)
			return r;
	}

	if (!d->btf_ext)
		return 0;

	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
	return r < 0 ? r : 0;
}

/*
 * Probe a few well-known locations for the vmlinux kernel image and try to
 * load BTF data out of it to use as the target BTF.
 */
struct btf *btf__load_vmlinux_btf(void)
{
	struct {
		const char *path_fmt;
		bool raw_btf;
	} locations[] = {
		/* try canonical vmlinux BTF through sysfs first */
		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
		/* fall back to trying to find vmlinux ELF on disk otherwise */
		{ "/boot/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/build/vmlinux" },
		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
	};
	char path[PATH_MAX + 1];
	struct utsname buf;
	struct btf *btf;
	int i, err;

	uname(&buf);

	for (i = 0; i < ARRAY_SIZE(locations); i++) {
		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);

		if (access(path, R_OK))
			continue;

		if (locations[i].raw_btf)
			btf = btf__parse_raw(path);
		else
			btf = btf__parse_elf(path, NULL);
		err = libbpf_get_error(btf);
		pr_debug("loading kernel BTF '%s': %d\n", path, err);
		if (err)
			continue;

		return btf;
	}

	pr_warn("failed to find valid kernel BTF\n");
	return libbpf_err_ptr(-ESRCH);
}

struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
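
/* Usage sketch (illustrative only): load kernel BTF and look up a type by
 * name; error handling is condensed here.
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	int id;
 *
 *	if (!libbpf_get_error(vmlinux_btf)) {
 *		id = btf__find_by_name_kind(vmlinux_btf, "task_struct",
 *					    BTF_KIND_STRUCT);
 *		printf("task_struct has BTF ID %d\n", id);
 *		btf__free(vmlinux_btf);
 *	}
 */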

struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
{
	char path[80];

	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
	return btf__parse_split(path, vmlinux_btf);
}
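
/* Usage sketch (illustrative only): module BTF is split BTF on top of
 * vmlinux BTF, so the base object must outlive the module BTF object. The
 * module name is hypothetical:
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	struct btf *mod_btf = btf__load_module_btf("nf_conntrack", vmlinux_btf);
 *
 *	...
 *	btf__free(mod_btf);	// free split BTF before its base
 *	btf__free(vmlinux_btf);
 */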

int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
{
	int i, n, err;

	switch (btf_kind(t)) {
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
		return 0;

	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		return visit(&t->type, ctx);

	case BTF_KIND_ARRAY: {
		struct btf_array *a = btf_array(t);

		err = visit(&a->type, ctx);
		err = err ?: visit(&a->index_type, ctx);
		return err;
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *m = btf_params(t);

		err = visit(&t->type, ctx);
		if (err)
			return err;
		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	case BTF_KIND_DATASEC: {
		struct btf_var_secinfo *m = btf_var_secinfos(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	default:
		return -EINVAL;
	}
}
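
/* Illustrative visitor sketch (not part of libbpf): the callback may read or
 * rewrite each referenced type ID in place; a non-zero return aborts the
 * walk and is propagated to the caller. The counting helper below is
 * hypothetical:
 *
 *	static int count_refs(__u32 *type_id, void *ctx)
 *	{
 *		(*(int *)ctx)++;
 *		return 0;
 *	}
 *
 *	int cnt = 0;
 *
 *	btf_type_visit_type_ids(t, count_refs, &cnt);
 */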

int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
{
	int i, n, err;

	err = visit(&t->name_off, ctx);
	if (err)
		return err;

	switch (btf_kind(t)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		struct btf_enum *m = btf_enum(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *m = btf_params(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	default:
		break;
	}

	return 0;
}
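
/* Illustrative sketch (not part of libbpf): a string-offset visitor sees
 * every name_off a type carries (its own name plus member/param/enumerator
 * names), which is how string dedup can mark which strings are still in use.
 * The callback and its set-insert helper are hypothetical:
 *
 *	static int mark_str_used(__u32 *str_off, void *ctx)
 *	{
 *		if (*str_off)			// offset 0 is the empty string
 *			record_str(ctx, *str_off);	// hypothetical set insert
 *		return 0;
 *	}
 */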

int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
	const struct btf_ext_info *seg;
	struct btf_ext_info_sec *sec;
	int i, err;

	seg = &btf_ext->func_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_func_info_min *rec;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->type_id, ctx);
			if (err < 0)
				return err;
		}
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_core_relo *rec;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->type_id, ctx);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
{
	const struct btf_ext_info *seg;
	struct btf_ext_info_sec *sec;
	int i, err;

	seg = &btf_ext->func_info;
	for_each_btf_ext_sec(seg, sec) {
		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;
	}

	seg = &btf_ext->line_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_line_info_min *rec;

		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->file_name_off, ctx);
			if (err)
				return err;
			err = visit(&rec->line_off, ctx);
			if (err)
				return err;
		}
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		struct bpf_core_relo *rec;

		err = visit(&sec->sec_name_off, ctx);
		if (err)
			return err;

		for_each_btf_ext_rec(seg, sec, i, rec) {
			err = visit(&rec->access_str_off, ctx);
			if (err)
				return err;
		}
	}

	return 0;
}
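
/* Illustrative sketch (not part of libbpf internals as written): the two
 * string-offset walkers above let one remapping pass cover both BTF types
 * and .BTF.ext metadata, mirroring how btf_dedup_remap_type_id() is driven
 * for type IDs. remap_str_off_cb is a hypothetical stand-in for the actual
 * string-dedup remapping callback:
 *
 *	err = btf_type_visit_str_offs(t, remap_str_off_cb, d);
 *	err = err ?: btf_ext_visit_str_offs(d->btf_ext, remap_str_off_cb, d);
 */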