libbpf: Replace btf__type_by_id() with btf_type_by_id().
author Alexei Starovoitov <ast@kernel.org>
Wed, 1 Dec 2021 18:10:24 +0000 (10:10 -0800)
committer Andrii Nakryiko <andrii@kernel.org>
Thu, 2 Dec 2021 19:18:34 +0000 (11:18 -0800)
To prepare relo_core.c to be compiled both in the kernel and in user space,
replace btf__type_by_id() with btf_type_by_id().

In libbpf, btf__type_by_id() and btf_type_by_id() have different behavior.

bpf_core_apply_relo_insn() needs the behavior of the uapi btf__type_by_id()
(which range-checks type_id) rather than that of the internal btf_type_by_id(),
but the type_id range check is already done in its caller,
bpf_core_apply_relo(), so it is safe to switch to the internal helper
everywhere. The kernel's btf_type_by_id() performs the range check anyway,
so the substitution doesn't hurt there either.
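
For reference, the practical difference between the two accessors (shown with
hypothetical wrapper names, not code from this patch): the uapi
btf__type_by_id() range-checks type_id and returns NULL on an out-of-range id,
while the internal btf_type_by_id() assumes the id was already validated.

  /* Hypothetical illustration, not upstream code; assumes btf.h and
   * libbpf_internal.h are included for the two accessor prototypes.
   */
  static const struct btf_type *resolve_checked(const struct btf *btf, __u32 id)
  {
          /* uapi accessor: returns NULL for an out-of-range id */
          return btf__type_by_id(btf, id);
  }

  static const struct btf_type *resolve_unchecked(const struct btf *btf, __u32 id)
  {
          /* internal accessor: no range check, so "id" must have been
           * validated by the caller, as bpf_core_apply_relo() does before
           * calling bpf_core_apply_relo_insn()
           */
          return btf_type_by_id(btf, id);
  }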

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211201181040.23337-2-alexei.starovoitov@gmail.com
tools/lib/bpf/btf.c
tools/lib/bpf/libbpf_internal.h
tools/lib/bpf/relo_core.c

diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 8024fe3..0d7b16e 100644
@@ -454,7 +454,7 @@ const struct btf *btf__base_btf(const struct btf *btf)
 }
 
 /* internal helper returning non-const pointer to a type */
-struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
+struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
 {
        if (type_id == 0)
                return &btf_void;
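
The internal accessor's btf argument also becomes const-qualified here, so the
relo_core.c helpers, which only hold const struct btf * pointers, can call it
without casts. A hypothetical caller sketch (not part of the patch):

  static bool is_struct_type(const struct btf *btf, __u32 type_id)
  {
          /* compiles against a const struct btf * now that the parameter is const */
          const struct btf_type *t = btf_type_by_id(btf, type_id);

          return btf_is_struct(t);
  }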
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 311905d..6f143e9 100644
@@ -172,7 +172,7 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
 struct btf;
 struct btf_type;
 
-struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id);
+struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
 const char *btf_kind_str(const struct btf_type *t);
 const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
 
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index b5b8956..c0904f4 100644
@@ -51,7 +51,7 @@ static bool is_flex_arr(const struct btf *btf,
                return false;
 
        /* has to be the last member of enclosing struct */
-       t = btf__type_by_id(btf, acc->type_id);
+       t = btf_type_by_id(btf, acc->type_id);
        return acc->idx == btf_vlen(t) - 1;
 }
 
@@ -388,7 +388,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
                return 0;
 
        local_id = local_acc->type_id;
-       local_type = btf__type_by_id(local_btf, local_id);
+       local_type = btf_type_by_id(local_btf, local_id);
        local_member = btf_members(local_type) + local_acc->idx;
        local_name = btf__name_by_offset(local_btf, local_member->name_off);
 
@@ -580,7 +580,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
                return -EUCLEAN; /* request instruction poisoning */
 
        acc = &spec->spec[spec->len - 1];
-       t = btf__type_by_id(spec->btf, acc->type_id);
+       t = btf_type_by_id(spec->btf, acc->type_id);
 
        /* a[n] accessor needs special handling */
        if (!acc->name) {
@@ -729,7 +729,7 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
        case BPF_ENUMVAL_VALUE:
                if (!spec)
                        return -EUCLEAN; /* request instruction poisoning */
-               t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
+               t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
                e = btf_enum(t) + spec->spec[0].idx;
                *val = e->val;
                break;
@@ -805,8 +805,8 @@ static int bpf_core_calc_relo(const char *prog_name,
                if (res->orig_sz != res->new_sz) {
                        const struct btf_type *orig_t, *new_t;
 
-                       orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
-                       new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
+                       orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
+                       new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);
 
                        /* There are two use cases in which it's safe to
                         * adjust load/store's mem size:
@@ -1054,7 +1054,7 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
        int i;
 
        type_id = spec->root_type_id;
-       t = btf__type_by_id(spec->btf, type_id);
+       t = btf_type_by_id(spec->btf, type_id);
        s = btf__name_by_offset(spec->btf, t->name_off);
 
        libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
@@ -1158,10 +1158,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
        int i, j, err;
 
        local_id = relo->type_id;
-       local_type = btf__type_by_id(local_btf, local_id);
-       if (!local_type)
-               return -EINVAL;
-
+       local_type = btf_type_by_id(local_btf, local_id);
        local_name = btf__name_by_offset(local_btf, local_type->name_off);
        if (!local_name)
                return -EINVAL;
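
The NULL check removed above is redundant because the type_id range check
already happens in the libbpf caller before this function runs. A hypothetical
helper paraphrasing that caller-side validation in libbpf.c's
bpf_core_apply_relo() (illustrative, not the exact upstream code):

  static int validate_relo_type_id(const struct btf *local_btf,
                                   const struct bpf_core_relo *relo)
  {
          /* the uapi accessor returns NULL for an out-of-range type_id */
          if (!btf__type_by_id(local_btf, relo->type_id))
                  return -EINVAL;
          return 0;
  }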