bpf: Move lnode list node to struct bpf_ksym
author Jiri Olsa <jolsa@kernel.org>
Thu, 12 Mar 2020 19:56:00 +0000 (20:56 +0100)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 13 Mar 2020 19:49:51 +0000 (12:49 -0700)
Add the lnode list node to struct bpf_ksym, so that
struct bpf_ksym itself can be chained and used in other
objects like bpf_trampoline and bpf_dispatcher.
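
For illustration, a minimal userspace sketch of the pattern this
enables (fake_prog_aux and fake_trampoline are hypothetical stand-ins,
and list_add_tail is a simplified version of the kernel helper): any
object that embeds a struct bpf_ksym can sit on the same list and be
walked without knowing the container type.

    /* Minimal userspace sketch, not kernel code. */
    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
            n->prev = h->prev;
            n->next = h;
            h->prev->next = n;
            h->prev = n;
    }

    struct bpf_ksym {
            unsigned long    start;
            unsigned long    end;
            char             name[64];
            struct list_head lnode;
    };

    /* hypothetical containers standing in for prog aux/trampoline */
    struct fake_prog_aux   { struct bpf_ksym ksym; };
    struct fake_trampoline { struct bpf_ksym ksym; };

    int main(void)
    {
            struct list_head bpf_kallsyms = { &bpf_kallsyms, &bpf_kallsyms };
            struct fake_prog_aux   aux = { .ksym = { .start = 0x1000, .name = "bpf_prog_x" } };
            struct fake_trampoline tr  = { .ksym = { .start = 0x2000, .name = "bpf_trampoline_y" } };
            struct list_head *pos;

            list_add_tail(&aux.ksym.lnode, &bpf_kallsyms);
            list_add_tail(&tr.ksym.lnode, &bpf_kallsyms);

            /* the walker sees only bpf_ksym, never the container type */
            for (pos = bpf_kallsyms.next; pos != &bpf_kallsyms; pos = pos->next) {
                    struct bpf_ksym *ksym = (struct bpf_ksym *)
                            ((char *)pos - offsetof(struct bpf_ksym, lnode));
                    printf("%lx %s\n", ksym->start, ksym->name);
            }
            return 0;
    }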

Change the iterator to struct bpf_ksym in the bpf_get_kallsym function.

ksym->start holds the prog->bpf_func value, so it is safe
to use it as the value reported by bpf_get_kallsym.
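
For context, a hypothetical kernel-style fragment (the helper name is
made up; this is not part of the patch) of how that invariant is
established when a program's symbol is registered:

    /* Hypothetical helper, for illustration only: ksym.start is
     * filled from prog->bpf_func when the symbol is set up, so
     * bpf_get_kallsym can report ksym->start instead of going
     * through aux->prog->bpf_func. The end bound derived from
     * jited_len is an assumption of this sketch.
     */
    static void bpf_prog_fill_ksym_bounds(struct bpf_prog *prog)
    {
            prog->aux->ksym.start = (unsigned long) prog->bpf_func;
            prog->aux->ksym.end   = prog->aux->ksym.start +
                                    prog->jited_len;
    }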

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20200312195610.346362-6-jolsa@kernel.org
include/linux/bpf.h
kernel/bpf/core.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 047b44d..4fad2fa 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -476,6 +476,7 @@ struct bpf_ksym {
        unsigned long            start;
        unsigned long            end;
        char                     name[KSYM_NAME_LEN];
+       struct list_head         lnode;
 };
 
 enum bpf_tramp_prog_type {
@@ -659,7 +660,6 @@ struct bpf_prog_aux {
        struct bpf_jit_poke_descriptor *poke_tab;
        u32 size_poke_tab;
        struct latch_tree_node ksym_tnode;
-       struct list_head ksym_lnode;
        struct bpf_ksym ksym;
        const struct bpf_prog_ops *ops;
        struct bpf_map **used_maps;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f6800c2..5eb5d5b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -97,7 +97,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
        fp->aux->prog = fp;
        fp->jit_requested = ebpf_jit_enabled();
 
-       INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
+       INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
 
        return fp;
 }
@@ -613,18 +613,18 @@ static struct latch_tree_root bpf_tree __cacheline_aligned;
 
 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 {
-       WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
-       list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
+       WARN_ON_ONCE(!list_empty(&aux->ksym.lnode));
+       list_add_tail_rcu(&aux->ksym.lnode, &bpf_kallsyms);
        latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 }
 
 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
 {
-       if (list_empty(&aux->ksym_lnode))
+       if (list_empty(&aux->ksym.lnode))
                return;
 
        latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
-       list_del_rcu(&aux->ksym_lnode);
+       list_del_rcu(&aux->ksym.lnode);
 }
 
 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
@@ -634,8 +634,8 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 
 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 {
-       return list_empty(&fp->aux->ksym_lnode) ||
-              fp->aux->ksym_lnode.prev == LIST_POISON2;
+       return list_empty(&fp->aux->ksym.lnode) ||
+              fp->aux->ksym.lnode.prev == LIST_POISON2;
 }
 
 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
@@ -729,7 +729,7 @@ out:
 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym)
 {
-       struct bpf_prog_aux *aux;
+       struct bpf_ksym *ksym;
        unsigned int it = 0;
        int ret = -ERANGE;
 
@@ -737,13 +737,13 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                return ret;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
+       list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
                if (it++ != symnum)
                        continue;
 
-               strncpy(sym, aux->ksym.name, KSYM_NAME_LEN);
+               strncpy(sym, ksym->name, KSYM_NAME_LEN);
 
-               *value = (unsigned long)aux->prog->bpf_func;
+               *value = ksym->start;
                *type  = BPF_SYM_ELF_TYPE;
 
                ret = 0;
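
From userspace nothing changes: with the JIT kallsyms export enabled,
JITed program symbols still show up in /proc/kallsyms with the BPF ELF
type. An illustrative session (the address and program tag below are
made up):

    # sysctl -w net.core.bpf_jit_kallsyms=1
    # grep ' bpf_prog_' /proc/kallsyms | head -1
    ffffffffc0123000 t bpf_prog_6deef7357e7b4530_example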