bpf: Move ksym_tnode to bpf_ksym
author Jiri Olsa <jolsa@kernel.org>
Thu, 12 Mar 2020 19:56:01 +0000 (20:56 +0100)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 13 Mar 2020 19:49:51 +0000 (12:49 -0700)
Move the ksym_tnode latch tree node into the 'struct bpf_ksym'
object, so the symbol itself can be chained and used in other
objects like bpf_trampoline and bpf_dispatcher.
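
With this change 'struct bpf_ksym' carries both the list and the
tree linkage on its own. A sketch of the resulting layout, pieced
together from the include/linux/bpf.h hunk below (the 'start' field
is only implied here by the lookup code; it sits just above 'end'
in the full header):

  struct bpf_ksym {
          unsigned long            start;
          unsigned long            end;
          char                     name[KSYM_NAME_LEN];
          struct list_head         lnode; /* bpf_kallsyms list   */
          struct latch_tree_node   tnode; /* bpf_tree latch tree */
  };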

We need the bpf_ksym object to be linked both in the bpf_kallsyms
list via lnode (for /proc/kallsyms) and in the bpf_tree latch tree
via tnode (for bpf address lookup functions like
__bpf_address_lookup or bpf_prog_kallsyms_find).
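
Any object embedding a bpf_ksym can then be published to both
structures the same way. A minimal sketch of such a helper
(hypothetical name; it mirrors the bpf_prog_ksym_node_add() hunk
below, and the actual factoring in the kernel may differ):

  static void bpf_ksym_link(struct bpf_ksym *ksym)
  {
          WARN_ON_ONCE(!list_empty(&ksym->lnode));
          /* visible in /proc/kallsyms */
          list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
          /* visible to __bpf_address_lookup() and friends */
          latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
  }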

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200312195610.346362-7-jolsa@kernel.org
include/linux/bpf.h
kernel/bpf/core.c

index 4fad2fa4135c121cdf3f6cf246d28e774504efeb..68d66b0078dfb5696a4bcbdf7c8f6071007eb15c 100644 (file)
@@ -477,6 +477,7 @@ struct bpf_ksym {
        unsigned long            end;
        char                     name[KSYM_NAME_LEN];
        struct list_head         lnode;
+       struct latch_tree_node   tnode;
 };
 
 enum bpf_tramp_prog_type {
@@ -659,7 +660,6 @@ struct bpf_prog_aux {
        void *jit_data; /* JIT specific data. arch dependent */
        struct bpf_jit_poke_descriptor *poke_tab;
        u32 size_poke_tab;
-       struct latch_tree_node ksym_tnode;
        struct bpf_ksym ksym;
        const struct bpf_prog_ops *ops;
        struct bpf_map **used_maps;
index 5eb5d5bb7a951f909d11f5a3e7dd860988d0fd5e..ab1846c34167f41009eff8b5aca16f631c171294 100644 (file)
@@ -572,31 +572,27 @@ bpf_prog_ksym_set_name(struct bpf_prog *prog)
                *sym = 0;
 }
 
-static __always_inline unsigned long
-bpf_get_prog_addr_start(struct latch_tree_node *n)
+static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
 {
-       const struct bpf_prog_aux *aux;
-
-       aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
-       return aux->ksym.start;
+       return container_of(n, struct bpf_ksym, tnode)->start;
 }
 
 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
                                          struct latch_tree_node *b)
 {
-       return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
+       return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
 }
 
 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 {
        unsigned long val = (unsigned long)key;
-       const struct bpf_prog_aux *aux;
+       const struct bpf_ksym *ksym;
 
-       aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+       ksym = container_of(n, struct bpf_ksym, tnode);
 
-       if (val < aux->ksym.start)
+       if (val < ksym->start)
                return -1;
-       if (val >= aux->ksym.end)
+       if (val >= ksym->end)
                return  1;
 
        return 0;
@@ -615,7 +611,7 @@ static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 {
        WARN_ON_ONCE(!list_empty(&aux->ksym.lnode));
        list_add_tail_rcu(&aux->ksym.lnode, &bpf_kallsyms);
-       latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+       latch_tree_insert(&aux->ksym.tnode, &bpf_tree, &bpf_tree_ops);
 }
 
 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
@@ -623,7 +619,7 @@ static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
        if (list_empty(&aux->ksym.lnode))
                return;
 
-       latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+       latch_tree_erase(&aux->ksym.tnode, &bpf_tree, &bpf_tree_ops);
        list_del_rcu(&aux->ksym.lnode);
 }
 
@@ -668,7 +664,7 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 
        n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
        return n ?
-              container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
+              container_of(n, struct bpf_prog_aux, ksym.tnode)->prog :
               NULL;
 }
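
Note the container_of(n, struct bpf_prog_aux, ksym.tnode) in the
last hunk: it recovers the outer bpf_prog_aux from the embedded
tree node in one step, which works because offsetof() (and hence
container_of()) accepts a nested member designator (GCC and Clang
both accept this). A standalone userspace sketch with hypothetical
types, not kernel code:

  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  struct node  { int dummy; };
  struct ksym  { struct node tnode; };
  struct outer { int id; struct ksym ksym; };

  int main(void)
  {
          struct outer o = { .id = 42 };
          struct node *n = &o.ksym.tnode;

          /* nested designator: offset of ksym.tnode within outer */
          struct outer *back = container_of(n, struct outer, ksym.tnode);

          printf("id=%d\n", back->id); /* prints id=42 */
          return 0;
  }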