bpf: Allow reuse from waiting_for_gp_ttrace list.
author Alexei Starovoitov <ast@kernel.org>
Thu, 6 Jul 2023 03:34:42 +0000 (20:34 -0700)
committer Daniel Borkmann <daniel@iogearbox.net>
Wed, 12 Jul 2023 21:45:23 +0000 (23:45 +0200)
alloc_bulk() can reuse elements from free_by_rcu_ttrace.
Let it reuse from waiting_for_gp_ttrace as well to avoid unnecessary kmalloc().

Since alloc_bulk() may now pop objects off waiting_for_gp_ttrace with
llist_del_first() at any time, do_call_rcu_ttrace() can no longer use the
non-atomic __llist_add() when moving objects onto that list; switch it to
the cmpxchg-based llist_add().

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20230706033447.54696-10-alexei.starovoitov@gmail.com
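
For readers outside the kernel tree, a minimal userspace sketch of the refill
pattern the first hunk implements follows. The helpers pop(), push() and
refill() are illustrative names; plain pointers stand in for the kernel's
lock-less llist API, and malloc() stands in for the kmalloc() fallback:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	/* Single-threaded stand-ins for llist_del_first()/llist_add(). */
	static struct node *pop(struct node **head)
	{
		struct node *n = *head;

		if (n)
			*head = n->next;
		return n;
	}

	static void push(struct node **head, struct node *n)
	{
		n->next = *head;
		*head = n;
	}

	/* Refill free_list with cnt objects: reuse from free_by_rcu first,
	 * then from waiting_for_gp (the new source this commit adds), and
	 * allocate only for whatever is still missing.
	 */
	static int refill(struct node **free_list, struct node **free_by_rcu,
			  struct node **waiting_for_gp, int cnt)
	{
		struct node *obj;
		int i;

		for (i = 0; i < cnt; i++) {
			obj = pop(free_by_rcu);
			if (!obj)
				break;
			push(free_list, obj);
		}
		for (; i < cnt; i++) {
			obj = pop(waiting_for_gp);
			if (!obj)
				break;
			push(free_list, obj);
		}
		for (; i < cnt; i++) {
			obj = malloc(sizeof(*obj)); /* models kmalloc() */
			if (!obj)
				return -1;
			push(free_list, obj);
		}
		return 0;
	}

	int main(void)
	{
		struct node *free_list = NULL, *free_by_rcu = NULL, *waiting = NULL;
		struct node stale = { 0 };

		push(&waiting, &stale);	/* one reusable object */
		if (refill(&free_list, &free_by_rcu, &waiting, 3))
			return 1;
		printf("free list refilled with 3 objects, 1 reused\n");
		return 0;
	}
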
kernel/bpf/memalloc.c

index 9986c6b7df4d33a3ec4f5e39e75b249c0cb3c268..e5a87f6cf2ccd64b9b736610bdd540d413ac532b 100644
@@ -212,6 +212,15 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
        if (i >= cnt)
                return;
 
+       for (; i < cnt; i++) {
+               obj = llist_del_first(&c->waiting_for_gp_ttrace);
+               if (!obj)
+                       break;
+               add_obj_to_free_list(c, obj);
+       }
+       if (i >= cnt)
+               return;
+
        memcg = get_memcg(c);
        old_memcg = set_active_memcg(memcg);
        for (; i < cnt; i++) {
@@ -295,12 +304,7 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 
        WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
        llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
-               /* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
-                * It doesn't race with llist_del_all either.
-                * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
-                * from __free_rcu() and from drain_mem_cache().
-                */
-               __llist_add(llnode, &c->waiting_for_gp_ttrace);
+               llist_add(llnode, &c->waiting_for_gp_ttrace);
 
        if (unlikely(READ_ONCE(c->draining))) {
                __free_rcu(&c->rcu_ttrace);
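
The second hunk exists because the non-atomic __llist_add() is safe only
while nothing else touches the list; now that alloc_bulk() can concurrently
llist_del_first() from waiting_for_gp_ttrace, the cmpxchg-based llist_add()
is required. Below is a userspace model of the two operations using C11
atomics; atomic_push(), nonatomic_push() and atomic_pop() are illustrative
names, not kernel API:

	#include <stdatomic.h>
	#include <stdio.h>

	struct lnode { struct lnode *next; };

	static _Atomic(struct lnode *) head;

	/* Models llist_add(): the cmpxchg loop makes the push safe against
	 * concurrent pushes and a concurrent single popper.
	 */
	static void atomic_push(struct lnode *n)
	{
		struct lnode *first = atomic_load(&head);

		do {
			n->next = first;
		} while (!atomic_compare_exchange_weak(&head, &first, n));
	}

	/* Models __llist_add(): a plain load/store pair. Racy if anything
	 * else modifies the list concurrently, which is exactly what
	 * alloc_bulk()'s new llist_del_first() does.
	 */
	static void nonatomic_push(struct lnode *n)
	{
		struct lnode *first = atomic_load_explicit(&head, memory_order_relaxed);

		n->next = first;
		atomic_store_explicit(&head, n, memory_order_relaxed);
	}

	/* Models llist_del_first(): safe against concurrent atomic_push(),
	 * but, like the kernel API, it assumes a single popper at a time
	 * because of the ABA problem.
	 */
	static struct lnode *atomic_pop(void)
	{
		struct lnode *first = atomic_load(&head);

		do {
			if (!first)
				return NULL;
		} while (!atomic_compare_exchange_weak(&head, &first, first->next));
		return first;
	}

	int main(void)
	{
		struct lnode a = { 0 }, b = { 0 };

		atomic_push(&a);
		atomic_push(&b);	/* list: b -> a */
		printf("popped %p then %p\n",
		       (void *)atomic_pop(), (void *)atomic_pop());
		(void)nonatomic_push;	/* shown only for contrast */
		return 0;
	}

In the kernel the same guarantee comes from the cmpxchg loop inside
llist_add_batch(), which llist_add() wraps.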