bpf: Refactor alloc_bulk().
author Alexei Starovoitov <ast@kernel.org>
Thu, 6 Jul 2023 03:34:37 +0000 (20:34 -0700)
committer Daniel Borkmann <daniel@iogearbox.net>
Wed, 12 Jul 2023 21:45:22 +0000 (23:45 +0200)
Factor out the inner body of alloc_bulk() into a separate helper, add_obj_to_free_list().
No functional changes.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-5-alexei.starovoitov@gmail.com
kernel/bpf/memalloc.c

index 693651d..9693b1f 100644
@@ -154,11 +154,35 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+       unsigned long flags;
+
+       if (IS_ENABLED(CONFIG_PREEMPT_RT))
+               /* In RT irq_work runs in per-cpu kthread, so disable
+                * interrupts to avoid preemption and interrupts and
+                * reduce the chance of bpf prog executing on this cpu
+                * when active counter is busy.
+                */
+               local_irq_save(flags);
+       /* alloc_bulk runs from irq_work which will not preempt a bpf
+        * program that does unit_alloc/unit_free since IRQs are
+        * disabled there. There is no race to increment 'active'
+        * counter. It protects free_llist from corruption in case NMI
+        * bpf prog preempted this loop.
+        */
+       WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+       __llist_add(obj, &c->free_llist);
+       c->free_cnt++;
+       local_dec(&c->active);
+       if (IS_ENABLED(CONFIG_PREEMPT_RT))
+               local_irq_restore(flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
        struct mem_cgroup *memcg = NULL, *old_memcg;
-       unsigned long flags;
        void *obj;
        int i;
 
@@ -188,25 +212,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
                        if (!obj)
                                break;
                }
-               if (IS_ENABLED(CONFIG_PREEMPT_RT))
-                       /* In RT irq_work runs in per-cpu kthread, so disable
-                        * interrupts to avoid preemption and interrupts and
-                        * reduce the chance of bpf prog executing on this cpu
-                        * when active counter is busy.
-                        */
-                       local_irq_save(flags);
-               /* alloc_bulk runs from irq_work which will not preempt a bpf
-                * program that does unit_alloc/unit_free since IRQs are
-                * disabled there. There is no race to increment 'active'
-                * counter. It protects free_llist from corruption in case NMI
-                * bpf prog preempted this loop.
-                */
-               WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-               __llist_add(obj, &c->free_llist);
-               c->free_cnt++;
-               local_dec(&c->active);
-               if (IS_ENABLED(CONFIG_PREEMPT_RT))
-                       local_irq_restore(flags);
+               add_obj_to_free_list(c, obj);
        }
        set_active_memcg(old_memcg);
        mem_cgroup_put(memcg);
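
For illustration, a minimal user-space sketch of the pattern that add_obj_to_free_list() implements: an intrusive singly-linked free list guarded by an "active" reentrancy counter. All names here (struct cache, struct node, push_free) are hypothetical stand-ins; the kernel code uses local_t, __llist_add() and WARN_ON_ONCE() instead of the plain counter and assert() below.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

struct cache {
	struct node *free_list;	/* head of the intrusive free list */
	int free_cnt;		/* number of objects on the list */
	int active;		/* reentrancy guard, like c->active */
};

static void push_free(struct cache *c, struct node *obj)
{
	/* A second entry while active != 0 means a reentrant caller
	 * (an NMI bpf prog, in the kernel case) raced with this push;
	 * the kernel flags that case with WARN_ON_ONCE().
	 */
	c->active++;
	assert(c->active == 1);
	obj->next = c->free_list;	/* __llist_add() equivalent */
	c->free_list = obj;
	c->free_cnt++;
	c->active--;
}

int main(void)
{
	struct cache c = { 0 };
	int i;

	for (i = 0; i < 4; i++)
		push_free(&c, calloc(1, sizeof(struct node)));
	printf("free_cnt = %d\n", c.free_cnt);	/* prints: free_cnt = 4 */
	return 0;
}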