bpf: Factor out inc/dec of active flag into helpers.
Author:     Alexei Starovoitov <ast@kernel.org>
AuthorDate: Thu, 6 Jul 2023 03:34:38 +0000 (20:34 -0700)
Commit:     Daniel Borkmann <daniel@iogearbox.net>
CommitDate: Wed, 12 Jul 2023 21:45:22 +0000 (23:45 +0200)

Factor out the local_inc_return()/local_dec() bracketing of &c->active
into inc_active()/dec_active() helpers.

No functional changes.
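
The diff below is the authoritative change. As a rough, self-contained
userspace sketch of the pattern the helpers encode (an illustration only:
C11 atomics stand in for the kernel's local_t, and there is no userspace
analogue of local_irq_save()/local_irq_restore(), so flags is carried but
unused):

  /* Sketch: models inc_active()/dec_active() outside the kernel.
   * atomic_int replaces local_t; flags only mirrors the kernel API shape.
   */
  #include <assert.h>
  #include <stdatomic.h>
  #include <stdio.h>

  struct cache {
          atomic_int active;      /* models bpf_mem_cache::active */
          int free_cnt;
  };

  static void inc_active(struct cache *c, unsigned long *flags)
  {
          (void)flags;            /* kernel: local_irq_save(*flags) on RT */
          /* counter must go 0 -> 1, mirroring the WARN_ON_ONCE() check */
          assert(atomic_fetch_add(&c->active, 1) == 0);
  }

  static void dec_active(struct cache *c, unsigned long flags)
  {
          (void)flags;            /* kernel: local_irq_restore(flags) on RT */
          atomic_fetch_sub(&c->active, 1);
  }

  int main(void)
  {
          struct cache c = { 0 };
          unsigned long flags;

          inc_active(&c, &flags); /* enter critical section */
          c.free_cnt++;           /* protected per-cpu state */
          dec_active(&c, flags);  /* leave critical section */
          printf("free_cnt=%d\n", c.free_cnt);
          return 0;
  }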

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-6-alexei.starovoitov@gmail.com
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 9693b1f8cbda5e49f3a4189f8351285c518463e5..052fc801fb9f44687eb5596412bb5df658385da5 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -154,17 +154,15 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
-static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
 {
-       unsigned long flags;
-
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                /* In RT irq_work runs in per-cpu kthread, so disable
                 * interrupts to avoid preemption and interrupts and
                 * reduce the chance of bpf prog executing on this cpu
                 * when active counter is busy.
                 */
-               local_irq_save(flags);
+               local_irq_save(*flags);
        /* alloc_bulk runs from irq_work which will not preempt a bpf
         * program that does unit_alloc/unit_free since IRQs are
         * disabled there. There is no race to increment 'active'
@@ -172,13 +170,25 @@ static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
         * bpf prog preempted this loop.
         */
        WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-       __llist_add(obj, &c->free_llist);
-       c->free_cnt++;
+}
+
+static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
+{
        local_dec(&c->active);
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_restore(flags);
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+       unsigned long flags;
+
+       inc_active(c, &flags);
+       __llist_add(obj, &c->free_llist);
+       c->free_cnt++;
+       dec_active(c, flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
@@ -300,17 +310,13 @@ static void free_bulk(struct bpf_mem_cache *c)
        int cnt;
 
        do {
-               if (IS_ENABLED(CONFIG_PREEMPT_RT))
-                       local_irq_save(flags);
-               WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+               inc_active(c, &flags);
                llnode = __llist_del_first(&c->free_llist);
                if (llnode)
                        cnt = --c->free_cnt;
                else
                        cnt = 0;
-               local_dec(&c->active);
-               if (IS_ENABLED(CONFIG_PREEMPT_RT))
-                       local_irq_restore(flags);
+               dec_active(c, flags);
                if (llnode)
                        enque_to_free(c, llnode);
        } while (cnt > (c->high_watermark + c->low_watermark) / 2);
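
One detail the helpers make explicit: local_irq_save() writes the saved
irq state, so inc_active() takes the flags by pointer, while
local_irq_restore() only reads it, so dec_active() takes it by value.
Every caller therefore follows the shape of add_obj_to_free_list()
above: declare flags locally, pass &flags on entry, pass flags on exit.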