bpf: Use this_cpu_{inc_return|dec} for prog->active
authorHou Tao <houtao1@huawei.com>
Thu, 1 Sep 2022 06:19:36 +0000 (14:19 +0800)
committerMartin KaFai Lau <martin.lau@kernel.org>
Thu, 1 Sep 2022 19:16:18 +0000 (12:16 -0700)
Both __this_cpu_inc_return() and __this_cpu_dec() are not preemption
safe, and now migrate_disable() doesn't disable preemption, so the update
of prog->active is not atomic and, in theory, under a fully preemptible
kernel the recursion prevention may not work.

Fixing by using the preemption-safe and IRQ-safe variants.

Fixes: ca06f55b9002 ("bpf: Add per-program recursion prevention mechanism")
Signed-off-by: Hou Tao <houtao1@huawei.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20220901061938.3789460-3-houtao@huaweicloud.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
kernel/bpf/trampoline.c

index ff87e38..ad76940 100644 (file)
@@ -895,7 +895,7 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *ru
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
-       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                inc_misses_counter(prog);
                return 0;
        }
@@ -930,7 +930,7 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
        update_prog_stats(prog, start);
-       __this_cpu_dec(*(prog->active));
+       this_cpu_dec(*(prog->active));
        migrate_enable();
        rcu_read_unlock();
 }
@@ -966,7 +966,7 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_r
        migrate_disable();
        might_fault();
 
-       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                inc_misses_counter(prog);
                return 0;
        }
@@ -982,7 +982,7 @@ void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
        update_prog_stats(prog, start);
-       __this_cpu_dec(*(prog->active));
+       this_cpu_dec(*(prog->active));
        migrate_enable();
        rcu_read_unlock_trace();
 }