bpf: Add preempt_count_{sub,add} into btf id deny list
[platform/kernel/linux-starfive.git] / kernel / bpf / verifier.c
index b4d5b34..322a2ae 100644 (file)
@@ -1590,9 +1590,9 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
        struct tnum var64_off = tnum_intersect(reg->var_off,
                                               tnum_range(reg->umin_value,
                                                          reg->umax_value));
-       struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
-                                               tnum_range(reg->u32_min_value,
-                                                          reg->u32_max_value));
+       struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
+                                              tnum_range(reg->u32_min_value,
+                                                         reg->u32_max_value));
 
        reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
 }
@@ -2664,6 +2664,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                if (opcode == BPF_CALL) {
                        if (insn->src_reg == BPF_PSEUDO_CALL)
                                return -ENOTSUPP;
+                       /* kfunc with imm==0 is invalid and fixup_kfunc_call will
+                        * catch this error later. Make backtracking conservative
+                        * with ENOTSUPP.
+                        */
+                       if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
+                               return -ENOTSUPP;
                        /* regular helper call sets R0 */
                        *reg_mask &= ~1;
                        if (*reg_mask & 0x3f) {
@@ -2676,6 +2682,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                        }
                } else if (opcode == BPF_EXIT) {
                        return -ENOTSUPP;
+               } else if (BPF_SRC(insn->code) == BPF_X) {
+                       if (!(*reg_mask & (dreg | sreg)))
+                               return 0;
+                       /* dreg <cond> sreg
+                        * Both dreg and sreg need precision before
+                        * this insn. If only sreg was marked precise
+                        * before it would be equally necessary to
+                        * propagate it to dreg.
+                        */
+                       *reg_mask |= (sreg | dreg);
+                       /* else dreg <cond> K
+                        * Only dreg still needs precision before
+                        * this insn, so for the K-based conditional
+                        * there is nothing new to be marked.
+                        */
                }
        } else if (class == BPF_LD) {
                if (!(*reg_mask & dreg))
@@ -3011,13 +3032,24 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
        return reg->type != SCALAR_VALUE;
 }
 
+/* Copy src state preserving dst->parent and dst->live fields */
+static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
+{
+       struct bpf_reg_state *parent = dst->parent;
+       enum bpf_reg_liveness live = dst->live;
+
+       *dst = *src;
+       dst->parent = parent;
+       dst->live = live;
+}
+
 static void save_register_state(struct bpf_func_state *state,
                                int spi, struct bpf_reg_state *reg,
                                int size)
 {
        int i;
 
-       state->stack[spi].spilled_ptr = *reg;
+       copy_register_state(&state->stack[spi].spilled_ptr, reg);
        if (size == BPF_REG_SIZE)
                state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
@@ -3063,7 +3095,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                bool sanitize = reg && is_spillable_regtype(reg->type);
 
                for (i = 0; i < size; i++) {
-                       if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+                       u8 type = state->stack[spi].slot_type[i];
+
+                       if (type != STACK_MISC && type != STACK_ZERO) {
                                sanitize = true;
                                break;
                        }
@@ -3343,7 +3377,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
                                 */
                                s32 subreg_def = state->regs[dst_regno].subreg_def;
 
-                               state->regs[dst_regno] = *reg;
+                               copy_register_state(&state->regs[dst_regno], reg);
                                state->regs[dst_regno].subreg_def = subreg_def;
                        } else {
                                for (i = 0; i < size; i++) {
@@ -3364,7 +3398,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 
                if (dst_regno >= 0) {
                        /* restore register state from stack */
-                       state->regs[dst_regno] = *reg;
+                       copy_register_state(&state->regs[dst_regno], reg);
                        /* mark reg as written since spilled pointer state likely
                         * has its liveness marks cleared by is_state_visited()
                         * which resets stack/reg liveness for state transitions
@@ -3484,17 +3518,13 @@ static int check_stack_read(struct bpf_verifier_env *env,
        }
        /* Variable offset is prohibited for unprivileged mode for simplicity
         * since it requires corresponding support in Spectre masking for stack
-        * ALU. See also retrieve_ptr_limit().
+        * ALU. See also retrieve_ptr_limit(). The check in
+        * check_stack_access_for_ptr_arithmetic() called by
+        * adjust_ptr_min_max_vals() prevents users from creating stack pointers
+        * with variable offsets, therefore no check is required here. Further,
+        * just checking it here would be insufficient as speculative stack
+        * writes could still lead to unsafe speculative behaviour.
         */
-       if (!env->bypass_spec_v1 && var_off) {
-               char tn_buf[48];
-
-               tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-               verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
-                               ptr_regno, tn_buf);
-               return -EACCES;
-       }
-
        if (!var_off) {
                off += reg->var_off.value;
                err = check_stack_read_fixed_off(env, state, off, size,
@@ -8083,7 +8113,7 @@ do_sim:
         */
        if (!ptr_is_dst_reg) {
                tmp = *dst_reg;
-               *dst_reg = *ptr_reg;
+               copy_register_state(dst_reg, ptr_reg);
        }
        ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
                                        env->insn_idx);
@@ -9336,7 +9366,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                         * to propagate min/max range.
                                         */
                                        src_reg->id = ++env->id_gen;
-                               *dst_reg = *src_reg;
+                               copy_register_state(dst_reg, src_reg);
                                dst_reg->live |= REG_LIVE_WRITTEN;
                                dst_reg->subreg_def = DEF_NOT_SUBREG;
                        } else {
@@ -9347,7 +9377,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                                insn->src_reg);
                                        return -EACCES;
                                } else if (src_reg->type == SCALAR_VALUE) {
-                                       *dst_reg = *src_reg;
+                                       copy_register_state(dst_reg, src_reg);
                                        /* Make sure ID is cleared otherwise
                                         * dst_reg min/max could be incorrectly
                                         * propagated into src_reg by find_equal_scalars()
@@ -10143,7 +10173,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
 
        bpf_for_each_reg_in_vstate(vstate, state, reg, ({
                if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
-                       *reg = *known_reg;
+                       copy_register_state(reg, known_reg);
        }));
 }
 
@@ -11874,10 +11904,11 @@ static int propagate_precision(struct bpf_verifier_env *env,
                state_reg = state->regs;
                for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
                        if (state_reg->type != SCALAR_VALUE ||
-                           !state_reg->precise)
+                           !state_reg->precise ||
+                           !(state_reg->live & REG_LIVE_READ))
                                continue;
                        if (env->log.level & BPF_LOG_LEVEL2)
-                               verbose(env, "frame %d: propagating r%d\n", i, fr);
+                               verbose(env, "frame %d: propagating r%d\n", fr, i);
                        err = mark_chain_precision_frame(env, fr, i);
                        if (err < 0)
                                return err;
@@ -11888,11 +11919,12 @@ static int propagate_precision(struct bpf_verifier_env *env,
                                continue;
                        state_reg = &state->stack[i].spilled_ptr;
                        if (state_reg->type != SCALAR_VALUE ||
-                           !state_reg->precise)
+                           !state_reg->precise ||
+                           !(state_reg->live & REG_LIVE_READ))
                                continue;
                        if (env->log.level & BPF_LOG_LEVEL2)
                                verbose(env, "frame %d: propagating fp%d\n",
-                                       (-i - 1) * BPF_REG_SIZE, fr);
+                                       fr, (-i - 1) * BPF_REG_SIZE);
                        err = mark_chain_precision_stack_frame(env, fr, i);
                        if (err < 0)
                                return err;
@@ -15101,6 +15133,10 @@ BTF_ID(func, migrate_enable)
 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
 BTF_ID(func, rcu_read_unlock_strict)
 #endif
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
+BTF_ID(func, preempt_count_add)
+BTF_ID(func, preempt_count_sub)
+#endif
 BTF_SET_END(btf_id_deny)
 
 static int check_attach_btf_id(struct bpf_verifier_env *env)