bpf: Add preempt_count_{sub,add} into btf id deny list
[platform/kernel/linux-starfive.git] kernel/bpf/verifier.c
index 4e09899..322a2ae 100644
@@ -1000,6 +1000,8 @@ static void print_insn_state(struct bpf_verifier_env *env,
  */
 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
 {
+       size_t alloc_bytes;
+       void *orig = dst;
        size_t bytes;
 
        if (ZERO_OR_NULL_PTR(src))
@@ -1008,11 +1010,11 @@ static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t
        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;
 
-       if (ksize(dst) < bytes) {
-               kfree(dst);
-               dst = kmalloc_track_caller(bytes, flags);
-               if (!dst)
-                       return NULL;
+       alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
+       dst = krealloc(orig, alloc_bytes, flags);
+       if (!dst) {
+               kfree(orig);
+               return NULL;
        }
 
        memcpy(dst, src, bytes);
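
The copy_array() change above replaces the open-coded ksize()/kfree()/kmalloc_track_caller() resize with a krealloc() whose size is first rounded up with kmalloc_size_roundup(), so the size the caller requests always matches the slab bucket that ksize() reports and an already-large-enough buffer is simply reused. A minimal sketch of the same sizing pattern (grow_buf() and its parameters are illustrative, not part of the patch):

#include <linux/minmax.h>
#include <linux/slab.h>

/*
 * Grow 'buf' so it can hold at least 'need' bytes, reusing the current
 * allocation when its slab bucket is already big enough.  Rounding the
 * request up to the bucket size keeps the tracked allocation size and the
 * requested size in agreement, mirroring copy_array() above.
 */
static void *grow_buf(void *buf, size_t need, gfp_t flags)
{
	size_t alloc = kmalloc_size_roundup(need);
	void *p;

	if (buf)
		alloc = max(alloc, ksize(buf));

	p = krealloc(buf, alloc, flags);
	if (!p)
		kfree(buf);	/* krealloc() does not free on failure */
	return p;
}
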
@@ -1027,12 +1029,14 @@ out:
  */
 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
 {
+       size_t alloc_size;
        void *new_arr;
 
        if (!new_n || old_n == new_n)
                goto out;
 
-       new_arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
+       alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
+       new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
        if (!new_arr) {
                kfree(arr);
                return NULL;
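
realloc_array() gets the same rounding treatment, with the byte count computed by size_mul(), which saturates at SIZE_MAX on overflow so the allocation fails cleanly instead of silently wrapping to a too-small buffer. A hedged sketch of that sizing step (helper name and numbers are illustrative):

#include <linux/overflow.h>
#include <linux/slab.h>

/*
 * 0x2000000000000001 * 8 wraps to 8 with a plain multiplication, while
 * size_mul(0x2000000000000001, 8) saturates to SIZE_MAX, making the
 * krealloc() below fail instead of returning a dangerously small buffer.
 */
static void *resize_table(void *tbl, size_t nelem, size_t elem_size)
{
	size_t bytes = kmalloc_size_roundup(size_mul(nelem, elem_size));

	return krealloc(tbl, bytes, GFP_KERNEL);
}
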
@@ -1586,9 +1590,9 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
        struct tnum var64_off = tnum_intersect(reg->var_off,
                                               tnum_range(reg->umin_value,
                                                          reg->umax_value));
-       struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
-                                               tnum_range(reg->u32_min_value,
-                                                          reg->u32_max_value));
+       struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
+                                              tnum_range(reg->u32_min_value,
+                                                         reg->u32_max_value));
 
        reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
 }
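
The __reg_bound_offset() fix derives the 32-bit view from var64_off, which has already been narrowed by the 64-bit [umin_value, umax_value] range, instead of from the original reg->var_off, so bits learned from the 64-bit bounds now reach the subregister tnum as well. A rough mirror of the fixed composition with a worked example (all register values are invented):

#include <linux/tnum.h>

/*
 * Example: var_off fully unknown, 64-bit bounds pin the value to the
 * constant 0x100000003 (umin == umax), 32-bit bounds still [0, U32_MAX]:
 *
 *   var64 = intersect(unknown, range(c, c))   -> constant 0x100000003
 *   old:   var32 from tnum_subreg(unknown)    -> low 32 bits unknown
 *   new:   var32 from tnum_subreg(var64)      -> constant 0x3
 *
 * Only the fixed composition ends up with a fully known var_off.
 */
static struct tnum bound_offset(struct tnum var_off, u64 umin, u64 umax,
				u32 u32_min, u32 u32_max)
{
	struct tnum var64 = tnum_intersect(var_off, tnum_range(umin, umax));
	struct tnum var32 = tnum_intersect(tnum_subreg(var64),
					   tnum_range(u32_min, u32_max));

	/* refined upper 32 bits from var64, refined lower 32 bits from var32 */
	return tnum_or(tnum_clear_subreg(var64), var32);
}
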
@@ -2504,9 +2508,11 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 {
        u32 cnt = cur->jmp_history_cnt;
        struct bpf_idx_pair *p;
+       size_t alloc_size;
 
        cnt++;
-       p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
+       alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
+       p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
        if (!p)
                return -ENOMEM;
        p[cnt - 1].idx = env->insn_idx;
@@ -2658,6 +2664,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                if (opcode == BPF_CALL) {
                        if (insn->src_reg == BPF_PSEUDO_CALL)
                                return -ENOTSUPP;
+                       /* kfunc with imm==0 is invalid and fixup_kfunc_call will
+                        * catch this error later. Make backtracking conservative
+                        * with ENOTSUPP.
+                        */
+                       if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
+                               return -ENOTSUPP;
                        /* regular helper call sets R0 */
                        *reg_mask &= ~1;
                        if (*reg_mask & 0x3f) {
@@ -2670,6 +2682,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                        }
                } else if (opcode == BPF_EXIT) {
                        return -ENOTSUPP;
+               } else if (BPF_SRC(insn->code) == BPF_X) {
+                       if (!(*reg_mask & (dreg | sreg)))
+                               return 0;
+                       /* dreg <cond> sreg
+                        * Both dreg and sreg need precision before
+                        * this insn. If only sreg was marked precise
+                        * before it would be equally necessary to
+                        * propagate it to dreg.
+                        */
+                       *reg_mask |= (sreg | dreg);
+                        /* else dreg <cond> K
+                         * Only dreg still needs precision before
+                         * this insn, so for the K-based conditional
+                         * there is nothing new to be marked.
+                         */
                }
        } else if (class == BPF_LD) {
                if (!(*reg_mask & dreg))
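
The new BPF_SRC(insn->code) == BPF_X branch lets precision backtracking walk through register-register conditional jumps: if either operand of `dreg <cond> sreg` is already in the precision mask, both get marked, because a later precise use of one may have been derived from the comparison against the other; without that taint, state pruning could treat genuinely different states as equivalent. A sketch of the program shape this affects, using the insn macros from include/linux/filter.h (register choices and the branch offset are invented):

#include <linux/filter.h>	/* BPF_MOV64_IMM, BPF_JMP_REG, ... */

/*
 * r6's usable range is only established by comparing it against r7.  When a
 * later use of r6 (say, as a bounded pointer offset) demands precision,
 * backtracking now reaches this jump and marks r7 precise as well, rather
 * than tracking r6 alone and pruning against states that differ only in r7.
 */
static const struct bpf_insn precision_example[] = {
	BPF_MOV64_IMM(BPF_REG_7, 8),
	/* r6 assumed to hold an unbounded scalar at this point */
	BPF_JMP_REG(BPF_JGE, BPF_REG_6, BPF_REG_7, 2),	/* dreg <cond> sreg */
	/* ... fall-through path: r6 < 8, used as a precise offset ... */
	BPF_EXIT_INSN(),
};
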
@@ -3005,13 +3032,24 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
        return reg->type != SCALAR_VALUE;
 }
 
+/* Copy src state preserving dst->parent and dst->live fields */
+static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
+{
+       struct bpf_reg_state *parent = dst->parent;
+       enum bpf_reg_liveness live = dst->live;
+
+       *dst = *src;
+       dst->parent = parent;
+       dst->live = live;
+}
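
copy_register_state() exists because a bpf_reg_state slot carries two kinds of data: the value being tracked and bookkeeping that belongs to the slot's position in the state graph. The plain struct assignments replaced in the hunks below copied both, so the destination slot ended up reporting the source's liveness and pointing at the source's parent chain, which could mislead liveness propagation and, with it, state pruning. Roughly, the split the helper enforces is (field grouping paraphrased from struct bpf_reg_state, not exhaustive):

/*
 * taken from src:  type, off, var_off, {u,s}{32,}_{min,max}_value, id,
 *                  ref_obj_id, ... (everything describing the value)
 * kept from dst:   parent (the state this slot reports liveness to)
 *                  live   (REG_LIVE_WRITTEN / REG_LIVE_READ* marks)
 */
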
+
 static void save_register_state(struct bpf_func_state *state,
                                int spi, struct bpf_reg_state *reg,
                                int size)
 {
        int i;
 
-       state->stack[spi].spilled_ptr = *reg;
+       copy_register_state(&state->stack[spi].spilled_ptr, reg);
        if (size == BPF_REG_SIZE)
                state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
@@ -3057,7 +3095,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                bool sanitize = reg && is_spillable_regtype(reg->type);
 
                for (i = 0; i < size; i++) {
-                       if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+                       u8 type = state->stack[spi].slot_type[i];
+
+                       if (type != STACK_MISC && type != STACK_ZERO) {
                                sanitize = true;
                                break;
                        }
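
Note how the sanitize trigger above is inverted from a blacklist (only STACK_INVALID bytes forced sanitization of the spill) to a whitelist: any byte that is not plain STACK_MISC/STACK_ZERO data, including bytes of an already spilled register, now takes the sanitize path. Spelled out as a predicate (illustrative helper, not in the patch):

#include <linux/bpf_verifier.h>	/* STACK_MISC, STACK_ZERO, ... */

/*
 * A byte of stack is "plain data" only when it is STACK_MISC or STACK_ZERO;
 * anything else, i.e. STACK_INVALID as before but now also parts of an
 * existing STACK_SPILL (or any slot type added later), forces the store to
 * be sanitized.
 */
static bool stack_byte_is_plain_data(u8 slot_type)
{
	return slot_type == STACK_MISC || slot_type == STACK_ZERO;
}
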
@@ -3337,7 +3377,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
                                 */
                                s32 subreg_def = state->regs[dst_regno].subreg_def;
 
-                               state->regs[dst_regno] = *reg;
+                               copy_register_state(&state->regs[dst_regno], reg);
                                state->regs[dst_regno].subreg_def = subreg_def;
                        } else {
                                for (i = 0; i < size; i++) {
@@ -3358,7 +3398,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 
                if (dst_regno >= 0) {
                        /* restore register state from stack */
-                       state->regs[dst_regno] = *reg;
+                       copy_register_state(&state->regs[dst_regno], reg);
                        /* mark reg as written since spilled pointer state likely
                         * has its liveness marks cleared by is_state_visited()
                         * which resets stack/reg liveness for state transitions
@@ -3478,17 +3518,13 @@ static int check_stack_read(struct bpf_verifier_env *env,
        }
        /* Variable offset is prohibited for unprivileged mode for simplicity
         * since it requires corresponding support in Spectre masking for stack
-        * ALU. See also retrieve_ptr_limit().
+        * ALU. See also retrieve_ptr_limit(). The check in
+        * check_stack_access_for_ptr_arithmetic() called by
+        * adjust_ptr_min_max_vals() prevents users from creating stack pointers
+        * with variable offsets, therefore no check is required here. Further,
+        * just checking it here would be insufficient as speculative stack
+        * writes could still lead to unsafe speculative behaviour.
         */
-       if (!env->bypass_spec_v1 && var_off) {
-               char tn_buf[48];
-
-               tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-               verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
-                               ptr_regno, tn_buf);
-               return -EACCES;
-       }
-
        if (!var_off) {
                off += reg->var_off.value;
                err = check_stack_read_fixed_off(env, state, off, size,
@@ -8077,7 +8113,7 @@ do_sim:
         */
        if (!ptr_is_dst_reg) {
                tmp = *dst_reg;
-               *dst_reg = *ptr_reg;
+               copy_register_state(dst_reg, ptr_reg);
        }
        ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
                                        env->insn_idx);
@@ -9330,7 +9366,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                         * to propagate min/max range.
                                         */
                                        src_reg->id = ++env->id_gen;
-                               *dst_reg = *src_reg;
+                               copy_register_state(dst_reg, src_reg);
                                dst_reg->live |= REG_LIVE_WRITTEN;
                                dst_reg->subreg_def = DEF_NOT_SUBREG;
                        } else {
@@ -9341,7 +9377,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                                insn->src_reg);
                                        return -EACCES;
                                } else if (src_reg->type == SCALAR_VALUE) {
-                                       *dst_reg = *src_reg;
+                                       copy_register_state(dst_reg, src_reg);
                                        /* Make sure ID is cleared otherwise
                                         * dst_reg min/max could be incorrectly
                                         * propagated into src_reg by find_equal_scalars()
@@ -10137,7 +10173,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
 
        bpf_for_each_reg_in_vstate(vstate, state, reg, ({
                if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
-                       *reg = *known_reg;
+                       copy_register_state(reg, known_reg);
        }));
 }
 
@@ -11868,10 +11904,11 @@ static int propagate_precision(struct bpf_verifier_env *env,
                state_reg = state->regs;
                for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
                        if (state_reg->type != SCALAR_VALUE ||
-                           !state_reg->precise)
+                           !state_reg->precise ||
+                           !(state_reg->live & REG_LIVE_READ))
                                continue;
                        if (env->log.level & BPF_LOG_LEVEL2)
-                               verbose(env, "frame %d: propagating r%d\n", i, fr);
+                               verbose(env, "frame %d: propagating r%d\n", fr, i);
                        err = mark_chain_precision_frame(env, fr, i);
                        if (err < 0)
                                return err;
@@ -11882,11 +11919,12 @@ static int propagate_precision(struct bpf_verifier_env *env,
                                continue;
                        state_reg = &state->stack[i].spilled_ptr;
                        if (state_reg->type != SCALAR_VALUE ||
-                           !state_reg->precise)
+                           !state_reg->precise ||
+                           !(state_reg->live & REG_LIVE_READ))
                                continue;
                        if (env->log.level & BPF_LOG_LEVEL2)
                                verbose(env, "frame %d: propagating fp%d\n",
-                                       (-i - 1) * BPF_REG_SIZE, fr);
+                                       fr, (-i - 1) * BPF_REG_SIZE);
                        err = mark_chain_precision_stack_frame(env, fr, i);
                        if (err < 0)
                                return err;
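
Two things change in propagate_precision(): slots that were never read in the matched state (no REG_LIVE_READ mark) are now skipped, since marking them precise only costs future pruning opportunities, and the verbose() calls finally pass the frame and the register/stack slot in the order the format strings expect. With the arguments fixed, a level-2 log would read, for example (frame and slot numbers invented):

	frame 0: propagating r6
	frame 1: propagating fp-16
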
@@ -15095,6 +15133,10 @@ BTF_ID(func, migrate_enable)
 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
 BTF_ID(func, rcu_read_unlock_strict)
 #endif
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
+BTF_ID(func, preempt_count_add)
+BTF_ID(func, preempt_count_sub)
+#endif
 BTF_SET_END(btf_id_deny)
 
 static int check_attach_btf_id(struct bpf_verifier_env *env)
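
The btf_id_deny additions are what the subject line describes: when CONFIG_DEBUG_PREEMPT or CONFIG_TRACE_PREEMPT_TOGGLE turns preempt_count_add()/preempt_count_sub() into real, traceable functions, attaching a trampoline-based program to them has been reported to recurse and panic, since the recursion protection around program entry/exit does not cover these functions; check_attach_btf_id() now refuses such programs at load time. A minimal sketch of a program that is rejected after this change (program name and object layout follow common libbpf conventions and are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Loading this fentry program now fails because preempt_count_add is in
 * btf_id_deny; the same applies to fexit and to preempt_count_sub.
 */
SEC("fentry/preempt_count_add")
int BPF_PROG(on_preempt_count_add, int val)
{
	return 0;
}

char LICENSE[] SEC("license") = "GPL";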