bpf: Reject variable offset alu on PTR_TO_FLOW_KEYS
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c (platform/kernel/linux-starfive.git)
index 873ade1..97fd176 100644
@@ -1515,7 +1515,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
        if (state->in_async_callback_fn)
                verbose(env, " async_cb");
        verbose(env, "\n");
-       mark_verifier_state_clean(env);
+       if (!print_all)
+               mark_verifier_state_clean(env);
 }
 
 static inline u32 vlog_alignment(u32 pos)
@@ -1631,7 +1632,10 @@ static int resize_reference_state(struct bpf_func_state *state, size_t n)
        return 0;
 }
 
-static int grow_stack_state(struct bpf_func_state *state, int size)
+/* Possibly update state->allocated_stack to be at least size bytes. Also
+ * possibly update the function's high-water mark in its bpf_subprog_info.
+ */
+static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
 {
        size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
 
@@ -1643,6 +1647,11 @@ static int grow_stack_state(struct bpf_func_state *state, int size)
                return -ENOMEM;
 
        state->allocated_stack = size;
+
+       /* update known max for given subprogram */
+       if (env->subprog_info[state->subprogno].stack_depth < size)
+               env->subprog_info[state->subprogno].stack_depth = size;
+
        return 0;
 }
 
@@ -3200,12 +3209,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 
 /* Backtrack one insn at a time. If idx is not at the top of recorded
  * history then previous instruction came from straight line execution.
+ * Return -ENOENT if we exhausted all instructions within given state.
+ *
+ * It's legal to have a bit of a loop with the same starting and ending
+ * insn index within the same state, e.g.: 3->4->5->3, so just because current
+ * instruction index is the same as state's first_idx doesn't mean we are
+ * done. If there is still some jump history left, we should keep going. We
+ * need to take into account that we might have a jump history between given
+ * state's parent and itself, due to checkpointing. In this case, we'll have
+ * a history entry recording a jump from the last instruction of the parent
+ * state to the first instruction of the given state.
  */
 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
                             u32 *history)
 {
        u32 cnt = *history;
 
+       if (i == st->first_insn_idx) {
+               if (cnt == 0)
+                       return -ENOENT;
+               if (cnt == 1 && st->jmp_history[0].idx == i)
+                       return -ENOENT;
+       }
+
        if (cnt && st->jmp_history[cnt - 1].idx == i) {
                i = st->jmp_history[cnt - 1].prev_idx;
                (*history)--;
@@ -3426,7 +3452,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
        if (class == BPF_ALU || class == BPF_ALU64) {
                if (!bt_is_reg_set(bt, dreg))
                        return 0;
-               if (opcode == BPF_MOV) {
+               if (opcode == BPF_END || opcode == BPF_NEG) {
+                       /* sreg is reserved and unused
+                        * dreg still needs precision before this insn
+                        */
+                       return 0;
+               } else if (opcode == BPF_MOV) {
                        if (BPF_SRC(insn->code) == BPF_X) {
                                /* dreg = sreg or dreg = (s8, s16, s32)sreg
                                 * dreg needs precision after this insn
@@ -4080,10 +4111,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
                                 * Nothing to be tracked further in the parent state.
                                 */
                                return 0;
-                       if (i == first_idx)
-                               break;
                        subseq_idx = i;
                        i = get_prev_insn_idx(st, i, &history);
+                       if (i == -ENOENT)
+                               break;
                        if (i >= env->prog->len) {
                                /* This can happen if backtracking reached insn 0
                                 * and there are still reg_mask or stack_mask
@@ -4300,14 +4331,11 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
        struct bpf_reg_state *reg = NULL;
        u32 dst_reg = insn->dst_reg;
 
-       err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
-       if (err)
-               return err;
        /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
         * so it's aligned access and [off, off + size) are within stack limits
         */
        if (!env->allow_ptr_leaks &&
-           state->stack[spi].slot_type[0] == STACK_SPILL &&
+           is_spilled_reg(&state->stack[spi]) &&
            size != BPF_REG_SIZE) {
                verbose(env, "attempt to corrupt spilled pointer on stack\n");
                return -EACCES;
@@ -4358,7 +4386,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                   insn->imm != 0 && env->bpf_capable) {
                struct bpf_reg_state fake_reg = {};
 
-               __mark_reg_known(&fake_reg, (u32)insn->imm);
+               __mark_reg_known(&fake_reg, insn->imm);
                fake_reg.type = SCALAR_VALUE;
                save_register_state(state, spi, &fake_reg, size);
        } else if (reg && is_spillable_regtype(reg->type)) {
@@ -4458,10 +4486,6 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
            (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
                writing_zero = true;
 
-       err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
-       if (err)
-               return err;
-
        for (i = min_off; i < max_off; i++) {
                int spi;
 
@@ -5576,20 +5600,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
                                           strict);
 }
 
-static int update_stack_depth(struct bpf_verifier_env *env,
-                             const struct bpf_func_state *func,
-                             int off)
-{
-       u16 stack = env->subprog_info[func->subprogno].stack_depth;
-
-       if (stack >= -off)
-               return 0;
-
-       /* update known max for given subprogram */
-       env->subprog_info[func->subprogno].stack_depth = -off;
-       return 0;
-}
-
 /* starting from main bpf function walk all instructions of the function
  * and recursively walk all callees that given function can call.
  * Ignore jump and exit insns.
@@ -6348,13 +6358,14 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
  * The minimum valid offset is -MAX_BPF_STACK for writes, and
  * -state->allocated_stack for reads.
  */
-static int check_stack_slot_within_bounds(int off,
-                                         struct bpf_func_state *state,
-                                         enum bpf_access_type t)
+static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
+                                          s64 off,
+                                          struct bpf_func_state *state,
+                                          enum bpf_access_type t)
 {
        int min_valid_off;
 
-       if (t == BPF_WRITE)
+       if (t == BPF_WRITE || env->allow_uninit_stack)
                min_valid_off = -MAX_BPF_STACK;
        else
                min_valid_off = -state->allocated_stack;
@@ -6377,7 +6388,7 @@ static int check_stack_access_within_bounds(
        struct bpf_reg_state *regs = cur_regs(env);
        struct bpf_reg_state *reg = regs + regno;
        struct bpf_func_state *state = func(env, reg);
-       int min_off, max_off;
+       s64 min_off, max_off;
        int err;
        char *err_extra;
 
@@ -6390,11 +6401,8 @@ static int check_stack_access_within_bounds(
                err_extra = " write to";
 
        if (tnum_is_const(reg->var_off)) {
-               min_off = reg->var_off.value + off;
-               if (access_size > 0)
-                       max_off = min_off + access_size - 1;
-               else
-                       max_off = min_off;
+               min_off = (s64)reg->var_off.value + off;
+               max_off = min_off + access_size;
        } else {
                if (reg->smax_value >= BPF_MAX_VAR_OFF ||
                    reg->smin_value <= -BPF_MAX_VAR_OFF) {
@@ -6403,15 +6411,12 @@ static int check_stack_access_within_bounds(
                        return -EACCES;
                }
                min_off = reg->smin_value + off;
-               if (access_size > 0)
-                       max_off = reg->smax_value + off + access_size - 1;
-               else
-                       max_off = min_off;
+               max_off = reg->smax_value + off + access_size;
        }
 
-       err = check_stack_slot_within_bounds(min_off, state, type);
-       if (!err)
-               err = check_stack_slot_within_bounds(max_off, state, type);
+       err = check_stack_slot_within_bounds(env, min_off, state, type);
+       if (!err && max_off > 0)
+               err = -EINVAL; /* out of stack access into non-negative offsets */
 
        if (err) {
                if (tnum_is_const(reg->var_off)) {
@@ -6424,8 +6429,10 @@ static int check_stack_access_within_bounds(
                        verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
                                err_extra, regno, tn_buf, access_size);
                }
+               return err;
        }
-       return err;
+
+       return grow_stack_state(env, state, round_up(-min_off, BPF_REG_SIZE));
 }
 
 /* check whether memory at (regno + off) is accessible for t = (read | write)
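
The bounds check now also takes care of growing the stack: on success it calls
grow_stack_state() with round_up(-min_off, BPF_REG_SIZE), which extends
state->allocated_stack and, as added to grow_stack_state() earlier in this diff,
bumps the subprogram's stack_depth high-water mark, making the separate
update_stack_depth() calls removed below redundant. A worked example with
illustrative numbers (not taken from the patch):

	/* *(u32 *)(r10 - 12) = w1  =>  off = -12, access_size = 4      */
	min_off = 0 + (-12) = -12    /* r10 carries a constant var_off of 0 */
	max_off = -12 + 4   = -8     /* <= 0, nothing above the frame       */
	check_stack_slot_within_bounds(-12)  /* -MAX_BPF_STACK <= -12 <= -1 */
	round_up(12, BPF_REG_SIZE) = 16      /* allocated_stack and the     */
	                                     /* subprog stack_depth >= 16   */
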
@@ -6440,7 +6447,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 {
        struct bpf_reg_state *regs = cur_regs(env);
        struct bpf_reg_state *reg = regs + regno;
-       struct bpf_func_state *state;
        int size, err = 0;
 
        size = bpf_size_to_bytes(bpf_size);
@@ -6583,11 +6589,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                if (err)
                        return err;
 
-               state = func(env, reg);
-               err = update_stack_depth(env, state, off);
-               if (err)
-                       return err;
-
                if (t == BPF_READ)
                        err = check_stack_read(env, regno, off, size,
                                               value_regno);
@@ -6782,7 +6783,8 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 
 /* When register 'regno' is used to read the stack (either directly or through
  * a helper function) make sure that it's within stack boundary and, depending
- * on the access type, that all elements of the stack are initialized.
+ * on the access type and privileges, that all elements of the stack are
+ * initialized.
  *
  * 'off' includes 'regno->off', but not its dynamic part (if any).
  *
@@ -6890,8 +6892,11 @@ static int check_stack_range_initialized(
 
                slot = -i - 1;
                spi = slot / BPF_REG_SIZE;
-               if (state->allocated_stack <= slot)
-                       goto err;
+               if (state->allocated_stack <= slot) {
+                       verbose(env, "verifier bug: allocated_stack too small");
+                       return -EFAULT;
+               }
+
                stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
                if (*stype == STACK_MISC)
                        goto mark;
@@ -6915,7 +6920,6 @@ static int check_stack_range_initialized(
                        goto mark;
                }
 
-err:
                if (tnum_is_const(reg->var_off)) {
                        verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
                                err_extra, regno, min_off, i - min_off, access_size);
@@ -6940,7 +6944,7 @@ mark:
                 * helper may write to the entire memory range.
                 */
        }
-       return update_stack_depth(env, state, min_off);
+       return 0;
 }
 
 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
@@ -9261,6 +9265,13 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
                        verbose(env, "R0 not a scalar value\n");
                        return -EACCES;
                }
+
+               /* we are going to rely on register's precise value */
+               err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
+               err = err ?: mark_chain_precision(env, BPF_REG_0);
+               if (err)
+                       return err;
+
                if (!tnum_in(range, r0->var_off)) {
                        verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
                        return -EINVAL;
@@ -11202,6 +11213,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
                        break;
                }
                case KF_ARG_PTR_TO_CALLBACK:
+                       if (reg->type != PTR_TO_FUNC) {
+                               verbose(env, "arg%d expected pointer to func\n", i);
+                               return -EINVAL;
+                       }
                        meta->subprogno = reg->subprogno;
                        break;
                case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
@@ -12072,6 +12087,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        }
 
        switch (base_type(ptr_reg->type)) {
+       case PTR_TO_FLOW_KEYS:
+               if (known)
+                       break;
+               fallthrough;
        case CONST_PTR_TO_MAP:
                /* smin_val represents the known value */
                if (known && smin_val == 0 && opcode == BPF_ADD)
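
This hunk is the change named in the subject line: pointer arithmetic on
PTR_TO_FLOW_KEYS is now only allowed with a verifier-known constant offset; a
variable offset falls through to the same rejection path as CONST_PTR_TO_MAP.
A minimal sketch of a program that would now be refused (hypothetical flow
dissector; the section and variable names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("flow_dissector")
	int dissect(struct __sk_buff *skb)
	{
		struct bpf_flow_keys *keys = skb->flow_keys;
		__u32 off = skb->len & 0x7;  /* scalar, not a known constant */

		/* keys + off is variable-offset ALU on PTR_TO_FLOW_KEYS;
		 * the verifier now rejects the addition itself.
		 */
		return *((__u8 *)keys + off) ? BPF_OK : BPF_DROP;
	}
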
@@ -14135,6 +14154,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                    !sanitize_speculative_path(env, insn, *insn_idx + 1,
                                               *insn_idx))
                        return -EFAULT;
+               if (env->log.level & BPF_LOG_LEVEL)
+                       print_insn_state(env, this_branch->frame[this_branch->curframe]);
                *insn_idx += insn->off;
                return 0;
        } else if (pred == 0) {
@@ -14147,6 +14168,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                                               *insn_idx + insn->off + 1,
                                               *insn_idx))
                        return -EFAULT;
+               if (env->log.level & BPF_LOG_LEVEL)
+                       print_insn_state(env, this_branch->frame[this_branch->curframe]);
                return 0;
        }
 
@@ -14725,8 +14748,7 @@ enum {
  * w - next instruction
  * e - edge
  */
-static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
-                    bool loop_ok)
+static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
 {
        int *insn_stack = env->cfg.insn_stack;
        int *insn_state = env->cfg.insn_state;
@@ -14758,7 +14780,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
                insn_stack[env->cfg.cur_stack++] = w;
                return KEEP_EXPLORING;
        } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
-               if (loop_ok && env->bpf_capable)
+               if (env->bpf_capable)
                        return DONE_EXPLORING;
                verbose_linfo(env, t, "%d: ", t);
                verbose_linfo(env, w, "%d: ", w);
@@ -14778,24 +14800,20 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
                                struct bpf_verifier_env *env,
                                bool visit_callee)
 {
-       int ret;
+       int ret, insn_sz;
 
-       ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
+       insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
+       ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
        if (ret)
                return ret;
 
-       mark_prune_point(env, t + 1);
+       mark_prune_point(env, t + insn_sz);
        /* when we exit from subprog, we need to record non-linear history */
-       mark_jmp_point(env, t + 1);
+       mark_jmp_point(env, t + insn_sz);
 
        if (visit_callee) {
                mark_prune_point(env, t);
-               ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
-                               /* It's ok to allow recursion from CFG point of
-                                * view. __check_func_call() will do the actual
-                                * check.
-                                */
-                               bpf_pseudo_func(insns + t));
+               ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
        }
        return ret;
 }
@@ -14808,15 +14826,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
        struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-       int ret, off;
+       int ret, off, insn_sz;
 
        if (bpf_pseudo_func(insn))
                return visit_func_call_insn(t, insns, env, true);
 
        /* All non-branch instructions have a single fall-through edge. */
        if (BPF_CLASS(insn->code) != BPF_JMP &&
-           BPF_CLASS(insn->code) != BPF_JMP32)
-               return push_insn(t, t + 1, FALLTHROUGH, env, false);
+           BPF_CLASS(insn->code) != BPF_JMP32) {
+               insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+               return push_insn(t, t + insn_sz, FALLTHROUGH, env);
+       }
 
        switch (BPF_OP(insn->code)) {
        case BPF_EXIT:
@@ -14862,8 +14882,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                        off = insn->imm;
 
                /* unconditional jump with single edge */
-               ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
-                               true);
+               ret = push_insn(t, t + off + 1, FALLTHROUGH, env);
                if (ret)
                        return ret;
 
@@ -14876,11 +14895,11 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                /* conditional jump with two edges */
                mark_prune_point(env, t);
 
-               ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
+               ret = push_insn(t, t + 1, FALLTHROUGH, env);
                if (ret)
                        return ret;
 
-               return push_insn(t, t + insn->off + 1, BRANCH, env, true);
+               return push_insn(t, t + insn->off + 1, BRANCH, env);
        }
 }
 
@@ -14935,11 +14954,21 @@ static int check_cfg(struct bpf_verifier_env *env)
        }
 
        for (i = 0; i < insn_cnt; i++) {
+               struct bpf_insn *insn = &env->prog->insnsi[i];
+
                if (insn_state[i] != EXPLORED) {
                        verbose(env, "unreachable insn %d\n", i);
                        ret = -EINVAL;
                        goto err_free;
                }
+               if (bpf_is_ldimm64(insn)) {
+                       if (insn_state[i + 1] != 0) {
+                               verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
+                               ret = -EINVAL;
+                               goto err_free;
+                       }
+                       i++; /* skip second half of ldimm64 */
+               }
        }
        ret = 0; /* cfg looks good */
 
@@ -19641,6 +19670,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
        if (!tr)
                return -ENOMEM;
 
+       if (tgt_prog && tgt_prog->aux->tail_call_reachable)
+               tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
+
        prog->aux->dst_trampoline = tr;
        return 0;
 }