bpf: Guard stack limits against 32bit overflow
[platform/kernel/linux-rpi.git] kernel/bpf/verifier.c
index 873ade1..4759950 100644
@@ -1515,7 +1515,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
        if (state->in_async_callback_fn)
                verbose(env, " async_cb");
        verbose(env, "\n");
-       mark_verifier_state_clean(env);
+       if (!print_all)
+               mark_verifier_state_clean(env);
 }
 
 static inline u32 vlog_alignment(u32 pos)
@@ -3200,12 +3201,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 
 /* Backtrack one insn at a time. If idx is not at the top of recorded
  * history then previous instruction came from straight line execution.
+ * Return -ENOENT if we exhausted all instructions within the given state.
+ *
+ * It's legal to have a bit of looping with the same starting and ending
+ * insn index within the same state, e.g.: 3->4->5->3, so just because the
+ * current instruction index is the same as the state's first_idx doesn't
+ * mean we are done. If there is still some jump history left, we should
+ * keep going. We also need to take into account that we might have a jump
+ * history entry between the given state's parent and the state itself, due
+ * to checkpointing. In this case, the entry records a jump from the last
+ * instruction of the parent state to the first instruction of the given
+ * state.
  */
 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
                             u32 *history)
 {
        u32 cnt = *history;
 
+       if (i == st->first_insn_idx) {
+               if (cnt == 0)
+                       return -ENOENT;
+               if (cnt == 1 && st->jmp_history[0].idx == i)
+                       return -ENOENT;
+       }
+
        if (cnt && st->jmp_history[cnt - 1].idx == i) {
                i = st->jmp_history[cnt - 1].prev_idx;
                (*history)--;
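
The looping scenario described in the comment above can be walked through with
a small userspace sketch; the helper below only mirrors the new
get_prev_insn_idx() logic, the history contents are made up for illustration,
and -1 stands in for -ENOENT (this is not verifier code).

#include <stdio.h>

struct jmp { int prev_idx, idx; };	/* shaped like the jmp_history entries */

static int prev_insn_idx(struct jmp *hist, int first_idx, int i, int *cnt)
{
	if (i == first_idx) {
		if (*cnt == 0)
			return -1;
		if (*cnt == 1 && hist[0].idx == i)
			return -1;	/* only the parent->state jump remains */
	}
	if (*cnt && hist[*cnt - 1].idx == i) {
		i = hist[*cnt - 1].prev_idx;	/* follow the recorded jump */
		(*cnt)--;
	} else {
		i--;				/* straight-line execution */
	}
	return i;
}

int main(void)
{
	/* the state starts at insn 3 after a jump from insn 1, then the
	 * program looped 3->4->5->3, so backtracking begins at insn 3 again */
	struct jmp hist[] = { { .prev_idx = 1, .idx = 3 },
			      { .prev_idx = 5, .idx = 3 } };
	int cnt = 2, i = 3;

	while (i >= 0) {
		printf("backtrack insn %d\n", i);	/* prints 3, 5, 4, 3 */
		i = prev_insn_idx(hist, 3, i, &cnt);
	}
	return 0;
}
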
@@ -3426,7 +3444,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
        if (class == BPF_ALU || class == BPF_ALU64) {
                if (!bt_is_reg_set(bt, dreg))
                        return 0;
-               if (opcode == BPF_MOV) {
+               if (opcode == BPF_END || opcode == BPF_NEG) {
+                       /* sreg is reserved and unused;
+                        * dreg still needs precision before this insn
+                        */
+                       return 0;
+               } else if (opcode == BPF_MOV) {
                        if (BPF_SRC(insn->code) == BPF_X) {
                                /* dreg = sreg or dreg = (s8, s16, s32)sreg
                                 * dreg needs precision after this insn
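
For BPF_NEG there is no source operand at all, and for BPF_END the "source"
bit of insn->code selects the byte order (BPF_TO_LE/BPF_TO_BE) while src_reg
stays reserved, so backtracking must not treat these like ALU ops that take
precision from sreg. A small userspace sketch of the encoding, assuming the
uapi header is available:

#include <linux/bpf.h>
#include <stdio.h>

int main(void)
{
	/* r1 = be32 r1: dreg is both the source and the destination */
	struct bpf_insn insn = {
		.code    = BPF_ALU | BPF_END | BPF_TO_BE,
		.dst_reg = BPF_REG_1,
		.imm     = 32,
	};

	printf("class=%#x op=%#x byte-order bit=%#x src_reg=%d (reserved)\n",
	       BPF_CLASS(insn.code), BPF_OP(insn.code),
	       BPF_SRC(insn.code), insn.src_reg);
	return 0;
}
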
@@ -4080,10 +4103,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
                                 * Nothing to be tracked further in the parent state.
                                 */
                                return 0;
-                       if (i == first_idx)
-                               break;
                        subseq_idx = i;
                        i = get_prev_insn_idx(st, i, &history);
+                       if (i == -ENOENT)
+                               break;
                        if (i >= env->prog->len) {
                                /* This can happen if backtracking reached insn 0
                                 * and there are still reg_mask or stack_mask
@@ -4307,7 +4330,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
         * so it's aligned access and [off, off + size) are within stack limits
         */
        if (!env->allow_ptr_leaks &&
-           state->stack[spi].slot_type[0] == STACK_SPILL &&
+           is_spilled_reg(&state->stack[spi]) &&
            size != BPF_REG_SIZE) {
                verbose(env, "attempt to corrupt spilled pointer on stack\n");
                return -EACCES;
@@ -4358,7 +4381,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                   insn->imm != 0 && env->bpf_capable) {
                struct bpf_reg_state fake_reg = {};
 
-               __mark_reg_known(&fake_reg, (u32)insn->imm);
+               __mark_reg_known(&fake_reg, insn->imm);
                fake_reg.type = SCALAR_VALUE;
                save_register_state(state, spi, &fake_reg, size);
        } else if (reg && is_spillable_regtype(reg->type)) {
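
Dropping the (u32) cast matters for negative immediates: insn->imm is a signed
32-bit value, and widening it to the register's u64 through a u32 first loses
the sign extension. A standalone illustration of the two conversions (not
verifier code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t imm = -1;		/* e.g. *(u64 *)(r10 - 8) = -1 */
	uint64_t zext = (uint32_t)imm;	/* old path: 0x00000000ffffffff */
	uint64_t sext = (int64_t)imm;	/* new path: 0xffffffffffffffff */

	printf("zero-extended: %#llx\nsign-extended: %#llx\n",
	       (unsigned long long)zext, (unsigned long long)sext);
	return 0;
}
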
@@ -6348,7 +6371,7 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
  * The minimum valid offset is -MAX_BPF_STACK for writes, and
  * -state->allocated_stack for reads.
  */
-static int check_stack_slot_within_bounds(int off,
+static int check_stack_slot_within_bounds(s64 off,
                                          struct bpf_func_state *state,
                                          enum bpf_access_type t)
 {
@@ -6377,7 +6400,7 @@ static int check_stack_access_within_bounds(
        struct bpf_reg_state *regs = cur_regs(env);
        struct bpf_reg_state *reg = regs + regno;
        struct bpf_func_state *state = func(env, reg);
-       int min_off, max_off;
+       s64 min_off, max_off;
        int err;
        char *err_extra;
 
@@ -6390,11 +6413,8 @@ static int check_stack_access_within_bounds(
                err_extra = " write to";
 
        if (tnum_is_const(reg->var_off)) {
-               min_off = reg->var_off.value + off;
-               if (access_size > 0)
-                       max_off = min_off + access_size - 1;
-               else
-                       max_off = min_off;
+               min_off = (s64)reg->var_off.value + off;
+               max_off = min_off + access_size;
        } else {
                if (reg->smax_value >= BPF_MAX_VAR_OFF ||
                    reg->smin_value <= -BPF_MAX_VAR_OFF) {
@@ -6403,15 +6423,12 @@ static int check_stack_access_within_bounds(
                        return -EACCES;
                }
                min_off = reg->smin_value + off;
-               if (access_size > 0)
-                       max_off = reg->smax_value + off + access_size - 1;
-               else
-                       max_off = min_off;
+               max_off = reg->smax_value + off + access_size;
        }
 
        err = check_stack_slot_within_bounds(min_off, state, type);
-       if (!err)
-               err = check_stack_slot_within_bounds(max_off, state, type);
+       if (!err && max_off > 0)
+               err = -EINVAL; /* out of stack access into non-negative offsets */
 
        if (err) {
                if (tnum_is_const(reg->var_off)) {
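
Promoting min_off/max_off (and the check_stack_slot_within_bounds() argument)
to s64 closes a 32-bit overflow: with a large constant offset the old int
arithmetic could wrap to a small or even negative value and slip past the
bounds check. A minimal userspace illustration of the truncation, with
made-up values (not verifier code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t var_off_value = 0x100000000ULL;	/* 2^32, far off-stack */
	int off = -8;

	/* old: u64 + int silently truncated into a 32-bit min_off */
	int min_off_32 = var_off_value + off;
	/* new: kept in 64 bits, as the verifier now does */
	int64_t min_off_64 = (int64_t)var_off_value + off;

	printf("32-bit min_off: %d (looks like a valid stack offset)\n", min_off_32);
	printf("64-bit min_off: %lld (rejected by the bounds check)\n",
	       (long long)min_off_64);
	return 0;
}
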
@@ -9261,6 +9278,13 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
                        verbose(env, "R0 not a scalar value\n");
                        return -EACCES;
                }
+
+               /* we are going to rely on register's precise value */
+               err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
+               err = err ?: mark_chain_precision(env, BPF_REG_0);
+               if (err)
+                       return err;
+
                if (!tnum_in(range, r0->var_off)) {
                        verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
                        return -EINVAL;
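
The new mark_reg_read()/mark_chain_precision() calls matter because the
callback's return value is range-checked right below (tnum_in() against
"range"); for example, a bpf_loop() callback must return 0 or 1. A hedged
BPF-C sketch of such a callback, assuming libbpf's headers; the function and
section names are made up for illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* bpf_loop() callback: the verifier checks its R0 against [0, 1] on exit */
static long count_cb(__u64 index, void *ctx)
{
	__u64 *sum = ctx;

	*sum += index;
	return index < 16 ? 0 : 1;	/* 0 = continue, 1 = break */
}

SEC("socket")
int sum_prog(void *ctx)
{
	__u64 sum = 0;

	bpf_loop(32, count_cb, &sum, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
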
@@ -11202,6 +11226,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
                        break;
                }
                case KF_ARG_PTR_TO_CALLBACK:
+                       if (reg->type != PTR_TO_FUNC) {
+                               verbose(env, "arg%d expected pointer to func\n", i);
+                               return -EINVAL;
+                       }
                        meta->subprogno = reg->subprogno;
                        break;
                case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
@@ -14135,6 +14163,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                    !sanitize_speculative_path(env, insn, *insn_idx + 1,
                                               *insn_idx))
                        return -EFAULT;
+               if (env->log.level & BPF_LOG_LEVEL)
+                       print_insn_state(env, this_branch->frame[this_branch->curframe]);
                *insn_idx += insn->off;
                return 0;
        } else if (pred == 0) {
@@ -14147,6 +14177,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                                               *insn_idx + insn->off + 1,
                                               *insn_idx))
                        return -EFAULT;
+               if (env->log.level & BPF_LOG_LEVEL)
+                       print_insn_state(env, this_branch->frame[this_branch->curframe]);
                return 0;
        }
 
@@ -14725,8 +14757,7 @@ enum {
  * w - next instruction
  * e - edge
  */
-static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
-                    bool loop_ok)
+static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
 {
        int *insn_stack = env->cfg.insn_stack;
        int *insn_state = env->cfg.insn_state;
@@ -14758,7 +14789,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
                insn_stack[env->cfg.cur_stack++] = w;
                return KEEP_EXPLORING;
        } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
-               if (loop_ok && env->bpf_capable)
+               if (env->bpf_capable)
                        return DONE_EXPLORING;
                verbose_linfo(env, t, "%d: ", t);
                verbose_linfo(env, w, "%d: ", w);
@@ -14778,24 +14809,20 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
                                struct bpf_verifier_env *env,
                                bool visit_callee)
 {
-       int ret;
+       int ret, insn_sz;
 
-       ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
+       insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
+       ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
        if (ret)
                return ret;
 
-       mark_prune_point(env, t + 1);
+       mark_prune_point(env, t + insn_sz);
        /* when we exit from subprog, we need to record non-linear history */
-       mark_jmp_point(env, t + 1);
+       mark_jmp_point(env, t + insn_sz);
 
        if (visit_callee) {
                mark_prune_point(env, t);
-               ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
-                               /* It's ok to allow recursion from CFG point of
-                                * view. __check_func_call() will do the actual
-                                * check.
-                                */
-                               bpf_pseudo_func(insns + t));
+               ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
        }
        return ret;
 }
@@ -14808,15 +14835,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
        struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-       int ret, off;
+       int ret, off, insn_sz;
 
        if (bpf_pseudo_func(insn))
                return visit_func_call_insn(t, insns, env, true);
 
        /* All non-branch instructions have a single fall-through edge. */
        if (BPF_CLASS(insn->code) != BPF_JMP &&
-           BPF_CLASS(insn->code) != BPF_JMP32)
-               return push_insn(t, t + 1, FALLTHROUGH, env, false);
+           BPF_CLASS(insn->code) != BPF_JMP32) {
+               insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+               return push_insn(t, t + insn_sz, FALLTHROUGH, env);
+       }
 
        switch (BPF_OP(insn->code)) {
        case BPF_EXIT:
@@ -14862,8 +14891,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                        off = insn->imm;
 
                /* unconditional jump with single edge */
-               ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
-                               true);
+               ret = push_insn(t, t + off + 1, FALLTHROUGH, env);
                if (ret)
                        return ret;
 
@@ -14876,11 +14904,11 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                /* conditional jump with two edges */
                mark_prune_point(env, t);
 
-               ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
+               ret = push_insn(t, t + 1, FALLTHROUGH, env);
                if (ret)
                        return ret;
 
-               return push_insn(t, t + insn->off + 1, BRANCH, env, true);
+               return push_insn(t, t + insn->off + 1, BRANCH, env);
        }
 }
 
@@ -14935,11 +14963,21 @@ static int check_cfg(struct bpf_verifier_env *env)
        }
 
        for (i = 0; i < insn_cnt; i++) {
+               struct bpf_insn *insn = &env->prog->insnsi[i];
+
                if (insn_state[i] != EXPLORED) {
                        verbose(env, "unreachable insn %d\n", i);
                        ret = -EINVAL;
                        goto err_free;
                }
+               if (bpf_is_ldimm64(insn)) {
+                       if (insn_state[i + 1] != 0) {
+                               verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
+                               ret = -EINVAL;
+                               goto err_free;
+                       }
+                       i++; /* skip second half of ldimm64 */
+               }
        }
        ret = 0; /* cfg looks good */
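
The new check_cfg() check rejects jumps that land in the second slot of an
ldimm64: BPF_LD | BPF_DW | BPF_IMM occupies two struct bpf_insn slots, and the
second slot carries only the upper 32 bits of the immediate with a zero
opcode. A userspace sketch of that encoding, using the uapi header:

#include <linux/bpf.h>
#include <stdio.h>

int main(void)
{
	__u64 imm = 0x1122334455667788ULL;
	struct bpf_insn ldimm64[2] = {
		/* first slot: the real opcode plus the low 32 bits */
		{ .code = BPF_LD | BPF_DW | BPF_IMM,
		  .dst_reg = BPF_REG_1, .imm = (__u32)imm },
		/* second slot: opcode stays 0, only the high 32 bits */
		{ .imm = imm >> 32 },
	};

	printf("slot 0: code=%#x imm=%#x\n", ldimm64[0].code, ldimm64[0].imm);
	printf("slot 1: code=%#x imm=%#x\n", ldimm64[1].code, ldimm64[1].imm);
	return 0;
}
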
 
@@ -19641,6 +19679,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
        if (!tr)
                return -ENOMEM;
 
+       if (tgt_prog && tgt_prog->aux->tail_call_reachable)
+               tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
+
        prog->aux->dst_trampoline = tr;
        return 0;
 }