bpf: Guard stack limits against 32bit overflow
[platform/kernel/linux-rpi.git] / kernel / bpf / verifier.c
index 173a8ae..4759950 100644 (file)
@@ -3444,7 +3444,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
        if (class == BPF_ALU || class == BPF_ALU64) {
                if (!bt_is_reg_set(bt, dreg))
                        return 0;
-               if (opcode == BPF_MOV) {
+               if (opcode == BPF_END || opcode == BPF_NEG) {
+                       /* sreg is reserved and unused
+                        * dreg still needs precision before this insn
+                        */
+                       return 0;
+               } else if (opcode == BPF_MOV) {
                        if (BPF_SRC(insn->code) == BPF_X) {
                                /* dreg = sreg or dreg = (s8, s16, s32)sreg
                                 * dreg needs precision after this insn
@@ -4325,7 +4330,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
         * so it's aligned access and [off, off + size) are within stack limits
         */
        if (!env->allow_ptr_leaks &&
-           state->stack[spi].slot_type[0] == STACK_SPILL &&
+           is_spilled_reg(&state->stack[spi]) &&
            size != BPF_REG_SIZE) {
                verbose(env, "attempt to corrupt spilled pointer on stack\n");
                return -EACCES;
@@ -6366,7 +6371,7 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
  * The minimum valid offset is -MAX_BPF_STACK for writes, and
  * -state->allocated_stack for reads.
  */
-static int check_stack_slot_within_bounds(int off,
+static int check_stack_slot_within_bounds(s64 off,
                                          struct bpf_func_state *state,
                                          enum bpf_access_type t)
 {
@@ -6395,7 +6400,7 @@ static int check_stack_access_within_bounds(
        struct bpf_reg_state *regs = cur_regs(env);
        struct bpf_reg_state *reg = regs + regno;
        struct bpf_func_state *state = func(env, reg);
-       int min_off, max_off;
+       s64 min_off, max_off;
        int err;
        char *err_extra;
 
@@ -6408,11 +6413,8 @@ static int check_stack_access_within_bounds(
                err_extra = " write to";
 
        if (tnum_is_const(reg->var_off)) {
-               min_off = reg->var_off.value + off;
-               if (access_size > 0)
-                       max_off = min_off + access_size - 1;
-               else
-                       max_off = min_off;
+               min_off = (s64)reg->var_off.value + off;
+               max_off = min_off + access_size;
        } else {
                if (reg->smax_value >= BPF_MAX_VAR_OFF ||
                    reg->smin_value <= -BPF_MAX_VAR_OFF) {
@@ -6421,15 +6423,12 @@ static int check_stack_access_within_bounds(
                        return -EACCES;
                }
                min_off = reg->smin_value + off;
-               if (access_size > 0)
-                       max_off = reg->smax_value + off + access_size - 1;
-               else
-                       max_off = min_off;
+               max_off = reg->smax_value + off + access_size;
        }
 
        err = check_stack_slot_within_bounds(min_off, state, type);
-       if (!err)
-               err = check_stack_slot_within_bounds(max_off, state, type);
+       if (!err && max_off > 0)
+               err = -EINVAL; /* out of stack access into non-negative offsets */
 
        if (err) {
                if (tnum_is_const(reg->var_off)) {
@@ -9279,6 +9278,13 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
                        verbose(env, "R0 not a scalar value\n");
                        return -EACCES;
                }
+
+               /* we are going to rely on register's precise value */
+               err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
+               err = err ?: mark_chain_precision(env, BPF_REG_0);
+               if (err)
+                       return err;
+
                if (!tnum_in(range, r0->var_off)) {
                        verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
                        return -EINVAL;