bpf: Check the other end of slot_type for STACK_SPILL
author Martin KaFai Lau <kafai@fb.com>
Wed, 22 Sep 2021 00:49:34 +0000 (17:49 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 31 Dec 2022 12:14:08 +0000 (13:14 +0100)
[ Upstream commit 27113c59b6d0a587b29ae72d4ff3f832f58b0651 ]

Every 8 bytes of the stack is tracked by a bpf_stack_state.
Within each bpf_stack_state, there is a 'u8 slot_type[8]' to track
the type of each byte.  Verifier tests slot_type[0] == STACK_SPILL
to decide if the spilled reg state is saved.  Verifier currently only
saves the reg state if the whole 8 bytes are spilled to the stack,
so checking the slot_type[7] is the same as checking slot_type[0].

The later patch will allow the verifier to save the bounded scalar
reg also for a <8 bytes spill.  There is an llvm patch [1] to ensure
the <8 bytes spill will be 8-byte aligned, so checking
slot_type[7] instead of slot_type[0] is required.

While at it, this patch refactors the slot_type[0] == STACK_SPILL
test into a new function is_spilled_reg() and changes the
slot_type[0] check to a slot_type[7] check there as well.

[1] https://reviews.llvm.org/D109073

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210922004934.624194-1-kafai@fb.com
Stable-dep-of: 529409ea92d5 ("bpf: propagate precision across all frames, not just the last one")
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/bpf/verifier.c

index 98d182d..a0a1061 100644 (file)
@@ -606,6 +606,14 @@ static const char *kernel_type_name(const struct btf* btf, u32 id)
        return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
 }
 
+/* The reg state of a pointer or a bounded scalar was saved when
+ * it was spilled to the stack.
+ */
+static bool is_spilled_reg(const struct bpf_stack_state *stack)
+{
+       return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
+}
+
 static void print_verifier_state(struct bpf_verifier_env *env,
                                 const struct bpf_func_state *state)
 {
@@ -709,7 +717,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
                        continue;
                verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
                print_liveness(env, state->stack[i].spilled_ptr.live);
-               if (state->stack[i].slot_type[0] == STACK_SPILL) {
+               if (is_spilled_reg(&state->stack[i])) {
                        reg = &state->stack[i].spilled_ptr;
                        t = reg->type;
                        verbose(env, "=%s", reg_type_str(env, t));
@@ -2351,7 +2359,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
                                reg->precise = true;
                        }
                        for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
-                               if (func->stack[j].slot_type[0] != STACK_SPILL)
+                               if (!is_spilled_reg(&func->stack[j]))
                                        continue;
                                reg = &func->stack[j].spilled_ptr;
                                if (reg->type != SCALAR_VALUE)
@@ -2393,7 +2401,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
        }
 
        while (spi >= 0) {
-               if (func->stack[spi].slot_type[0] != STACK_SPILL) {
+               if (!is_spilled_reg(&func->stack[spi])) {
                        stack_mask = 0;
                        break;
                }
@@ -2492,7 +2500,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
                                return 0;
                        }
 
-                       if (func->stack[i].slot_type[0] != STACK_SPILL) {
+                       if (!is_spilled_reg(&func->stack[i])) {
                                stack_mask &= ~(1ull << i);
                                continue;
                        }
@@ -2682,7 +2690,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                /* regular write of data into stack destroys any spilled ptr */
                state->stack[spi].spilled_ptr.type = NOT_INIT;
                /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
-               if (state->stack[spi].slot_type[0] == STACK_SPILL)
+               if (is_spilled_reg(&state->stack[spi]))
                        for (i = 0; i < BPF_REG_SIZE; i++)
                                state->stack[spi].slot_type[i] = STACK_MISC;
 
@@ -2895,7 +2903,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
        stype = reg_state->stack[spi].slot_type;
        reg = &reg_state->stack[spi].spilled_ptr;
 
-       if (stype[0] == STACK_SPILL) {
+       if (is_spilled_reg(&reg_state->stack[spi])) {
                if (size != BPF_REG_SIZE) {
                        if (reg->type != SCALAR_VALUE) {
                                verbose_linfo(env, env->insn_idx, "; ");
@@ -4534,11 +4542,11 @@ static int check_stack_range_initialized(
                        goto mark;
                }
 
-               if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+               if (is_spilled_reg(&state->stack[spi]) &&
                    state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
                        goto mark;
 
-               if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+               if (is_spilled_reg(&state->stack[spi]) &&
                    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
                     env->allow_ptr_leaks)) {
                        if (clobber) {
@@ -10342,9 +10350,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
                         * return false to continue verification of this path
                         */
                        return false;
-               if (i % BPF_REG_SIZE)
+               if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
                        continue;
-               if (old->stack[spi].slot_type[0] != STACK_SPILL)
+               if (!is_spilled_reg(&old->stack[spi]))
                        continue;
                if (!regsafe(env, &old->stack[spi].spilled_ptr,
                             &cur->stack[spi].spilled_ptr, idmap))
@@ -10551,7 +10559,7 @@ static int propagate_precision(struct bpf_verifier_env *env,
        }
 
        for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-               if (state->stack[i].slot_type[0] != STACK_SPILL)
+               if (!is_spilled_reg(&state->stack[i]))
                        continue;
                state_reg = &state->stack[i].spilled_ptr;
                if (state_reg->type != SCALAR_VALUE ||