From: Piotr Krysiuk
Date: Tue, 16 Mar 2021 07:26:25 +0000 (+0100)
Subject: bpf: Simplify alu_limit masking for pointer arithmetic
X-Git-Tag: v5.10.25~9
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6a3504bf4006dd903eac93d37cdbad45726272b1;p=platform%2Fkernel%2Flinux-rpi.git

bpf: Simplify alu_limit masking for pointer arithmetic

commit b5871dca250cd391885218b99cc015aca1a51aea upstream.

Instead of having the mov32 with an aux->alu_limit - 1 immediate, move
this operation into retrieve_ptr_limit() to simplify the logic and to
allow for subsequent sanity boundary checks inside retrieve_ptr_limit().

This avoids a future situation where, at the time of the verifier
masking rewrite, we would run into an underflow that would not be sign
extended due to the nature of the mov32 instruction.

Signed-off-by: Piotr Krysiuk
Co-developed-by: Daniel Borkmann
Signed-off-by: Daniel Borkmann
Acked-by: Alexei Starovoitov
Signed-off-by: Greg Kroah-Hartman
---

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index be716ef..e6b238a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5342,16 +5342,16 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
 		 */
 		off = ptr_reg->off + ptr_reg->var_off.value;
 		if (mask_to_left)
-			*ptr_limit = MAX_BPF_STACK + off + 1;
+			*ptr_limit = MAX_BPF_STACK + off;
 		else
-			*ptr_limit = -off;
+			*ptr_limit = -off - 1;
 		return 0;
 	case PTR_TO_MAP_VALUE:
 		if (mask_to_left) {
-			*ptr_limit = ptr_reg->umax_value + ptr_reg->off + 1;
+			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
 		} else {
 			off = ptr_reg->smin_value + ptr_reg->off;
-			*ptr_limit = ptr_reg->map_ptr->value_size - off;
+			*ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
 		}
 		return 0;
 	default:
@@ -10946,7 +10946,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		off_reg = issrc ? insn->src_reg : insn->dst_reg;
 		if (isneg)
 			*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
-		*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
+		*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
 		*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
 		*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
 		*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
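
Editor's note (not part of the commit): the sign-extension concern in the
message can be illustrated with a small standalone C sketch. The mov32_imm()
helper below is a hypothetical model of BPF_MOV32_IMM semantics (write a
32-bit value and zero-extend it into the 64-bit register); it is not kernel
code, and the degenerate alu_limit value of 0 is an assumed example.

	/* Standalone sketch: why "alu_limit - 1" inside a 32-bit mov is risky. */
	#include <stdint.h>
	#include <stdio.h>

	/* Model of BPF_MOV32_IMM: the 32-bit result is zero-extended,
	 * never sign-extended, into the 64-bit destination register.
	 */
	static uint64_t mov32_imm(int32_t imm)
	{
		return (uint32_t)imm;
	}

	int main(void)
	{
		uint32_t alu_limit = 0;	/* hypothetical degenerate limit */

		/* Old scheme: the "- 1" underflows inside the mov32 immediate,
		 * so the register holds 0xffffffff rather than a 64-bit -1.
		 */
		uint64_t reg_ax_old = mov32_imm(alu_limit - 1);

		printf("old scheme: reg_ax = %#llx (not -1, masking breaks)\n",
		       (unsigned long long)reg_ax_old);
		return 0;
	}

With the "- 1" folded into retrieve_ptr_limit(), the computed limit can be
sanity-checked before any instructions are patched, instead of the underflow
surfacing only as a zero-extended value at masking time.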