Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index aedac2a..a4012b3 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1562,6 +1562,21 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
        reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
 }
 
+static void reg_bounds_sync(struct bpf_reg_state *reg)
+{
+       /* We might have learned new bounds from the var_off. */
+       __update_reg_bounds(reg);
+       /* We might have learned something about the sign bit. */
+       __reg_deduce_bounds(reg);
+       /* We might have learned some bits from the bounds. */
+       __reg_bound_offset(reg);
+       /* Intersecting with the old var_off might have improved our bounds
+        * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+        * then new var_off is (0; 0x7f...fc) which improves our umax.
+        */
+       __update_reg_bounds(reg);
+}
+
 static bool __reg32_bound_s64(s32 a)
 {
        return a >= 0 && a <= S32_MAX;
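The trailing __update_reg_bounds() call in reg_bounds_sync() is what turns the tighter var_off back into tighter numeric bounds. The scenario described in the comment can be reproduced with a small user-space sketch; the tnum helpers below are simplified re-implementations of tnum_range()/tnum_intersect() from kernel/bpf/tnum.c, shown for illustration only, not kernel code:

#include <stdint.h>
#include <stdio.h>

struct tnum { uint64_t value; uint64_t mask; };

/* Simplified tnum_range(): tnum covering the unsigned range [min, max]. */
static struct tnum tnum_range(uint64_t min, uint64_t max)
{
	uint64_t chi = min ^ max, delta;
	int bits = chi ? 64 - __builtin_clzll(chi) : 0;	/* fls64(chi) */

	if (bits > 63)
		return (struct tnum){ 0, ~0ULL };	/* fully unknown */
	delta = (1ULL << bits) - 1;
	return (struct tnum){ min & ~delta, delta };
}

/* Simplified tnum_intersect(): combine the known bits of both tnums. */
static struct tnum tnum_intersect(struct tnum a, struct tnum b)
{
	uint64_t v = a.value | b.value, mu = a.mask & b.mask;

	return (struct tnum){ v & ~mu, mu };
}

int main(void)
{
	uint64_t umin = 0, umax = 0x7fffffffffffffffULL;
	/* var_off (0; 0xf...fc): only the two low bits are known (to be 0) */
	struct tnum var_off = { 0, 0xfffffffffffffffcULL };
	/* __reg_bound_offset(): intersect var_off with the bounds range */
	struct tnum new_off = tnum_intersect(var_off, tnum_range(umin, umax));
	/* __update_reg_bounds(): recompute umax from the improved var_off */
	uint64_t new_umax = new_off.value | new_off.mask;

	printf("new var_off (%#llx; %#llx), umax %#llx -> %#llx\n",
	       (unsigned long long)new_off.value,
	       (unsigned long long)new_off.mask,
	       (unsigned long long)umax,
	       (unsigned long long)new_umax);
	return 0;
}

Running this prints a new var_off of (0; 0x7ffffffffffffffc) and an umax improved from 0x7fffffffffffffff to 0x7ffffffffffffffc, exactly the case the comment describes.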
@@ -1603,16 +1618,8 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
                 * so they do not impact tnum bounds calculation.
                 */
                __mark_reg64_unbounded(reg);
-               __update_reg_bounds(reg);
        }
-
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __reg_deduce_bounds(reg);
-       __reg_bound_offset(reg);
-       __update_reg_bounds(reg);
+       reg_bounds_sync(reg);
 }
 
 static bool __reg64_bound_s32(s64 a)
@@ -1628,7 +1635,6 @@ static bool __reg64_bound_u32(u64 a)
 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
 {
        __mark_reg32_unbounded(reg);
-
        if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
                reg->s32_min_value = (s32)reg->smin_value;
                reg->s32_max_value = (s32)reg->smax_value;
@@ -1637,14 +1643,7 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
                reg->u32_min_value = (u32)reg->umin_value;
                reg->u32_max_value = (u32)reg->umax_value;
        }
-
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __reg_deduce_bounds(reg);
-       __reg_bound_offset(reg);
-       __update_reg_bounds(reg);
+       reg_bounds_sync(reg);
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
@@ -5848,6 +5847,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
        struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
        enum bpf_arg_type arg_type = fn->arg_type[arg];
        enum bpf_reg_type type = reg->type;
+       u32 *arg_btf_id = NULL;
        int err = 0;
 
        if (arg_type == ARG_DONTCARE)
@@ -5884,7 +5884,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
                 */
                goto skip_type_check;
 
-       err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg], meta);
+       /* arg_btf_id and arg_size are in a union. */
+       if (base_type(arg_type) == ARG_PTR_TO_BTF_ID)
+               arg_btf_id = fn->arg_btf_id[arg];
+
+       err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
        if (err)
                return err;
 
@@ -6011,6 +6015,11 @@ skip_type_check:
                 * next is_mem_size argument below.
                 */
                meta->raw_mode = arg_type & MEM_UNINIT;
+               if (arg_type & MEM_FIXED_SIZE) {
+                       err = check_helper_mem_access(env, regno,
+                                                     fn->arg_size[arg], false,
+                                                     meta);
+               }
        } else if (arg_type_is_mem_size(arg_type)) {
                bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
 
@@ -6400,11 +6409,19 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
        return count <= 1;
 }
 
-static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
-                                   enum bpf_arg_type arg_next)
+static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
 {
-       return (base_type(arg_curr) == ARG_PTR_TO_MEM) !=
-               arg_type_is_mem_size(arg_next);
+       bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
+       bool has_size = fn->arg_size[arg] != 0;
+       bool is_next_size = false;
+
+       if (arg + 1 < ARRAY_SIZE(fn->arg_type))
+               is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
+
+       if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
+               return is_next_size;
+
+       return has_size == is_next_size || is_next_size == is_fixed;
 }
 
 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
@@ -6415,11 +6432,11 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
         * helper function specification.
         */
        if (arg_type_is_mem_size(fn->arg1_type) ||
-           base_type(fn->arg5_type) == ARG_PTR_TO_MEM ||
-           check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
-           check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
-           check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
-           check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
+           check_args_pair_invalid(fn, 0) ||
+           check_args_pair_invalid(fn, 1) ||
+           check_args_pair_invalid(fn, 2) ||
+           check_args_pair_invalid(fn, 3) ||
+           check_args_pair_invalid(fn, 4))
                return false;
 
        return true;
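Spelled out, the new pairing rule accepts exactly one way of conveying a memory argument's size; an illustrative walk-through of the boolean logic above (derived from the code, not part of the patch):

/*
 * For an argument with base_type(arg) == ARG_PTR_TO_MEM:
 *
 *   is_fixed  has_size  is_next_size   check_args_pair_invalid()
 *      0         0          0          true   (no size information at all)
 *      0         0          1          false  (size taken from the next arg)
 *      1         1          0          false  (size fixed in the proto)
 *      1         1          1          true   (fixed size AND a size arg)
 *
 * Inconsistent combinations (MEM_FIXED_SIZE without an arg_size, or an
 * arg_size without MEM_FIXED_SIZE) are rejected as well.  For any other
 * base type, the pair is invalid exactly when a size argument follows it.
 */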
@@ -6460,7 +6477,10 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn)
                if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
                        return false;
 
-               if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
+               if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
+                   /* arg_btf_id and arg_size are in a union. */
+                   (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
+                    !(fn->arg_type[i] & MEM_FIXED_SIZE)))
                        return false;
        }
 
@@ -6943,9 +6963,7 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
        ret_reg->s32_max_value = meta->msize_max_value;
        ret_reg->smin_value = -MAX_ERRNO;
        ret_reg->s32_min_value = -MAX_ERRNO;
-       __reg_deduce_bounds(ret_reg);
-       __reg_bound_offset(ret_reg);
-       __update_reg_bounds(ret_reg);
+       reg_bounds_sync(ret_reg);
 }
 
 static int
@@ -8202,11 +8220,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
                return -EINVAL;
-
-       __update_reg_bounds(dst_reg);
-       __reg_deduce_bounds(dst_reg);
-       __reg_bound_offset(dst_reg);
-
+       reg_bounds_sync(dst_reg);
        if (sanitize_check_bounds(env, insn, dst_reg) < 0)
                return -EACCES;
        if (sanitize_needed(opcode)) {
@@ -8944,10 +8958,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        /* ALU32 ops are zero extended into 64bit register */
        if (alu32)
                zext_32_to_64(dst_reg);
-
-       __update_reg_bounds(dst_reg);
-       __reg_deduce_bounds(dst_reg);
-       __reg_bound_offset(dst_reg);
+       reg_bounds_sync(dst_reg);
        return 0;
 }
 
@@ -9136,10 +9147,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                                         insn->dst_reg);
                                }
                                zext_32_to_64(dst_reg);
-
-                               __update_reg_bounds(dst_reg);
-                               __reg_deduce_bounds(dst_reg);
-                               __reg_bound_offset(dst_reg);
+                               reg_bounds_sync(dst_reg);
                        }
                } else {
                        /* case: R = imm
@@ -9577,26 +9585,33 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
                return;
 
        switch (opcode) {
+       /* JEQ/JNE comparison doesn't change the register equivalence.
+        *
+        * r1 = r2;
+        * if (r1 == 42) goto label;
+        * ...
+        * label: // here both r1 and r2 are known to be 42.
+        *
+        * Hence, when marking the register as known, preserve its ID.
+        */
        case BPF_JEQ:
+               if (is_jmp32) {
+                       __mark_reg32_known(true_reg, val32);
+                       true_32off = tnum_subreg(true_reg->var_off);
+               } else {
+                       ___mark_reg_known(true_reg, val);
+                       true_64off = true_reg->var_off;
+               }
+               break;
        case BPF_JNE:
-       {
-               struct bpf_reg_state *reg =
-                       opcode == BPF_JEQ ? true_reg : false_reg;
-
-               /* JEQ/JNE comparison doesn't change the register equivalence.
-                * r1 = r2;
-                * if (r1 == 42) goto label;
-                * ...
-                * label: // here both r1 and r2 are known to be 42.
-                *
-                * Hence when marking register as known preserve it's ID.
-                */
-               if (is_jmp32)
-                       __mark_reg32_known(reg, val32);
-               else
-                       ___mark_reg_known(reg, val);
+               if (is_jmp32) {
+                       __mark_reg32_known(false_reg, val32);
+                       false_32off = tnum_subreg(false_reg->var_off);
+               } else {
+                       ___mark_reg_known(false_reg, val);
+                       false_64off = false_reg->var_off;
+               }
                break;
-       }
        case BPF_JSET:
                if (is_jmp32) {
                        false_32off = tnum_and(false_32off, tnum_const(~val32));
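Splitting JEQ and JNE apart also lets each branch refresh its local tnum snapshot (true_64off/true_32off, resp. false_*) right after the register is marked known; reg_set_min_max() later folds those snapshots back into the register's var_off, so keeping them in sync preserves the exact constant on the taken (JEQ) or fall-through (JNE) path. A hedged BPF-level illustration of the resulting state (annotations are paraphrased, not literal verifier log output):

/*
 *	r2 = r1;			// r1 and r2 share the same id
 *	if r1 == 0x7ffffffc goto l;	// 64-bit BPF_JEQ
 *	// fall-through: r1 != 0x7ffffffc, bounds otherwise unchanged
 *	...
 * l:	// taken path: r1 is exactly 0x7ffffffc,
 *	// var_off = (0x7ffffffc; 0), umin = umax = smin = smax = 0x7ffffffc;
 *	// r2 carries the same id, so find_equal_scalars() propagates the
 *	// same known value to it as well.
 */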
@@ -9735,21 +9750,8 @@ static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
                                                        dst_reg->smax_value);
        src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
                                                             dst_reg->var_off);
-       /* We might have learned new bounds from the var_off. */
-       __update_reg_bounds(src_reg);
-       __update_reg_bounds(dst_reg);
-       /* We might have learned something about the sign bit. */
-       __reg_deduce_bounds(src_reg);
-       __reg_deduce_bounds(dst_reg);
-       /* We might have learned some bits from the bounds. */
-       __reg_bound_offset(src_reg);
-       __reg_bound_offset(dst_reg);
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __update_reg_bounds(src_reg);
-       __update_reg_bounds(dst_reg);
+       reg_bounds_sync(src_reg);
+       reg_bounds_sync(dst_reg);
 }
 
 static void reg_combine_min_max(struct bpf_reg_state *true_src,
@@ -10901,7 +10903,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
                        goto err_free;
                ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
                scalar_return =
-                       btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
+                       btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
                if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
                        verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
                        goto err_free;
@@ -14829,8 +14831,8 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
        }
 
        if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
-           prog->type != BPF_PROG_TYPE_LSM) {
-               verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
+           prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) {
+               verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n");
                return -EINVAL;
        }
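For the last hunk, a hedged user-space sketch of what the relaxed check admits: a sleepable uprobe program, i.e. BPF_PROG_TYPE_KPROBE loaded with BPF_F_SLEEPABLE. The SEC("uprobe.s/...") naming and the bash:readline target are assumptions about the paired libbpf, not something this kernel hunk defines:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* Sleepable uprobe: the ".s" suffix asks libbpf to set BPF_F_SLEEPABLE,
 * which in turn allows sleepable helpers such as bpf_copy_from_user().
 */
SEC("uprobe.s//bin/bash:readline")
int BPF_KPROBE(handle_readline, const char *line)
{
	char buf[64] = {};

	bpf_copy_from_user(buf, sizeof(buf), line);
	bpf_printk("readline: %s", buf);
	return 0;
}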