bpf: Small BPF verifier log improvements
author    Mykola Lysenko <mykolal@fb.com>
          Tue, 1 Mar 2022 22:27:45 +0000 (14:27 -0800)
committer Daniel Borkmann <daniel@iogearbox.net>
          Thu, 3 Mar 2022 15:54:10 +0000 (16:54 +0100)
In particular, these include:

  1) Remove output of inv for scalars in print_verifier_state
  2) Replace inv with scalar in verifier error messages
  3) Remove _value suffixes for umin/umax/s32_min/etc (except map_value)
  4) Remove output of id=0
  5) Remove output of ref_obj_id=0
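
For example, a bounded scalar register that was previously printed as

  R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))

now shows up in the log as

  R3_w=scalar(umax=255,var_off=(0x0; 0xff))

and a constant scalar such as R3_w=inv2 is simply printed as R3_w=2
(see the updated selftest expectations below).

Internally, print_verifier_state() now emits the attribute list through a
small verbose_a() append helper that only prints a separator between
attributes that are actually present. A minimal standalone sketch of that
pattern (a plain C illustration with made-up reg fields, not the kernel
code itself):

  #include <stdio.h>

  struct reg { int id; unsigned long long umin, umax; };

  static void print_reg(const struct reg *r)
  {
          const char *sep = "";

  /* Append one attribute, prefixing a comma only after the first one,
   * mirroring the verbose_a() macro added in print_verifier_state().
   */
  #define verbose_a(fmt, ...) ({ printf("%s" fmt, sep, __VA_ARGS__); sep = ","; })

          printf("scalar(");
          if (r->id)
                  verbose_a("id=%d", r->id);
          if (r->umin != 0)
                  verbose_a("umin=%llu", r->umin);
          if (r->umax != ~0ULL)
                  verbose_a("umax=%llu", r->umax);
          printf(")\n");
  #undef verbose_a
  }

  int main(void)
  {
          struct reg r = { .id = 1, .umin = 0, .umax = 255 };

          print_reg(&r);  /* prints: scalar(id=1,umax=255) */
          return 0;
  }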

Signed-off-by: Mykola Lysenko <mykolal@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220301222745.1667206-1-mykolal@fb.com
20 files changed:
kernel/bpf/verifier.c
tools/testing/selftests/bpf/prog_tests/align.c
tools/testing/selftests/bpf/prog_tests/log_buf.c
tools/testing/selftests/bpf/verifier/atomic_invalid.c
tools/testing/selftests/bpf/verifier/bounds.c
tools/testing/selftests/bpf/verifier/calls.c
tools/testing/selftests/bpf/verifier/ctx.c
tools/testing/selftests/bpf/verifier/direct_packet_access.c
tools/testing/selftests/bpf/verifier/helper_access_var_len.c
tools/testing/selftests/bpf/verifier/jmp32.c
tools/testing/selftests/bpf/verifier/precise.c
tools/testing/selftests/bpf/verifier/raw_stack.c
tools/testing/selftests/bpf/verifier/ref_tracking.c
tools/testing/selftests/bpf/verifier/search_pruning.c
tools/testing/selftests/bpf/verifier/sock.c
tools/testing/selftests/bpf/verifier/spill_fill.c
tools/testing/selftests/bpf/verifier/unpriv.c
tools/testing/selftests/bpf/verifier/value_illegal_alu.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/bpf/verifier/var_off.c

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d7473fe..a57db4b 100644
@@ -539,7 +539,7 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
        char postfix[16] = {0}, prefix[32] = {0};
        static const char * const str[] = {
                [NOT_INIT]              = "?",
-               [SCALAR_VALUE]          = "inv",
+               [SCALAR_VALUE]          = "scalar",
                [PTR_TO_CTX]            = "ctx",
                [CONST_PTR_TO_MAP]      = "map_ptr",
                [PTR_TO_MAP_VALUE]      = "map_value",
@@ -685,74 +685,80 @@ static void print_verifier_state(struct bpf_verifier_env *env,
                        continue;
                verbose(env, " R%d", i);
                print_liveness(env, reg->live);
-               verbose(env, "=%s", reg_type_str(env, t));
+               verbose(env, "=");
                if (t == SCALAR_VALUE && reg->precise)
                        verbose(env, "P");
                if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
                    tnum_is_const(reg->var_off)) {
                        /* reg->off should be 0 for SCALAR_VALUE */
+                       verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
                        verbose(env, "%lld", reg->var_off.value + reg->off);
                } else {
+                       const char *sep = "";
+
+                       verbose(env, "%s", reg_type_str(env, t));
                        if (base_type(t) == PTR_TO_BTF_ID ||
                            base_type(t) == PTR_TO_PERCPU_BTF_ID)
                                verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
-                       verbose(env, "(id=%d", reg->id);
-                       if (reg_type_may_be_refcounted_or_null(t))
-                               verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
+                       verbose(env, "(");
+/*
+ * _a stands for append, was shortened to avoid multiline statements below.
+ * This macro is used to output a comma separated list of attributes.
+ */
+#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
+
+                       if (reg->id)
+                               verbose_a("id=%d", reg->id);
+                       if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
+                               verbose_a("ref_obj_id=%d", reg->ref_obj_id);
                        if (t != SCALAR_VALUE)
-                               verbose(env, ",off=%d", reg->off);
+                               verbose_a("off=%d", reg->off);
                        if (type_is_pkt_pointer(t))
-                               verbose(env, ",r=%d", reg->range);
+                               verbose_a("r=%d", reg->range);
                        else if (base_type(t) == CONST_PTR_TO_MAP ||
                                 base_type(t) == PTR_TO_MAP_KEY ||
                                 base_type(t) == PTR_TO_MAP_VALUE)
-                               verbose(env, ",ks=%d,vs=%d",
-                                       reg->map_ptr->key_size,
-                                       reg->map_ptr->value_size);
+                               verbose_a("ks=%d,vs=%d",
+                                         reg->map_ptr->key_size,
+                                         reg->map_ptr->value_size);
                        if (tnum_is_const(reg->var_off)) {
                                /* Typically an immediate SCALAR_VALUE, but
                                 * could be a pointer whose offset is too big
                                 * for reg->off
                                 */
-                               verbose(env, ",imm=%llx", reg->var_off.value);
+                               verbose_a("imm=%llx", reg->var_off.value);
                        } else {
                                if (reg->smin_value != reg->umin_value &&
                                    reg->smin_value != S64_MIN)
-                                       verbose(env, ",smin_value=%lld",
-                                               (long long)reg->smin_value);
+                                       verbose_a("smin=%lld", (long long)reg->smin_value);
                                if (reg->smax_value != reg->umax_value &&
                                    reg->smax_value != S64_MAX)
-                                       verbose(env, ",smax_value=%lld",
-                                               (long long)reg->smax_value);
+                                       verbose_a("smax=%lld", (long long)reg->smax_value);
                                if (reg->umin_value != 0)
-                                       verbose(env, ",umin_value=%llu",
-                                               (unsigned long long)reg->umin_value);
+                                       verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
                                if (reg->umax_value != U64_MAX)
-                                       verbose(env, ",umax_value=%llu",
-                                               (unsigned long long)reg->umax_value);
+                                       verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
                                if (!tnum_is_unknown(reg->var_off)) {
                                        char tn_buf[48];
 
                                        tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-                                       verbose(env, ",var_off=%s", tn_buf);
+                                       verbose_a("var_off=%s", tn_buf);
                                }
                                if (reg->s32_min_value != reg->smin_value &&
                                    reg->s32_min_value != S32_MIN)
-                                       verbose(env, ",s32_min_value=%d",
-                                               (int)(reg->s32_min_value));
+                                       verbose_a("s32_min=%d", (int)(reg->s32_min_value));
                                if (reg->s32_max_value != reg->smax_value &&
                                    reg->s32_max_value != S32_MAX)
-                                       verbose(env, ",s32_max_value=%d",
-                                               (int)(reg->s32_max_value));
+                                       verbose_a("s32_max=%d", (int)(reg->s32_max_value));
                                if (reg->u32_min_value != reg->umin_value &&
                                    reg->u32_min_value != U32_MIN)
-                                       verbose(env, ",u32_min_value=%d",
-                                               (int)(reg->u32_min_value));
+                                       verbose_a("u32_min=%d", (int)(reg->u32_min_value));
                                if (reg->u32_max_value != reg->umax_value &&
                                    reg->u32_max_value != U32_MAX)
-                                       verbose(env, ",u32_max_value=%d",
-                                               (int)(reg->u32_max_value));
+                                       verbose_a("u32_max=%d", (int)(reg->u32_max_value));
                        }
+#undef verbose_a
+
                        verbose(env, ")");
                }
        }
@@ -777,7 +783,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
                if (is_spilled_reg(&state->stack[i])) {
                        reg = &state->stack[i].spilled_ptr;
                        t = reg->type;
-                       verbose(env, "=%s", reg_type_str(env, t));
+                       verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
                        if (t == SCALAR_VALUE && reg->precise)
                                verbose(env, "P");
                        if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index 0ee29e1..970f091 100644
@@ -39,13 +39,13 @@ static struct bpf_align_test tests[] = {
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .matches = {
-                       {0, "R1=ctx(id=0,off=0,imm=0)"},
+                       {0, "R1=ctx(off=0,imm=0)"},
                        {0, "R10=fp0"},
-                       {0, "R3_w=inv2"},
-                       {1, "R3_w=inv4"},
-                       {2, "R3_w=inv8"},
-                       {3, "R3_w=inv16"},
-                       {4, "R3_w=inv32"},
+                       {0, "R3_w=2"},
+                       {1, "R3_w=4"},
+                       {2, "R3_w=8"},
+                       {3, "R3_w=16"},
+                       {4, "R3_w=32"},
                },
        },
        {
@@ -67,19 +67,19 @@ static struct bpf_align_test tests[] = {
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .matches = {
-                       {0, "R1=ctx(id=0,off=0,imm=0)"},
+                       {0, "R1=ctx(off=0,imm=0)"},
                        {0, "R10=fp0"},
-                       {0, "R3_w=inv1"},
-                       {1, "R3_w=inv2"},
-                       {2, "R3_w=inv4"},
-                       {3, "R3_w=inv8"},
-                       {4, "R3_w=inv16"},
-                       {5, "R3_w=inv1"},
-                       {6, "R4_w=inv32"},
-                       {7, "R4_w=inv16"},
-                       {8, "R4_w=inv8"},
-                       {9, "R4_w=inv4"},
-                       {10, "R4_w=inv2"},
+                       {0, "R3_w=1"},
+                       {1, "R3_w=2"},
+                       {2, "R3_w=4"},
+                       {3, "R3_w=8"},
+                       {4, "R3_w=16"},
+                       {5, "R3_w=1"},
+                       {6, "R4_w=32"},
+                       {7, "R4_w=16"},
+                       {8, "R4_w=8"},
+                       {9, "R4_w=4"},
+                       {10, "R4_w=2"},
                },
        },
        {
@@ -96,14 +96,14 @@ static struct bpf_align_test tests[] = {
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .matches = {
-                       {0, "R1=ctx(id=0,off=0,imm=0)"},
+                       {0, "R1=ctx(off=0,imm=0)"},
                        {0, "R10=fp0"},
-                       {0, "R3_w=inv4"},
-                       {1, "R3_w=inv8"},
-                       {2, "R3_w=inv10"},
-                       {3, "R4_w=inv8"},
-                       {4, "R4_w=inv12"},
-                       {5, "R4_w=inv14"},
+                       {0, "R3_w=4"},
+                       {1, "R3_w=8"},
+                       {2, "R3_w=10"},
+                       {3, "R4_w=8"},
+                       {4, "R4_w=12"},
+                       {5, "R4_w=14"},
                },
        },
        {
@@ -118,12 +118,12 @@ static struct bpf_align_test tests[] = {
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .matches = {
-                       {0, "R1=ctx(id=0,off=0,imm=0)"},
+                       {0, "R1=ctx(off=0,imm=0)"},
                        {0, "R10=fp0"},
-                       {0, "R3_w=inv7"},
-                       {1, "R3_w=inv7"},
-                       {2, "R3_w=inv14"},
-                       {3, "R3_w=inv56"},
+                       {0, "R3_w=7"},
+                       {1, "R3_w=7"},
+                       {2, "R3_w=14"},
+                       {3, "R3_w=56"},
                },
        },
 
@@ -161,19 +161,19 @@ static struct bpf_align_test tests[] = {
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .matches = {
-                       {6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
-                       {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-                       {7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
-                       {8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-                       {9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-                       {10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
-                       {12, "R3_w=pkt_end(id=0,off=0,imm=0)"},
-                       {17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-                       {18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
-                       {19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
-                       {20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-                       {21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-                       {22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+                       {6, "R0_w=pkt(off=8,r=8,imm=0)"},
+                       {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+                       {7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
+                       {8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+                       {9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
+                       {10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
+                       {12, "R3_w=pkt_end(off=0,imm=0)"},
+                       {17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+                       {18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"},
+                       {19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
+                       {20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
+                       {21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+                       {22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
                },
        },
        {
@@ -194,16 +194,16 @@ static struct bpf_align_test tests[] = {
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .matches = {
-                       {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-                       {7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
-                       {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-                       {9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
-                       {10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
-                       {11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
-                       {12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-                       {13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
-                       {14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-                       {15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+                       {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+                       {7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
+                       {8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+                       {9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
+                       {10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
+                       {11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
+                       {12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+                       {13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
+                       {14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
+                       {15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
                },
        },
        {
@@ -234,14 +234,14 @@ static struct bpf_align_test tests[] = {
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .matches = {
-                       {2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
-                       {4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
-                       {5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
-                       {9, "R2=pkt(id=0,off=0,r=18,imm=0)"},
-                       {10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
-                       {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-                       {13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
-                       {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+                       {2, "R5_w=pkt(off=0,r=0,imm=0)"},
+                       {4, "R5_w=pkt(off=14,r=0,imm=0)"},
+                       {5, "R4_w=pkt(off=14,r=0,imm=0)"},
+                       {9, "R2=pkt(off=0,r=18,imm=0)"},
+                       {10, "R5=pkt(off=14,r=18,imm=0)"},
+                       {10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+                       {13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
+                       {14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
                },
        },
        {
@@ -296,59 +296,59 @@ static struct bpf_align_test tests[] = {
                        /* Calculated offset in R6 has unknown value, but known
                         * alignment of 4.
                         */
-                       {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
-                       {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {6, "R2_w=pkt(off=0,r=8,imm=0)"},
+                       {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Offset is added to packet pointer R5, resulting in
                         * known fixed offset, and variable offset from R6.
                         */
-                       {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
                        /* At the time the word size load is performed from R5,
                         * it's total offset is NET_IP_ALIGN + reg->off (0) +
                         * reg->aux_off (14) which is 16.  Then the variable
                         * offset is considered using reg->aux_off_align which
                         * is 4 and meets the load's requirements.
                         */
-                       {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
-                       {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+                       {15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Variable offset is added to R5 packet pointer,
                         * resulting in auxiliary alignment of 4.
                         */
-                       {17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Constant offset is added to R5, resulting in
                         * reg->off of 14.
                         */
-                       {18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
                        /* At the time the word size load is performed from R5,
                         * its total fixed offset is NET_IP_ALIGN + reg->off
                         * (14) which is 16.  Then the variable offset is 4-byte
                         * aligned, so the total offset is 4-byte aligned and
                         * meets the load's requirements.
                         */
-                       {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
-                       {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+                       {23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Constant offset is added to R5 packet pointer,
                         * resulting in reg->off value of 14.
                         */
-                       {25, "R5_w=pkt(id=0,off=14,r=8"},
+                       {25, "R5_w=pkt(off=14,r=8"},
                        /* Variable offset is added to R5, resulting in a
                         * variable offset of (4n).
                         */
-                       {26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Constant is added to R5 again, setting reg->off to 18. */
-                       {27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
                        /* And once more we add a variable; resulting var_off
                         * is still (4n), fixed offset is not changed.
                         * Also, we create a new reg->id.
                         */
-                       {28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+                       {28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
                        /* At the time the word size load is performed from R5,
                         * its total fixed offset is NET_IP_ALIGN + reg->off (18)
                         * which is 20.  Then the variable offset is (4n), so
                         * the total offset is 4-byte aligned and meets the
                         * load's requirements.
                         */
-                       {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
-                       {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+                       {33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+                       {33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
                },
        },
        {
@@ -386,36 +386,36 @@ static struct bpf_align_test tests[] = {
                        /* Calculated offset in R6 has unknown value, but known
                         * alignment of 4.
                         */
-                       {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
-                       {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {6, "R2_w=pkt(off=0,r=8,imm=0)"},
+                       {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Adding 14 makes R6 be (4n+2) */
-                       {8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+                       {8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
                        /* Packet pointer has (4n+2) offset */
-                       {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
-                       {12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+                       {11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
+                       {12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
                        /* At the time the word size load is performed from R5,
                         * its total fixed offset is NET_IP_ALIGN + reg->off (0)
                         * which is 2.  Then the variable offset is (4n+2), so
                         * the total offset is 4-byte aligned and meets the
                         * load's requirements.
                         */
-                       {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+                       {15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
                        /* Newly read value in R6 was shifted left by 2, so has
                         * known alignment of 4.
                         */
-                       {17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Added (4n) to packet pointer's (4n+2) var_off, giving
                         * another (4n+2).
                         */
-                       {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
-                       {20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+                       {19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
+                       {20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
                        /* At the time the word size load is performed from R5,
                         * its total fixed offset is NET_IP_ALIGN + reg->off (0)
                         * which is 2.  Then the variable offset is (4n+2), so
                         * the total offset is 4-byte aligned and meets the
                         * load's requirements.
                         */
-                       {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+                       {23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
                },
        },
        {
@@ -448,18 +448,18 @@ static struct bpf_align_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = REJECT,
                .matches = {
-                       {3, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+                       {3, "R5_w=pkt_end(off=0,imm=0)"},
                        /* (ptr - ptr) << 2 == unknown, (4n) */
-                       {5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
+                       {5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
                        /* (4n) + 14 == (4n+2).  We blow our bounds, because
                         * the add could overflow.
                         */
-                       {6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+                       {6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
                        /* Checked s>=0 */
-                       {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+                       {9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
                        /* packet pointer + nonnegative (4n+2) */
-                       {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
-                       {12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+                       {11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+                       {12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
                        /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
                         * We checked the bounds, but it might have been able
                         * to overflow if the packet pointer started in the
@@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = {
                         * So we did not get a 'range' on R6, and the access
                         * attempt will fail.
                         */
-                       {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+                       {15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
                }
        },
        {
@@ -502,23 +502,23 @@ static struct bpf_align_test tests[] = {
                        /* Calculated offset in R6 has unknown value, but known
                         * alignment of 4.
                         */
-                       {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
-                       {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {6, "R2_w=pkt(off=0,r=8,imm=0)"},
+                       {8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Adding 14 makes R6 be (4n+2) */
-                       {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+                       {9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
                        /* New unknown value in R7 is (4n) */
-                       {10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
                        /* Subtracting it from R6 blows our unsigned bounds */
-                       {11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+                       {11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
                        /* Checked s>= 0 */
-                       {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+                       {14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"},
                        /* At the time the word size load is performed from R5,
                         * its total fixed offset is NET_IP_ALIGN + reg->off (0)
                         * which is 2.  Then the variable offset is (4n+2), so
                         * the total offset is 4-byte aligned and meets the
                         * load's requirements.
                         */
-                       {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
+                       {20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"},
 
                },
        },
@@ -556,23 +556,23 @@ static struct bpf_align_test tests[] = {
                        /* Calculated offset in R6 has unknown value, but known
                         * alignment of 4.
                         */
-                       {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
-                       {9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
+                       {6, "R2_w=pkt(off=0,r=8,imm=0)"},
+                       {9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"},
                        /* Adding 14 makes R6 be (4n+2) */
-                       {10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+                       {10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"},
                        /* Subtracting from packet pointer overflows ubounds */
-                       {13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
+                       {13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
                        /* New unknown value in R7 is (4n), >= 76 */
-                       {14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+                       {14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
                        /* Adding it to packet pointer gives nice bounds again */
-                       {16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+                       {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
                        /* At the time the word size load is performed from R5,
                         * its total fixed offset is NET_IP_ALIGN + reg->off (0)
                         * which is 2.  Then the variable offset is (4n+2), so
                         * the total offset is 4-byte aligned and meets the
                         * load's requirements.
                         */
-                       {20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+                       {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
                },
        },
 };
@@ -648,8 +648,8 @@ static int do_test_single(struct bpf_align_test *test)
                        /* Check the next line as well in case the previous line
                         * did not have a corresponding bpf insn. Example:
                         * func#0 @0
-                        * 0: R1=ctx(id=0,off=0,imm=0) R10=fp0
-                        * 0: (b7) r3 = 2                 ; R3_w=inv2
+                        * 0: R1=ctx(off=0,imm=0) R10=fp0
+                        * 0: (b7) r3 = 2                 ; R3_w=2
                         */
                        if (!strstr(line_ptr, m.match)) {
                                cur_line = -1;
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index 1ef377a..fe9a23e 100644
@@ -78,7 +78,7 @@ static void obj_load_log_buf(void)
        ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
                      "libbpf_log_not_empty");
        ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
-       ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"),
+       ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"),
                      "good_log_verbose");
        ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
                      "bad_log_not_empty");
@@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void)
        opts.log_level = 2;
        fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
                           good_prog_insns, good_prog_insn_cnt, &opts);
-       ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2");
+       ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2");
        ASSERT_GE(fd, 0, "good_fd2");
        if (fd >= 0)
                close(fd);
diff --git a/tools/testing/selftests/bpf/verifier/atomic_invalid.c b/tools/testing/selftests/bpf/verifier/atomic_invalid.c
index 3927272..25f4ac1 100644
@@ -1,6 +1,6 @@
-#define __INVALID_ATOMIC_ACCESS_TEST(op)                                       \
+#define __INVALID_ATOMIC_ACCESS_TEST(op)                               \
        {                                                               \
-               "atomic " #op " access through non-pointer ",                   \
+               "atomic " #op " access through non-pointer ",           \
                .insns = {                                              \
                        BPF_MOV64_IMM(BPF_REG_0, 1),                    \
                        BPF_MOV64_IMM(BPF_REG_1, 0),                    \
@@ -9,7 +9,7 @@
                        BPF_EXIT_INSN(),                                \
                },                                                      \
                .result = REJECT,                                       \
-               .errstr = "R1 invalid mem access 'inv'"                 \
+               .errstr = "R1 invalid mem access 'scalar'"              \
        }
 __INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
 __INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index e061e87..33125d5 100644
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT
 },
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 0a8ea60..f890333 100644
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
-       .errstr = "R0 invalid mem access 'inv'",
+       .errstr = "R0 invalid mem access 'scalar'",
 },
 {
        "calls: multiple ret types in subprog 2",
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
-       .errstr = "R6 invalid mem access 'inv'",
+       .errstr = "R6 invalid mem access 'scalar'",
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .fixup_map_hash_8b = { 12, 22 },
        .result = REJECT,
-       .errstr = "R0 invalid mem access 'inv'",
+       .errstr = "R0 invalid mem access 'scalar'",
 },
 {
        "calls: pkt_ptr spill into caller stack",
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
index 2308086..60f6fbe 100644
        .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
        .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
        .result = REJECT,
-       .errstr = "R1 type=inv expected=ctx",
+       .errstr = "R1 type=scalar expected=ctx",
 },
 {
        "pass ctx or null check, 4: ctx - const",
        .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
        .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
        .result = REJECT,
-       .errstr = "R1 type=inv expected=ctx",
+       .errstr = "R1 type=scalar expected=ctx",
 },
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
index ac1e19d..11acd18 100644
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr = "R2 invalid mem access 'inv'",
+       .errstr = "R2 invalid mem access 'scalar'",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
index 0ab7f1d..a6c869a 100644
        BPF_EMIT_CALL(BPF_FUNC_csum_diff),
        BPF_EXIT_INSN(),
        },
-       .errstr = "R1 type=inv expected=fp",
+       .errstr = "R1 type=scalar expected=fp",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
        BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
        BPF_EXIT_INSN(),
        },
-       .errstr = "R1 type=inv expected=fp",
+       .errstr = "R1 type=scalar expected=fp",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
        BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
        BPF_EXIT_INSN(),
        },
-       .errstr = "R1 type=inv expected=fp",
+       .errstr = "R1 type=scalar expected=fp",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index 1c857b2..6ddc418 100644
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 6dc8003..9e75442 100644
@@ -27,7 +27,7 @@
        BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
        BPF_EXIT_INSN(),
 
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
        BPF_MOV64_IMM(BPF_REG_3, 0),
@@ -87,7 +87,7 @@
        BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
        BPF_EXIT_INSN(),
 
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
        BPF_MOV64_IMM(BPF_REG_3, 0),
diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c
index cc8e8c3..eb5ed93 100644
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
-       .errstr = "R0 invalid mem access 'inv'",
+       .errstr = "R0 invalid mem access 'scalar'",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
-       .errstr = "R3 invalid mem access 'inv'",
+       .errstr = "R3 invalid mem access 'scalar'",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index 3b6ee00..fbd6825 100644
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-       .errstr = "type=inv expected=sock",
+       .errstr = "type=scalar expected=sock",
        .result = REJECT,
 },
 {
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-       .errstr = "type=inv expected=sock",
+       .errstr = "type=scalar expected=sock",
        .result = REJECT,
 },
 {
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-       .errstr = "type=inv expected=sock",
+       .errstr = "type=scalar expected=sock",
        .result = REJECT,
 },
 {
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
index 6825197..68b14fd 100644
                BPF_EXIT_INSN(),
        },
        .fixup_map_hash_8b = { 3 },
-       .errstr = "R6 invalid mem access 'inv'",
+       .errstr = "R6 invalid mem access 'scalar'",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 8c224ea..86b24ca 100644
        .fixup_sk_storage_map = { 11 },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
-       .errstr = "R3 type=inv expected=fp",
+       .errstr = "R3 type=scalar expected=fp",
 },
 {
        "sk_storage_get(map, skb->sk, &stack_value, 1): stack_value",
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index 8cfc534..e23f071 100644
        BPF_EXIT_INSN(),
        },
        .errstr_unpriv = "attempt to corrupt spilled",
-       .errstr = "R0 invalid mem access 'inv",
+       .errstr = "R0 invalid mem access 'scalar'",
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
        BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
        /* r0 = r2 */
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */
+       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
-       /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */
+       /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */
        BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-       /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */
+       /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
        /* r0 = r2 */
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
-       /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
        BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-       /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+       /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
        /* r0 = r2 */
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
-       /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
        BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-       /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+       /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
        /* r0 = r2 */
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
-       /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
        BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-       /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+       /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
        /* r0 = r2 */
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */
+       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
-       /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
+       /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
        BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-       /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
+       /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
-       /* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */
+       /* *(u32 *)(r10 -8) = r4 R4=umax=40 */
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
        /* r4 = (*u32 *)(r10 - 8) */
        BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
-       /* r2 += r4 R2=pkt R4=inv,umax=40 */
+       /* r2 += r4 R2=pkt R4=umax=40 */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
-       /* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */
+       /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
        /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
index 111801a..878ca26 100644
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
-       .errstr = "R1 type=inv expected=ctx",
+       .errstr = "R1 type=scalar expected=ctx",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
 {
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R7 invalid mem access 'inv'",
+       .errstr_unpriv = "R7 invalid mem access 'scalar'",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 0,
diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
index 4890628..d6f29eb 100644
@@ -64,7 +64,7 @@
        },
        .fixup_map_hash_48b = { 3 },
        .errstr_unpriv = "R0 pointer arithmetic prohibited",
-       .errstr = "invalid mem access 'inv'",
+       .errstr = "invalid mem access 'scalar'",
        .result = REJECT,
        .result_unpriv = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -89,7 +89,7 @@
        },
        .fixup_map_hash_48b = { 3 },
        .errstr_unpriv = "leaking pointer from stack off -8",
-       .errstr = "R0 invalid mem access 'inv'",
+       .errstr = "R0 invalid mem access 'scalar'",
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index 359f3e8..249187d 100644
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .errstr_unpriv = "R0 invalid mem access 'scalar'",
        .retval = 0,
 },
 {
        },
        .fixup_map_array_48b = { 3 },
        .result = REJECT,
-       .errstr = "R0 invalid mem access 'inv'",
+       .errstr = "R0 invalid mem access 'scalar'",
        .errstr_unpriv = "R0 pointer -= pointer prohibited",
 },
 {
diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c
index eab1f7f..187c6f6 100644
         * write might have overwritten the spilled pointer (i.e. we lose track
         * of the spilled register when we analyze the write).
         */
-       .errstr = "R2 invalid mem access 'inv'",
+       .errstr = "R2 invalid mem access 'scalar'",
        .result = REJECT,
 },
 {