selftests/bpf: verifier/xadd.c converted to inline assembly
author    Eduard Zingerman <eddyz87@gmail.com>
          Sat, 25 Mar 2023 02:55:22 +0000 (04:55 +0200)
committer Alexei Starovoitov <ast@kernel.org>
          Sun, 26 Mar 2023 00:02:06 +0000 (17:02 -0700)
Test verifier/xadd.c automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230325025524.144043-42-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/verifier.c
tools/testing/selftests/bpf/progs/verifier_xadd.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/xadd.c [deleted file]
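
For orientation, the conversion is mechanical: each BPF_*() instruction macro in the old table-driven test becomes one line of BPF inline assembly inside a __naked function. A side-by-side sketch of the first test in this diff, pairing each asm line (new form) with the macro it replaces (old form) in comments:

SEC("tc")
__naked void xadd_w_check_unaligned_stack(void)
{
	asm volatile (
	"r0 = 1;"                       /* BPF_MOV64_IMM(BPF_REG_0, 1) */
	"*(u64*)(r10 - 8) = r0;"        /* BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8) */
	"lock *(u32 *)(r10 - 7) += w0;" /* BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7) */
	"r0 = *(u64*)(r10 - 8);"        /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8) */
	"exit;"                         /* BPF_EXIT_INSN() */
	::: __clobber_all);
}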

diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 44350e3..cd56fe5 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -38,6 +38,7 @@
 #include "verifier_value.skel.h"
 #include "verifier_value_or_null.skel.h"
 #include "verifier_var_off.skel.h"
+#include "verifier_xadd.skel.h"
 
 __maybe_unused
 static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory)
@@ -98,3 +99,4 @@ void test_verifier_value_adj_spill(void)      { RUN(verifier_value_adj_spill); }
 void test_verifier_value(void)                { RUN(verifier_value); }
 void test_verifier_value_or_null(void)        { RUN(verifier_value_or_null); }
 void test_verifier_var_off(void)              { RUN(verifier_var_off); }
+void test_verifier_xadd(void)                 { RUN(verifier_xadd); }
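
RUN() here is the shared registration helper defined earlier in this file; given run_tests_aux()'s signature above, it expands along the lines of:

	run_tests_aux("verifier_xadd", verifier_xadd__elf_bytes);

i.e. the runner receives the skeleton name and the function returning the skeleton's embedded ELF image, both generated from verifier_xadd.c by bpftool gen skeleton.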
diff --git a/tools/testing/selftests/bpf/progs/verifier_xadd.c b/tools/testing/selftests/bpf/progs/verifier_xadd.c
new file mode 100644
index 0000000..05a0a55
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_xadd.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __uint(max_entries, 1);
+       __type(key, long long);
+       __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("tc")
+__description("xadd/w check unaligned stack")
+__failure __msg("misaligned stack access off")
+__naked void xadd_w_check_unaligned_stack(void)
+{
+       asm volatile ("                                 \
+       r0 = 1;                                         \
+       *(u64*)(r10 - 8) = r0;                          \
+       lock *(u32 *)(r10 - 7) += w0;                   \
+       r0 = *(u64*)(r10 - 8);                          \
+       exit;                                           \
+"      ::: __clobber_all);
+}
+
+SEC("tc")
+__description("xadd/w check unaligned map")
+__failure __msg("misaligned value access off")
+__naked void xadd_w_check_unaligned_map(void)
+{
+       asm volatile ("                                 \
+       r1 = 0;                                         \
+       *(u64*)(r10 - 8) = r1;                          \
+       r2 = r10;                                       \
+       r2 += -8;                                       \
+       r1 = %[map_hash_8b] ll;                         \
+       call %[bpf_map_lookup_elem];                    \
+       if r0 != 0 goto l0_%=;                          \
+       exit;                                           \
+l0_%=: r1 = 1;                                         \
+       lock *(u32 *)(r0 + 3) += w1;                    \
+       r0 = *(u32*)(r0 + 3);                           \
+       exit;                                           \
+"      :
+       : __imm(bpf_map_lookup_elem),
+         __imm_addr(map_hash_8b)
+       : __clobber_all);
+}
+
+SEC("xdp")
+__description("xadd/w check unaligned pkt")
+__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void xadd_w_check_unaligned_pkt(void)
+{
+       asm volatile ("                                 \
+       r2 = *(u32*)(r1 + %[xdp_md_data]);              \
+       r3 = *(u32*)(r1 + %[xdp_md_data_end]);          \
+       r1 = r2;                                        \
+       r1 += 8;                                        \
+       if r1 < r3 goto l0_%=;                          \
+       r0 = 99;                                        \
+       goto l1_%=;                                     \
+l0_%=: r0 = 1;                                         \
+       r1 = 0;                                         \
+       *(u32*)(r2 + 0) = r1;                           \
+       r1 = 0;                                         \
+       *(u32*)(r2 + 3) = r1;                           \
+       lock *(u32 *)(r2 + 1) += w0;                    \
+       lock *(u32 *)(r2 + 2) += w0;                    \
+       r0 = *(u32*)(r2 + 1);                           \
+l1_%=: exit;                                           \
+"      :
+       : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+         __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+       : __clobber_all);
+}
+
+SEC("tc")
+__description("xadd/w check whether src/dst got mangled, 1")
+__success __retval(3)
+__naked void src_dst_got_mangled_1(void)
+{
+       asm volatile ("                                 \
+       r0 = 1;                                         \
+       r6 = r0;                                        \
+       r7 = r10;                                       \
+       *(u64*)(r10 - 8) = r0;                          \
+       lock *(u64 *)(r10 - 8) += r0;                   \
+       lock *(u64 *)(r10 - 8) += r0;                   \
+       if r6 != r0 goto l0_%=;                         \
+       if r7 != r10 goto l0_%=;                        \
+       r0 = *(u64*)(r10 - 8);                          \
+       exit;                                           \
+l0_%=: r0 = 42;                                        \
+       exit;                                           \
+"      ::: __clobber_all);
+}
+
+SEC("tc")
+__description("xadd/w check whether src/dst got mangled, 2")
+__success __retval(3)
+__naked void src_dst_got_mangled_2(void)
+{
+       asm volatile ("                                 \
+       r0 = 1;                                         \
+       r6 = r0;                                        \
+       r7 = r10;                                       \
+       *(u32*)(r10 - 8) = r0;                          \
+       lock *(u32 *)(r10 - 8) += w0;                   \
+       lock *(u32 *)(r10 - 8) += w0;                   \
+       if r6 != r0 goto l0_%=;                         \
+       if r7 != r10 goto l0_%=;                        \
+       r0 = *(u32*)(r10 - 8);                          \
+       exit;                                           \
+l0_%=: r0 = 42;                                        \
+       exit;                                           \
+"      ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
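
With this file in place the tests run under test_progs rather than test_verifier; assuming a built selftests tree, something like ./test_progs -t verifier_xadd selects just this group. The expected verdicts and verifier messages now travel with the programs via the __success/__failure/__msg/__retval annotations above, instead of the struct fields in the deleted table below.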
diff --git a/tools/testing/selftests/bpf/verifier/xadd.c b/tools/testing/selftests/bpf/verifier/xadd.c
deleted file mode 100644
index b96ef35..0000000
--- a/tools/testing/selftests/bpf/verifier/xadd.c
+++ /dev/null
@@ -1,97 +0,0 @@
-{
-       "xadd/w check unaligned stack",
-       .insns = {
-       BPF_MOV64_IMM(BPF_REG_0, 1),
-       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
-       BPF_EXIT_INSN(),
-       },
-       .result = REJECT,
-       .errstr = "misaligned stack access off",
-       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-},
-{
-       "xadd/w check unaligned map",
-       .insns = {
-       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-       BPF_LD_MAP_FD(BPF_REG_1, 0),
-       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
-       BPF_EXIT_INSN(),
-       BPF_MOV64_IMM(BPF_REG_1, 1),
-       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
-       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
-       BPF_EXIT_INSN(),
-       },
-       .fixup_map_hash_8b = { 3 },
-       .result = REJECT,
-       .errstr = "misaligned value access off",
-       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-},
-{
-       "xadd/w check unaligned pkt",
-       .insns = {
-       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
-       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                   offsetof(struct xdp_md, data_end)),
-       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
-       BPF_MOV64_IMM(BPF_REG_0, 99),
-       BPF_JMP_IMM(BPF_JA, 0, 0, 6),
-       BPF_MOV64_IMM(BPF_REG_0, 1),
-       BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
-       BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
-       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
-       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
-       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
-       BPF_EXIT_INSN(),
-       },
-       .result = REJECT,
-       .errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
-       .prog_type = BPF_PROG_TYPE_XDP,
-       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-},
-{
-       "xadd/w check whether src/dst got mangled, 1",
-       .insns = {
-       BPF_MOV64_IMM(BPF_REG_0, 1),
-       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
-       BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
-       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
-       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
-       BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
-       BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
-       BPF_EXIT_INSN(),
-       BPF_MOV64_IMM(BPF_REG_0, 42),
-       BPF_EXIT_INSN(),
-       },
-       .result = ACCEPT,
-       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-       .retval = 3,
-},
-{
-       "xadd/w check whether src/dst got mangled, 2",
-       .insns = {
-       BPF_MOV64_IMM(BPF_REG_0, 1),
-       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
-       BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
-       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
-       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
-       BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
-       BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
-       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
-       BPF_EXIT_INSN(),
-       BPF_MOV64_IMM(BPF_REG_0, 42),
-       BPF_EXIT_INSN(),
-       },
-       .result = ACCEPT,
-       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-       .retval = 3,
-},
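
For reference, the old table fields map onto the new annotations as follows (every pairing is visible in this diff):

	.prog_type = BPF_PROG_TYPE_SCHED_CLS        ->  SEC("tc")
	.prog_type = BPF_PROG_TYPE_XDP              ->  SEC("xdp")
	"xadd/w check unaligned stack" (test name)  ->  __description("...")
	.result = REJECT, .errstr = "..."           ->  __failure __msg("...")
	.result = ACCEPT, .retval = 3               ->  __success __retval(3)
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS ->  __flag(BPF_F_ANY_ALIGNMENT)
	.fixup_map_hash_8b = { 3 }                  ->  map_hash_8b declared in SEC(".maps")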