bpf, arm64: Optimize ADD,SUB,JMP BPF_K using arm64 add/sub immediates
author Luke Nelson <lukenels@cs.washington.edu>
Fri, 8 May 2020 18:15:46 +0000 (11:15 -0700)
committer Will Deacon <will@kernel.org>
Mon, 11 May 2020 11:21:39 +0000 (12:21 +0100)
The current code for BPF_{ADD,SUB} BPF_K loads the BPF immediate to a
temporary register before performing the addition/subtraction. Similarly,
BPF_JMP BPF_K cases load the immediate to a temporary register before
comparison.

This patch introduces optimizations that use arm64 immediate add, sub,
cmn, or cmp instructions when the BPF immediate fits. If the immediate
does not fit, it falls back to using a temporary register.

Example of generated code for BPF_ALU64_IMM(BPF_ADD, R0, 2):

without optimization:

  24: mov x10, #0x2
  28: add x7, x7, x10

with optimization:

  24: add x7, x7, #0x2
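
A hedged illustration of the other paths (instruction offsets and the branch
target below are illustrative only): for a negative immediate such as
BPF_ALU64_IMM(BPF_ADD, R0, -2), the value does not encode as an imm12 but its
negation does, so the JIT emits the inverse operation:

  24: sub x7, x7, #0x2

and a comparison such as BPF_JMP_IMM(BPF_JEQ, R0, 2, off) no longer needs a
temporary register:

  2c: cmp x7, #0x2
  30: b.eq <target>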

The code could use A64_{ADD,SUB}_I directly and check if it returns
AARCH64_BREAK_FAULT, similar to how logical immediates are handled.
However, aarch64_insn_gen_add_sub_imm from insn.c prints error messages
when the immediate does not fit, and it's simpler to check if the
immediate fits ahead of time.
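
For reference, a minimal sketch of that return-value-checking style, modeled
on the companion logical-immediate handling rather than on code in this
patch:

  case BPF_ALU | BPF_AND | BPF_K:
  case BPF_ALU64 | BPF_AND | BPF_K:
          a64_insn = A64_AND_I(is64, dst, dst, imm);
          if (a64_insn != AARCH64_BREAK_FAULT) {
                  /* Immediate encodes as a logical immediate. */
                  emit(a64_insn, ctx);
          } else {
                  /* Fall back to the temporary register. */
                  emit_a64_mov_i(is64, tmp, imm, ctx);
                  emit(A64_AND(is64, dst, dst, tmp), ctx);
          }
          break;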

Co-developed-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/r/20200508181547.24783-4-luke.r.nels@gmail.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/net/bpf_jit.h
arch/arm64/net/bpf_jit_comp.c

index f36a779..923ae7f 100644
 /* Rd = Rn OP imm12 */
 #define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
 #define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
+#define A64_ADDS_I(sf, Rd, Rn, imm12) \
+       A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
+#define A64_SUBS_I(sf, Rd, Rn, imm12) \
+       A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)
+/* Rn + imm12; set condition flags */
+#define A64_CMN_I(sf, Rn, imm12) A64_ADDS_I(sf, A64_ZR, Rn, imm12)
+/* Rn - imm12; set condition flags */
+#define A64_CMP_I(sf, Rn, imm12) A64_SUBS_I(sf, A64_ZR, Rn, imm12)
 /* Rd = Rn */
 #define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
 
index 083e5d8..561a2fe 100644
@@ -167,6 +167,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
        return to - from;
 }
 
+static bool is_addsub_imm(u32 imm)
+{
+       /* Either imm12 or shifted imm12. */
+       return !(imm & ~0xfff) || !(imm & ~0xfff000);
+}
+
 /* Stack must be multiples of 16B */
 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
 
@@ -479,13 +485,25 @@ emit_bswap_uxt:
        /* dst = dst OP imm */
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_ADD | BPF_K:
-               emit_a64_mov_i(is64, tmp, imm, ctx);
-               emit(A64_ADD(is64, dst, dst, tmp), ctx);
+               if (is_addsub_imm(imm)) {
+                       emit(A64_ADD_I(is64, dst, dst, imm), ctx);
+               } else if (is_addsub_imm(-imm)) {
+                       emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
+               } else {
+                       emit_a64_mov_i(is64, tmp, imm, ctx);
+                       emit(A64_ADD(is64, dst, dst, tmp), ctx);
+               }
                break;
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
-               emit_a64_mov_i(is64, tmp, imm, ctx);
-               emit(A64_SUB(is64, dst, dst, tmp), ctx);
+               if (is_addsub_imm(imm)) {
+                       emit(A64_SUB_I(is64, dst, dst, imm), ctx);
+               } else if (is_addsub_imm(-imm)) {
+                       emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
+               } else {
+                       emit_a64_mov_i(is64, tmp, imm, ctx);
+                       emit(A64_SUB(is64, dst, dst, tmp), ctx);
+               }
                break;
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
@@ -639,8 +657,14 @@ emit_cond_jmp:
        case BPF_JMP32 | BPF_JSLT | BPF_K:
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
-               emit_a64_mov_i(is64, tmp, imm, ctx);
-               emit(A64_CMP(is64, dst, tmp), ctx);
+               if (is_addsub_imm(imm)) {
+                       emit(A64_CMP_I(is64, dst, imm), ctx);
+               } else if (is_addsub_imm(-imm)) {
+                       emit(A64_CMN_I(is64, dst, -imm), ctx);
+               } else {
+                       emit_a64_mov_i(is64, tmp, imm, ctx);
+                       emit(A64_CMP(is64, dst, tmp), ctx);
+               }
                goto emit_cond_jmp;
        case BPF_JMP | BPF_JSET | BPF_K:
        case BPF_JMP32 | BPF_JSET | BPF_K:
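
As a closing illustration of which immediates take the new fast path, a
hedged user-space sanity check of the is_addsub_imm() predicate added above
(the harness is not part of the patch; only the predicate logic is copied):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  static bool is_addsub_imm(uint32_t imm)
  {
          /* Either imm12 or shifted imm12. */
          return !(imm & ~0xfffu) || !(imm & ~0xfff000u);
  }

  int main(void)
  {
          printf("%d\n", is_addsub_imm(0x2));        /* 1: plain imm12 */
          printf("%d\n", is_addsub_imm(0xfff000));   /* 1: imm12 shifted by 12 */
          printf("%d\n", is_addsub_imm(0x1001));     /* 0: spans both fields */
          printf("%d\n", is_addsub_imm(0xfffffffe)); /* 0: -2, handled via the negated value */
          return 0;
  }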