/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
+#define A64_ADDS_I(sf, Rd, Rn, imm12) \
+	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
+#define A64_SUBS_I(sf, Rd, Rn, imm12) \
+	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)
+/* Rn + imm12; set condition flags */
+#define A64_CMN_I(sf, Rn, imm12) A64_ADDS_I(sf, A64_ZR, Rn, imm12)
+/* Rn - imm12; set condition flags */
+#define A64_CMP_I(sf, Rn, imm12) A64_SUBS_I(sf, A64_ZR, Rn, imm12)
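+/* CMN/CMP discard the result by using the zero register as Rd. */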
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
return to - from;
}
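+/*
+ * A64 ADD/SUB (immediate) encodes a 12-bit unsigned immediate that can
+ * optionally be shifted left by 12 bits, so a value of either form can be
+ * used directly as an instruction operand instead of being loaded into a
+ * temporary register first.
+ */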
+static bool is_addsub_imm(u32 imm)
+{
+	/* Either imm12 or shifted imm12. */
+	return !(imm & ~0xfff) || !(imm & ~0xfff000);
+}
+
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
/* dst = dst OP imm */
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
-	emit_a64_mov_i(is64, tmp, imm, ctx);
-	emit(A64_ADD(is64, dst, dst, tmp), ctx);
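+	/*
+	 * Emit a single add/sub immediate when imm (or its negation) fits
+	 * the encoding; otherwise fall back to loading imm into tmp.
+	 */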
+	if (is_addsub_imm(imm)) {
+		emit(A64_ADD_I(is64, dst, dst, imm), ctx);
+	} else if (is_addsub_imm(-imm)) {
+		emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
+	} else {
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_ADD(is64, dst, dst, tmp), ctx);
+	}
break;
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
-	emit_a64_mov_i(is64, tmp, imm, ctx);
-	emit(A64_SUB(is64, dst, dst, tmp), ctx);
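+	/* Same as ADD above: use the inverse operation when only -imm fits. */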
+	if (is_addsub_imm(imm)) {
+		emit(A64_SUB_I(is64, dst, dst, imm), ctx);
+	} else if (is_addsub_imm(-imm)) {
+		emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
+	} else {
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_SUB(is64, dst, dst, tmp), ctx);
+	}
break;
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
-	emit_a64_mov_i(is64, tmp, imm, ctx);
-	emit(A64_CMP(is64, dst, tmp), ctx);
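+	/*
+	 * imm == 0 always takes the CMP_I branch, so on the CMN path imm is
+	 * non-zero and CMN dst, -imm sets the same flags as CMP dst, imm.
+	 */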
+	if (is_addsub_imm(imm)) {
+		emit(A64_CMP_I(is64, dst, imm), ctx);
+	} else if (is_addsub_imm(-imm)) {
+		emit(A64_CMN_I(is64, dst, -imm), ctx);
+	} else {
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_CMP(is64, dst, tmp), ctx);
+	}
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K: