From 700fdb10706186a464cc86b8c1fae0cd778a449d Mon Sep 17 00:00:00 2001 From: Roman Lebedev Date: Thu, 30 May 2019 16:07:19 +0000 Subject: [PATCH] [NFC][Codegen] Add better test coverage for potential add/sub constant folding This adds hopefully-full test coverage for all the possible permutations: First op is one of: * x + c1 * x - c1 * c1 - x Second op is one of: * + c2 * - c2 * c2 - And thus 3*3=9 patterns. Some of them show missed constant-folds. Without previous patch (the revert), these tests were causing endless dagcombine loop. I really should have thought about this first :S llvm-svn: 362110 --- .../CodeGen/AArch64/addsub-constant-folding.ll | 455 ++++++++++++++ llvm/test/CodeGen/AArch64/vec_add.ll | 126 ---- llvm/test/CodeGen/X86/addsub-constant-folding.ll | 657 +++++++++++++++++++++ llvm/test/CodeGen/X86/vec_add.ll | 166 ------ 4 files changed, 1112 insertions(+), 292 deletions(-) create mode 100644 llvm/test/CodeGen/AArch64/addsub-constant-folding.ll delete mode 100644 llvm/test/CodeGen/AArch64/vec_add.ll create mode 100644 llvm/test/CodeGen/X86/addsub-constant-folding.ll delete mode 100644 llvm/test/CodeGen/X86/vec_add.ll diff --git a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll new file mode 100644 index 0000000..6d0f223 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll @@ -0,0 +1,455 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s + +declare void @use(<4 x i32> %arg) + +; (x+c1)+c2 + +define <4 x i32> @add_const_add_const(<4 x i32> %arg) { +; CHECK-LABEL: add_const_add_const: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #10 +; CHECK-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_add_const_extrause(<4 x i32> %arg) { +; CHECK-LABEL: add_const_add_const_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: movi v0.4s, #10 +; CHECK-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_add_const_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: add_const_add_const_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI2_0] +; CHECK-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +; (x+c1)-c2 + +define <4 x i32> @add_const_sub_const(<4 x i32> %arg) { +; CHECK-LABEL: add_const_sub_const: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-NEXT: movi v1.4s, #2 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_sub_const_extrause(<4 x i32> %arg) { +; CHECK-LABEL: add_const_sub_const_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, 
#32 // =32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: movi v0.4s, #2 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_sub_const_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: add_const_sub_const_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; c2-(x+c1) + +define <4 x i32> @add_const_const_sub(<4 x i32> %arg) { +; CHECK-LABEL: add_const_const_sub: +; CHECK: // %bb.0: +; CHECK-NEXT: mvni v1.4s, #5 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_const_sub_extrause(<4 x i32> %arg) { +; CHECK-LABEL: add_const_const_sub_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: mvni v0.4s, #5 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_const_sub_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: add_const_const_sub_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI8_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI8_0] +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: ret + %t0 = add <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; (x-c1)+c2 + +define <4 x i32> @sub_const_add_const(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_add_const: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: movi v1.4s, #2 +; CHECK-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_add_const_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: movi v0.4s, #2 +; CHECK-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = add <4 x i32> 
%t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_add_const_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_add_const_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI11_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI11_0] +; CHECK-NEXT: adrp x8, .LCPI11_1 +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI11_1] +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: add v0.4s, v0.4s, v2.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +; (x-c1)-c2 + +define <4 x i32> @sub_const_sub_const(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_sub_const: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: movi v1.4s, #2 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_sub_const_extrause(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_sub_const_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: movi v0.4s, #2 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_sub_const_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_sub_const_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; c2-(x-c1) + +define <4 x i32> @sub_const_const_sub(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_const_sub: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #10 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_const_sub_extrause(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_const_sub_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: movi v0.4s, #2 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_const_sub_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: sub_const_const_sub_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI17_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; (c1-x)+c2 + +define <4 x i32> @const_sub_add_const(<4 x i32> %arg) { +; CHECK-LABEL: 
const_sub_add_const: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #10 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_add_const_extrause(<4 x i32> %arg) { +; CHECK-LABEL: const_sub_add_const_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: movi v0.4s, #10 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + call void @use(<4 x i32> %t0) + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_add_const_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: const_sub_add_const_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI20_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI20_0] +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +; (c1-x)-c2 + +define <4 x i32> @const_sub_sub_const(<4 x i32> %arg) { +; CHECK-LABEL: const_sub_sub_const: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: movi v1.4s, #2 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_sub_const_extrause(<4 x i32> %arg) { +; CHECK-LABEL: const_sub_sub_const_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: movi v0.4s, #2 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_sub_const_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: const_sub_sub_const_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI23_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI23_0] +; CHECK-NEXT: adrp x8, .LCPI23_1 +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI23_1] +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: add v0.4s, v0.4s, v2.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; c2-(c1-x) + +define <4 x i32> @const_sub_const_sub(<4 x i32> %arg) { +; CHECK-LABEL: const_sub_const_sub: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: movi v1.4s, #2 +; CHECK-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_const_sub_extrause(<4 x i32> %arg) { +; CHECK-LABEL: const_sub_const_sub_extrause: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: str x30, [sp, 
#16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: movi v1.4s, #8 +; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s +; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-NEXT: bl use +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: movi v0.4s, #2 +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_const_sub_nonsplat(<4 x i32> %arg) { +; CHECK-LABEL: const_sub_const_sub_nonsplat: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI26_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI26_0] +; CHECK-NEXT: adrp x8, .LCPI26_1 +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI26_1] +; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s +; CHECK-NEXT: add v0.4s, v0.4s, v2.4s +; CHECK-NEXT: ret + %t0 = sub <4 x i32> , %arg + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} diff --git a/llvm/test/CodeGen/AArch64/vec_add.ll b/llvm/test/CodeGen/AArch64/vec_add.ll deleted file mode 100644 index 208ad95..0000000 --- a/llvm/test/CodeGen/AArch64/vec_add.ll +++ /dev/null @@ -1,126 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s - -declare void @use(<4 x i32> %arg) - -define <2 x i64> @test(<2 x i64> %a, <2 x i64> %b) { -; CHECK-LABEL: test: -; CHECK: // %bb.0: -; CHECK-NEXT: add v0.2d, v1.2d, v0.2d -; CHECK-NEXT: ret - %tmp9 = add <2 x i64> %b, %a - ret <2 x i64> %tmp9 -} - -define <4 x i32> @add_const_add_const(<4 x i32> %arg) { -; CHECK-LABEL: add_const_add_const: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #10 -; CHECK-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-NEXT: ret - %t0 = add <4 x i32> %arg, - %t1 = add <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @add_const_sub_const(<4 x i32> %arg) { -; CHECK-LABEL: add_const_sub_const: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #8 -; CHECK-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-NEXT: mvni v1.4s, #1 -; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: ret - %t0 = add <4 x i32> %arg, - %t1 = sub <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @add_const_sub_const_extrause(<4 x i32> %arg) { -; CHECK-LABEL: add_const_sub_const_extrause: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: movi v1.4s, #8 -; CHECK-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: bl use -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: mvni v0.4s, #1 -; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s -; CHECK-NEXT: add sp, sp, #32 // =32 -; CHECK-NEXT: ret - %t0 = add <4 x i32> %arg, - call void @use(<4 x i32> %t0) - %t1 = sub <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @add_const_sub_const_nonsplat(<4 x i32> %arg) { -; CHECK-LABEL: add_const_sub_const_nonsplat: -; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, .LCPI4_0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI4_0] -; CHECK-NEXT: adrp x8, .LCPI4_1 -; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI4_1] -; CHECK-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-NEXT: sub v0.4s, v0.4s, v2.4s -; CHECK-NEXT: ret - %t0 = add <4 x i32> %arg, - %t1 = sub <4 x i32> %t0, - ret <4 x i32> %t1 
-} - -define <4 x i32> @sub_const_add_const(<4 x i32> %arg) { -; CHECK-LABEL: sub_const_add_const: -; CHECK: // %bb.0: -; CHECK-NEXT: mvni v1.4s, #1 -; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: movi v1.4s, #8 -; CHECK-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-NEXT: ret - %t0 = sub <4 x i32> %arg, - %t1 = add <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) { -; CHECK-LABEL: sub_const_add_const_extrause: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: mvni v1.4s, #1 -; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: bl use -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: movi v0.4s, #8 -; CHECK-NEXT: add v0.4s, v1.4s, v0.4s -; CHECK-NEXT: add sp, sp, #32 // =32 -; CHECK-NEXT: ret - %t0 = sub <4 x i32> %arg, - call void @use(<4 x i32> %t0) - %t1 = add <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @sub_const_add_const_nonsplat(<4 x i32> %arg) { -; CHECK-LABEL: sub_const_add_const_nonsplat: -; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, .LCPI7_0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI7_0] -; CHECK-NEXT: adrp x8, .LCPI7_1 -; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI7_1] -; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: add v0.4s, v0.4s, v2.4s -; CHECK-NEXT: ret - %t0 = sub <4 x i32> %arg, - %t1 = add <4 x i32> %t0, - ret <4 x i32> %t1 -} diff --git a/llvm/test/CodeGen/X86/addsub-constant-folding.ll b/llvm/test/CodeGen/X86/addsub-constant-folding.ll new file mode 100644 index 0000000..100c366 --- /dev/null +++ b/llvm/test/CodeGen/X86/addsub-constant-folding.ll @@ -0,0 +1,657 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,SSE,X86-SSE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64,SSE,X64-SSE + +declare void @use(<4 x i32> %arg) + +; (x+c1)+c2 + +define <4 x i32> @add_const_add_const(<4 x i32> %arg) { +; X86-LABEL: add_const_add_const: +; X86: # %bb.0: +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_const_add_const: +; X64: # %bb.0: +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_add_const_extrause(<4 x i32> %arg) { +; X86-LABEL: add_const_add_const_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: movdqa %xmm0, %xmm1 +; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill +; X86-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8] +; X86-NEXT: paddd %xmm1, %xmm0 +; X86-NEXT: calll use +; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: add_const_add_const_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: movdqa %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; X64-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8] +; X64-NEXT: paddd %xmm1, %xmm0 +; X64-NEXT: callq use +; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: addq $24, %rsp +; X64-NEXT: 
.cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_add_const_nonsplat(<4 x i32> %arg) { +; X86-LABEL: add_const_add_const_nonsplat: +; X86: # %bb.0: +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_const_add_const_nonsplat: +; X64: # %bb.0: +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +; (x+c1)-c2 + +define <4 x i32> @add_const_sub_const(<4 x i32> %arg) { +; X86-LABEL: add_const_sub_const: +; X86: # %bb.0: +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_const_sub_const: +; X64: # %bb.0: +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_sub_const_extrause(<4 x i32> %arg) { +; X86-LABEL: add_const_sub_const_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill +; X86-NEXT: calll use +; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: add_const_sub_const_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; X64-NEXT: callq use +; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_sub_const_nonsplat(<4 x i32> %arg) { +; X86-LABEL: add_const_sub_const_nonsplat: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = <4294967277,u,u,4294967290> +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_const_sub_const_nonsplat: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = <4294967277,u,u,4294967290> +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; c2-(x+c1) + +define <4 x i32> @add_const_const_sub(<4 x i32> %arg) { +; X86-LABEL: add_const_const_sub: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967290,4294967290,4294967290,4294967290] +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_const_const_sub: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = [4294967290,4294967290,4294967290,4294967290] +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_const_sub_extrause(<4 x i32> %arg) { +; X86-LABEL: add_const_const_sub_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: movdqa %xmm0, %xmm1 +; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill +; X86-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8] +; X86-NEXT: paddd %xmm1, %xmm0 +; X86-NEXT: calll use +; X86-NEXT: movdqa {{.*#+}} xmm0 = 
[4294967290,4294967290,4294967290,4294967290] +; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload +; X86-NEXT: psubd %xmm1, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: add_const_const_sub_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: movdqa %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; X64-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8] +; X64-NEXT: paddd %xmm1, %xmm0 +; X64-NEXT: callq use +; X64-NEXT: movdqa {{.*#+}} xmm0 = [4294967290,4294967290,4294967290,4294967290] +; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload +; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @add_const_const_sub_nonsplat(<4 x i32> %arg) { +; X86-LABEL: add_const_const_sub_nonsplat: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = <4294967277,u,u,4294967290> +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_const_const_sub_nonsplat: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = <4294967277,u,u,4294967290> +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = add <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; (x-c1)+c2 + +define <4 x i32> @sub_const_add_const(<4 x i32> %arg) { +; X86-LABEL: sub_const_add_const: +; X86: # %bb.0: +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: sub_const_add_const: +; X64: # %bb.0: +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) { +; X86-LABEL: sub_const_add_const_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill +; X86-NEXT: calll use +; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: sub_const_add_const_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; X64-NEXT: callq use +; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_add_const_nonsplat(<4 x i32> %arg) { +; X86-LABEL: sub_const_add_const_nonsplat: +; X86: # %bb.0: +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: sub_const_add_const_nonsplat: +; X64: # %bb.0: +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +; (x-c1)-c2 + +define <4 x i32> @sub_const_sub_const(<4 x i32> %arg) { +; X86-LABEL: sub_const_sub_const: +; X86: # %bb.0: +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; 
+; X64-LABEL: sub_const_sub_const: +; X64: # %bb.0: +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_sub_const_extrause(<4 x i32> %arg) { +; X86-LABEL: sub_const_sub_const_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill +; X86-NEXT: calll use +; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: sub_const_sub_const_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; X64-NEXT: callq use +; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_sub_const_nonsplat(<4 x i32> %arg) { +; X86-LABEL: sub_const_sub_const_nonsplat: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = <23,u,u,10> +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: sub_const_sub_const_nonsplat: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = <23,u,u,10> +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; c2-(x-c1) + +define <4 x i32> @sub_const_const_sub(<4 x i32> %arg) { +; X86-LABEL: sub_const_const_sub: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = [10,10,10,10] +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: sub_const_const_sub: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = [10,10,10,10] +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_const_sub_extrause(<4 x i32> %arg) { +; X86-LABEL: sub_const_const_sub_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill +; X86-NEXT: calll use +; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2] +; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload +; X86-NEXT: psubd %xmm1, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: sub_const_const_sub_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; X64-NEXT: callq use +; X64-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2] +; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload +; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @sub_const_const_sub_nonsplat(<4 x i32> %arg) { +; X86-LABEL: sub_const_const_sub_nonsplat: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = <23,u,u,10> +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: 
movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: sub_const_const_sub_nonsplat: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = <23,u,u,10> +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> %arg, + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; (c1-x)+c2 + +define <4 x i32> @const_sub_add_const(<4 x i32> %arg) { +; X86-LABEL: const_sub_add_const: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = [10,10,10,10] +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_add_const: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = [10,10,10,10] +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_add_const_extrause(<4 x i32> %arg) { +; X86-LABEL: const_sub_add_const_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: movdqa %xmm0, %xmm1 +; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill +; X86-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8] +; X86-NEXT: psubd %xmm1, %xmm0 +; X86-NEXT: calll use +; X86-NEXT: movdqa {{.*#+}} xmm0 = [10,10,10,10] +; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload +; X86-NEXT: psubd %xmm1, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_add_const_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: movdqa %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; X64-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8] +; X64-NEXT: psubd %xmm1, %xmm0 +; X64-NEXT: callq use +; X64-NEXT: movdqa {{.*#+}} xmm0 = [10,10,10,10] +; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload +; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + call void @use(<4 x i32> %t0) + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_add_const_nonsplat(<4 x i32> %arg) { +; X86-LABEL: const_sub_add_const_nonsplat: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = <23,u,u,10> +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_add_const_nonsplat: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = <23,u,u,10> +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + %t1 = add <4 x i32> %t0, + ret <4 x i32> %t1 +} + +; (c1-x)-c2 + +define <4 x i32> @const_sub_sub_const(<4 x i32> %arg) { +; X86-LABEL: const_sub_sub_const: +; X86: # %bb.0: +; X86-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8] +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm1 +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_sub_const: +; X64: # %bb.0: +; X64-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8] +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: psubd {{.*}}(%rip), %xmm1 +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_sub_const_extrause(<4 x i32> %arg) { +; X86-LABEL: const_sub_sub_const_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8] +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqu %xmm1, (%esp) # 16-byte Spill +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: calll use +; 
X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_sub_const_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8] +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: callq use +; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> %t0, + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_sub_const_nonsplat(<4 x i32> %arg) { +; X86-LABEL: const_sub_sub_const_nonsplat: +; X86: # %bb.0: +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_sub_const_nonsplat: +; X64: # %bb.0: +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +; c2-(c1-x) + +define <4 x i32> @const_sub_const_sub(<4 x i32> %arg) { +; X86-LABEL: const_sub_const_sub: +; X86: # %bb.0: +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_const_sub: +; X64: # %bb.0: +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_const_sub_extrause(<4 x i32> %arg) { +; X86-LABEL: const_sub_const_sub_extrause: +; X86: # %bb.0: +; X86-NEXT: subl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8] +; X86-NEXT: psubd %xmm0, %xmm1 +; X86-NEXT: movdqu %xmm1, (%esp) # 16-byte Spill +; X86-NEXT: movdqa %xmm1, %xmm0 +; X86-NEXT: calll use +; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2] +; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload +; X86-NEXT: psubd %xmm1, %xmm0 +; X86-NEXT: addl $28, %esp +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_const_sub_extrause: +; X64: # %bb.0: +; X64-NEXT: subq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8] +; X64-NEXT: psubd %xmm0, %xmm1 +; X64-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill +; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: callq use +; X64-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2] +; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload +; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + call void @use(<4 x i32> %t0) + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} + +define <4 x i32> @const_sub_const_sub_nonsplat(<4 x i32> %arg) { +; X86-LABEL: const_sub_const_sub_nonsplat: +; X86: # %bb.0: +; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: const_sub_const_sub_nonsplat: +; X64: # %bb.0: +; X64-NEXT: psubd {{.*}}(%rip), %xmm0 +; X64-NEXT: paddd {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %t0 = sub <4 x i32> , %arg + %t1 = sub <4 x i32> , %t0 + ret <4 x i32> %t1 +} diff --git a/llvm/test/CodeGen/X86/vec_add.ll b/llvm/test/CodeGen/X86/vec_add.ll deleted file mode 100644 index 6e34397..0000000 --- a/llvm/test/CodeGen/X86/vec_add.ll +++ /dev/null @@ -1,166 +0,0 @@ -; NOTE: 
Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,SSE,X86-SSE -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64,SSE,X64-SSE - -declare void @use(<4 x i32> %arg) - -define <2 x i64> @test(<2 x i64> %a, <2 x i64> %b) { -; X86-LABEL: test: -; X86: # %bb.0: -; X86-NEXT: paddq %xmm1, %xmm0 -; X86-NEXT: retl -; -; X64-LABEL: test: -; X64: # %bb.0: -; X64-NEXT: paddq %xmm1, %xmm0 -; X64-NEXT: retq - %tmp9 = add <2 x i64> %b, %a - ret <2 x i64> %tmp9 -} - -define <4 x i32> @add_const_add_const(<4 x i32> %arg) { -; X86-LABEL: add_const_add_const: -; X86: # %bb.0: -; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: retl -; -; X64-LABEL: add_const_add_const: -; X64: # %bb.0: -; X64-NEXT: paddd {{.*}}(%rip), %xmm0 -; X64-NEXT: retq - %t0 = add <4 x i32> %arg, - %t1 = add <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @add_const_sub_const(<4 x i32> %arg) { -; X86-LABEL: add_const_sub_const: -; X86: # %bb.0: -; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: retl -; -; X64-LABEL: add_const_sub_const: -; X64: # %bb.0: -; X64-NEXT: paddd {{.*}}(%rip), %xmm0 -; X64-NEXT: psubd {{.*}}(%rip), %xmm0 -; X64-NEXT: retq - %t0 = add <4 x i32> %arg, - %t1 = sub <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @add_const_sub_const_extrause(<4 x i32> %arg) { -; X86-LABEL: add_const_sub_const_extrause: -; X86: # %bb.0: -; X86-NEXT: subl $28, %esp -; X86-NEXT: .cfi_def_cfa_offset 32 -; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill -; X86-NEXT: calll use -; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload -; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: addl $28, %esp -; X86-NEXT: .cfi_def_cfa_offset 4 -; X86-NEXT: retl -; -; X64-LABEL: add_const_sub_const_extrause: -; X64: # %bb.0: -; X64-NEXT: subq $24, %rsp -; X64-NEXT: .cfi_def_cfa_offset 32 -; X64-NEXT: paddd {{.*}}(%rip), %xmm0 -; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill -; X64-NEXT: callq use -; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload -; X64-NEXT: psubd {{.*}}(%rip), %xmm0 -; X64-NEXT: addq $24, %rsp -; X64-NEXT: .cfi_def_cfa_offset 8 -; X64-NEXT: retq - %t0 = add <4 x i32> %arg, - call void @use(<4 x i32> %t0) - %t1 = sub <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @add_const_sub_const_nonsplat(<4 x i32> %arg) { -; X86-LABEL: add_const_sub_const_nonsplat: -; X86: # %bb.0: -; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: retl -; -; X64-LABEL: add_const_sub_const_nonsplat: -; X64: # %bb.0: -; X64-NEXT: paddd {{.*}}(%rip), %xmm0 -; X64-NEXT: psubd {{.*}}(%rip), %xmm0 -; X64-NEXT: retq - %t0 = add <4 x i32> %arg, - %t1 = sub <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @sub_const_add_const(<4 x i32> %arg) { -; X86-LABEL: sub_const_add_const: -; X86: # %bb.0: -; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: retl -; -; X64-LABEL: sub_const_add_const: -; X64: # %bb.0: -; X64-NEXT: psubd {{.*}}(%rip), %xmm0 -; X64-NEXT: paddd {{.*}}(%rip), %xmm0 -; X64-NEXT: retq - %t0 = sub <4 x i32> %arg, - %t1 = add <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) { -; X86-LABEL: sub_const_add_const_extrause: -; X86: # %bb.0: -; X86-NEXT: subl $28, %esp -; X86-NEXT: .cfi_def_cfa_offset 32 -; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: 
movdqu %xmm0, (%esp) # 16-byte Spill -; X86-NEXT: calll use -; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload -; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: addl $28, %esp -; X86-NEXT: .cfi_def_cfa_offset 4 -; X86-NEXT: retl -; -; X64-LABEL: sub_const_add_const_extrause: -; X64: # %bb.0: -; X64-NEXT: subq $24, %rsp -; X64-NEXT: .cfi_def_cfa_offset 32 -; X64-NEXT: psubd {{.*}}(%rip), %xmm0 -; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill -; X64-NEXT: callq use -; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload -; X64-NEXT: paddd {{.*}}(%rip), %xmm0 -; X64-NEXT: addq $24, %rsp -; X64-NEXT: .cfi_def_cfa_offset 8 -; X64-NEXT: retq - %t0 = sub <4 x i32> %arg, - call void @use(<4 x i32> %t0) - %t1 = add <4 x i32> %t0, - ret <4 x i32> %t1 -} - -define <4 x i32> @sub_const_add_const_nonsplat(<4 x i32> %arg) { -; X86-LABEL: sub_const_add_const_nonsplat: -; X86: # %bb.0: -; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 -; X86-NEXT: retl -; -; X64-LABEL: sub_const_add_const_nonsplat: -; X64: # %bb.0: -; X64-NEXT: psubd {{.*}}(%rip), %xmm0 -; X64-NEXT: paddd {{.*}}(%rip), %xmm0 -; X64-NEXT: retq - %t0 = sub <4 x i32> %arg, - %t1 = add <4 x i32> %t0, - ret <4 x i32> %t1 -} -- 2.7.4
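
For reference, a minimal standalone sketch of the splat-constant pattern these tests exercise, with the vector constant operands written out explicitly (c1 = 8 and c2 = 2, as inferred from the movi v1.4s, #8 / #2 / #10 CHECK lines above). This is a hypothetical reproducer, not part of the patch; after constant folding, the two adds should collapse into a single add of <10,10,10,10>.

; Runnable with: llc -mtriple=aarch64-unknown-unknown
define <4 x i32> @add_const_add_const_sketch(<4 x i32> %arg) {
  %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8> ; x + c1
  %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>  ; (x + c1) + c2, foldable to x + 10
  ret <4 x i32> %t1
}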