From: Sanjay Patel
Date: Fri, 14 Dec 2018 16:44:58 +0000 (+0000)
Subject: [x86] make tests immune to scalarization improvements; NFC
X-Git-Tag: llvmorg-8.0.0-rc1~2101
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b7d9f9117e68fab32607d4cc364c0d8018ad2d33;p=platform%2Fupstream%2Fllvm.git

[x86] make tests immune to scalarization improvements; NFC

llvm-svn: 349160
---
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avx1.ll b/llvm/test/CodeGen/X86/stack-folding-int-avx1.ll
index 8746720..91bf9e2 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-avx1.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avx1.ll
@@ -72,11 +72,11 @@ define <4 x i32> @stack_fold_movd_load(i32 %a0) {
   ret <4 x i32> %3
 }
 
-define i32 @stack_fold_movd_store(<4 x i32> %a0) {
+define i32 @stack_fold_movd_store(<4 x i32> %a0, <4 x i32> %a1) {
   ;CHECK-LABEL: stack_fold_movd_store
   ;CHECK: movd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
   ; add forces execution domain
-  %1 = add <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
+  %1 = add <4 x i32> %a0, %a1
   %2 = extractelement <4 x i32> %1, i32 0
   %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
   ret i32 %2
@@ -92,11 +92,11 @@ define <2 x i64> @stack_fold_movq_load(<2 x i64> %a0) {
   ret <2 x i64> %3
 }
 
-define i64 @stack_fold_movq_store(<2 x i64> %a0) {
+define i64 @stack_fold_movq_store(<2 x i64> %a0, <2 x i64> %a1) {
   ;CHECK-LABEL: stack_fold_movq_store
   ;CHECK: movq {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
   ; add forces execution domain
-  %1 = add <2 x i64> %a0, <i64 1, i64 1>
+  %1 = add <2 x i64> %a0, %a1
   %2 = extractelement <2 x i64> %1, i32 0
   %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
   ret i64 %2
@@ -436,12 +436,12 @@ declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwin
 
 ; TODO stack_fold_pextrb
 
-define i32 @stack_fold_pextrd(<4 x i32> %a0) {
+define i32 @stack_fold_pextrd(<4 x i32> %a0, <4 x i32> %a1) {
   ;CHECK-LABEL: stack_fold_pextrd
   ;CHECK: vpextrd $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
   ;CHECK: movl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
   ; add forces execution domain
-  %1 = add <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
+  %1 = add <4 x i32> %a0, %a1
   %2 = extractelement <4 x i32> %1, i32 1
   %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
   ret i32 %2
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avx512.ll b/llvm/test/CodeGen/X86/stack-folding-int-avx512.ll
index 01ae7ff..5464775 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-avx512.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avx512.ll
@@ -836,12 +836,12 @@ define <32 x i16> @stack_fold_permwvar_maskz(<32 x i16> %a0, <32 x i16> %a1, i32
   ret <32 x i16> %4
 }
 
-define i32 @stack_fold_pextrd(<4 x i32> %a0) {
+define i32 @stack_fold_pextrd(<4 x i32> %a0, <4 x i32> %a1) {
   ;CHECK-LABEL: stack_fold_pextrd
   ;CHECK: vpextrd $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
   ;CHECK: movl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
   ; add forces execution domain
-  %1 = add <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
+  %1 = add <4 x i32> %a0, %a1
   %2 = extractelement <4 x i32> %1, i32 1
   %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
   ret i32 %2
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-sse42.ll b/llvm/test/CodeGen/X86/stack-folding-int-sse42.ll
index cabfa92..3c5aeb7 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-sse42.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-sse42.ll
@@ -108,11 +108,11 @@ define <4 x i32> @stack_fold_movd_load(i32 %a0) {
   ret <4 x i32> %3
 }
 
-define i32 @stack_fold_movd_store(<4 x i32> %a0) {
+define i32 @stack_fold_movd_store(<4 x i32> %a0, <4 x i32> %a1) {
   ;CHECK-LABEL: stack_fold_movd_store
   ;CHECK: movd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
   ; add forces execution domain
-  %1 = add <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
+  %1 = add <4 x i32> %a0, %a1
   %2 = extractelement <4 x i32> %1, i32 0
   %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
   ret i32 %2
@@ -128,11 +128,11 @@ define <2 x i64> @stack_fold_movq_load(<2 x i64> %a0) {
   ret <2 x i64> %3
 }
 
-define i64 @stack_fold_movq_store(<2 x i64> %a0) {
+define i64 @stack_fold_movq_store(<2 x i64> %a0, <2 x i64> %a1) {
   ;CHECK-LABEL: stack_fold_movq_store
   ;CHECK: movq {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
   ; add forces execution domain
-  %1 = add <2 x i64> %a0, <i64 1, i64 1>
+  %1 = add <2 x i64> %a0, %a1
   %2 = extractelement <2 x i64> %1, i32 0
   %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
   ret i64 %2
@@ -487,12 +487,12 @@ entry:
   ret i16 %extract
 }
 
-define i32 @stack_fold_pextrd(<4 x i32> %a0) {
+define i32 @stack_fold_pextrd(<4 x i32> %a0, <4 x i32> %a1) {
   ;CHECK-LABEL: stack_fold_pextrd
   ;CHECK: pextrd $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
   ;CHECK: movl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
   ; add forces execution domain
-  %1 = add <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
+  %1 = add <4 x i32> %a0, %a1
   %2 = extractelement <4 x i32> %1, i32 1
   %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
   ret i32 %2
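
For context, a minimal sketch of the test idiom these changes preserve. It is illustrative only and not part of the patch; the function name @spill_lane1_sketch is made up, and the constant values shown in the removed lines above are assumed from the usual idiom in these files.

; Adding two live vector arguments (rather than a vector constant) keeps the
; computation in the integer vector execution domain, so an improved
; scalarizer should not simply rewrite the extract of lane 1 as a scalar add
; against a constant, and the folded-spill behavior the CHECK lines look for
; is still exercised.
define i32 @spill_lane1_sketch(<4 x i32> %a0, <4 x i32> %a1) {
  %sum = add <4 x i32> %a0, %a1
  %lane1 = extractelement <4 x i32> %sum, i32 1
  ret i32 %lane1
}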