From b85f42a4a6dd08097a0f4f7be3a46ec8153dc89f Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Wed, 31 May 2023 10:56:41 +0100 Subject: [PATCH] [X86] Replace X32 test check prefixes with X86 We try to only use X32 for gnux32 triple test cases --- llvm/test/CodeGen/X86/vec_insert-2.ll | 42 +++++------ llvm/test/CodeGen/X86/vec_insert-3.ll | 16 ++--- llvm/test/CodeGen/X86/vec_insert-4.ll | 26 +++---- llvm/test/CodeGen/X86/vec_insert-5.ll | 120 ++++++++++++++++---------------- llvm/test/CodeGen/X86/vec_insert-8.ll | 58 +++++++-------- llvm/test/CodeGen/X86/vec_insert-9.ll | 12 ++-- llvm/test/CodeGen/X86/vec_insert-mmx.ll | 46 ++++++------ 7 files changed, 160 insertions(+), 160 deletions(-) diff --git a/llvm/test/CodeGen/X86/vec_insert-2.ll b/llvm/test/CodeGen/X86/vec_insert-2.ll index 33de9dd..bd16085 100644 --- a/llvm/test/CodeGen/X86/vec_insert-2.ll +++ b/llvm/test/CodeGen/X86/vec_insert-2.ll @@ -1,14 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X86 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X64 define <4 x float> @t1(float %s, <4 x float> %tmp) nounwind { -; X32-LABEL: t1: -; X32: # %bb.0: -; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3] -; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0] -; X32-NEXT: retl +; X86-LABEL: t1: +; X86: # %bb.0: +; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3] +; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0] +; X86-NEXT: retl ; ; X64-LABEL: t1: ; X64: # %bb.0: @@ -21,12 +21,12 @@ define <4 x float> @t1(float %s, <4 x float> %tmp) nounwind { } define <4 x i32> @t2(i32 %s, <4 x i32> %tmp) nounwind { -; X32-LABEL: t2: -; X32: # %bb.0: -; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3] -; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0] -; X32-NEXT: retl +; X86-LABEL: t2: +; X86: # %bb.0: +; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3] +; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0] +; X86-NEXT: retl ; ; X64-LABEL: t2: ; X64: # %bb.0: @@ -39,10 +39,10 @@ define <4 x i32> @t2(i32 %s, <4 x i32> %tmp) nounwind { } define <2 x double> @t3(double %s, <2 x double> %tmp) nounwind { -; X32-LABEL: t3: -; X32: # %bb.0: -; X32-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1] -; X32-NEXT: retl +; X86-LABEL: t3: +; X86: # %bb.0: +; X86-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1] +; X86-NEXT: retl ; ; X64-LABEL: t3: ; X64: # %bb.0: @@ -54,10 +54,10 @@ define <2 x double> @t3(double %s, <2 x double> %tmp) nounwind { } define <8 x i16> @t4(i16 %s, <8 x i16> %tmp) nounwind { -; X32-LABEL: t4: -; X32: # %bb.0: -; X32-NEXT: pinsrw $5, {{[0-9]+}}(%esp), %xmm0 -; X32-NEXT: retl +; X86-LABEL: t4: +; X86: # %bb.0: +; X86-NEXT: pinsrw $5, {{[0-9]+}}(%esp), %xmm0 +; X86-NEXT: retl ; ; X64-LABEL: t4: ; X64: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vec_insert-3.ll b/llvm/test/CodeGen/X86/vec_insert-3.ll index 9fb6fa9..9a5e1de 100644 --- a/llvm/test/CodeGen/X86/vec_insert-3.ll +++ b/llvm/test/CodeGen/X86/vec_insert-3.ll @@ -1,15 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s 
-mtriple=i686-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X86 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X64 define <2 x i64> @t1(i64 %s, <2 x i64> %tmp) nounwind { -; X32-LABEL: t1: -; X32: # %bb.0: -; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; X32-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0] -; X32-NEXT: retl +; X86-LABEL: t1: +; X86: # %bb.0: +; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; X86-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0] +; X86-NEXT: retl ; ; X64-LABEL: t1: ; X64: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vec_insert-4.ll b/llvm/test/CodeGen/X86/vec_insert-4.ll index ed8833b..0182391 100644 --- a/llvm/test/CodeGen/X86/vec_insert-4.ll +++ b/llvm/test/CodeGen/X86/vec_insert-4.ll @@ -1,20 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-apple-darwin9.2.2 -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=i686-apple-darwin9.2.2 -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X86 ; RUN: llc < %s -mtriple=x86_64-apple-darwin9.2.2 -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X64 define <8 x float> @f(<8 x float> %a, i32 %b) nounwind { -; X32-LABEL: f: -; X32: ## %bb.0: ## %entry -; X32-NEXT: subl $44, %esp -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: andl $7, %eax -; X32-NEXT: movaps %xmm1, {{[0-9]+}}(%esp) -; X32-NEXT: movaps %xmm0, (%esp) -; X32-NEXT: movl $1084227584, (%esp,%eax,4) ## imm = 0x40A00000 -; X32-NEXT: movaps (%esp), %xmm0 -; X32-NEXT: movaps {{[0-9]+}}(%esp), %xmm1 -; X32-NEXT: addl $44, %esp -; X32-NEXT: retl +; X86-LABEL: f: +; X86: ## %bb.0: ## %entry +; X86-NEXT: subl $44, %esp +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: andl $7, %eax +; X86-NEXT: movaps %xmm1, {{[0-9]+}}(%esp) +; X86-NEXT: movaps %xmm0, (%esp) +; X86-NEXT: movl $1084227584, (%esp,%eax,4) ## imm = 0x40A00000 +; X86-NEXT: movaps (%esp), %xmm0 +; X86-NEXT: movaps {{[0-9]+}}(%esp), %xmm1 +; X86-NEXT: addl $44, %esp +; X86-NEXT: retl ; ; X64-LABEL: f: ; X64: ## %bb.0: ## %entry diff --git a/llvm/test/CodeGen/X86/vec_insert-5.ll b/llvm/test/CodeGen/X86/vec_insert-5.ll index 38ee059..be15596 100644 --- a/llvm/test/CodeGen/X86/vec_insert-5.ll +++ b/llvm/test/CodeGen/X86/vec_insert-5.ll @@ -1,20 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,+ssse3 | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2,+ssse3 | FileCheck %s --check-prefix=X86 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+ssse3 | FileCheck %s --check-prefixes=X64,ALIGN ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+ssse3,sse-unaligned-mem | FileCheck %s --check-prefixes=X64,UNALIGN ; There are no MMX operations in @t1 define void @t1(i32 %a, ptr %P) nounwind { -; X32-LABEL: t1: -; X32: # %bb.0: -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: shll $12, %ecx -; X32-NEXT: movd %ecx, %xmm0 -; X32-NEXT: psllq $32, %xmm0 -; X32-NEXT: movq %xmm0, (%eax) -; X32-NEXT: retl +; X86-LABEL: t1: +; X86: # %bb.0: +; X86-NEXT: movl 
{{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: shll $12, %ecx +; X86-NEXT: movd %ecx, %xmm0 +; X86-NEXT: psllq $32, %xmm0 +; X86-NEXT: movq %xmm0, (%eax) +; X86-NEXT: retl ; ; X64-LABEL: t1: ; X64: # %bb.0: @@ -32,14 +32,14 @@ define void @t1(i32 %a, ptr %P) nounwind { } define <4 x float> @t2(ptr %P) nounwind { -; X32-LABEL: t2: -; X32: # %bb.0: -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: xorps %xmm0, %xmm0 -; X32-NEXT: xorps %xmm1, %xmm1 -; X32-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] -; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2] -; X32-NEXT: retl +; X86-LABEL: t2: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: xorps %xmm0, %xmm0 +; X86-NEXT: xorps %xmm1, %xmm1 +; X86-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2] +; X86-NEXT: retl ; ; X64-LABEL: t2: ; X64: # %bb.0: @@ -54,12 +54,12 @@ define <4 x float> @t2(ptr %P) nounwind { } define <4 x float> @t3(ptr %P) nounwind { -; X32-LABEL: t3: -; X32: # %bb.0: -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: xorps %xmm0, %xmm0 -; X32-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3] -; X32-NEXT: retl +; X86-LABEL: t3: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: xorps %xmm0, %xmm0 +; X86-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3] +; X86-NEXT: retl ; ; X64-LABEL: t3: ; X64: # %bb.0: @@ -72,14 +72,14 @@ define <4 x float> @t3(ptr %P) nounwind { } define <4 x float> @t4(ptr %P) nounwind { -; X32-LABEL: t4: -; X32: # %bb.0: -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: xorps %xmm1, %xmm1 -; X32-NEXT: xorps %xmm0, %xmm0 -; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],mem[3,0] -; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3] -; X32-NEXT: retl +; X86-LABEL: t4: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: xorps %xmm1, %xmm1 +; X86-NEXT: xorps %xmm0, %xmm0 +; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],mem[3,0] +; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3] +; X86-NEXT: retl ; ; X64-LABEL: t4: ; X64: # %bb.0: @@ -94,14 +94,14 @@ define <4 x float> @t4(ptr %P) nounwind { } define <4 x float> @t4_under_aligned(ptr %P) nounwind { -; X32-LABEL: t4_under_aligned: -; X32: # %bb.0: -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movups (%eax), %xmm0 -; X32-NEXT: xorps %xmm1, %xmm1 -; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[1,0] -; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3] -; X32-NEXT: retl +; X86-LABEL: t4_under_aligned: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movups (%eax), %xmm0 +; X86-NEXT: xorps %xmm1, %xmm1 +; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[1,0] +; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3] +; X86-NEXT: retl ; ; ALIGN-LABEL: t4_under_aligned: ; ALIGN: # %bb.0: @@ -124,10 +124,10 @@ define <4 x float> @t4_under_aligned(ptr %P) nounwind { } define <16 x i8> @t5(<16 x i8> %x) nounwind { -; X32-LABEL: t5: -; X32: # %bb.0: -; X32-NEXT: psrlw $8, %xmm0 -; X32-NEXT: retl +; X86-LABEL: t5: +; X86: # %bb.0: +; X86-NEXT: psrlw $8, %xmm0 +; X86-NEXT: retl ; ; X64-LABEL: t5: ; X64: # %bb.0: @@ -138,10 +138,10 @@ define <16 x i8> @t5(<16 x i8> %x) nounwind { } define <16 x i8> @t6(<16 x i8> %x) nounwind { -; X32-LABEL: t6: -; X32: # %bb.0: -; X32-NEXT: psrlw $8, %xmm0 -; X32-NEXT: retl +; X86-LABEL: t6: +; X86: # %bb.0: +; X86-NEXT: psrlw $8, %xmm0 +; X86-NEXT: retl ; ; X64-LABEL: t6: ; X64: # %bb.0: @@ -152,10 +152,10 @@ define <16 x i8> @t6(<16 x i8> %x) nounwind { } 
define <16 x i8> @t7(<16 x i8> %x) nounwind { -; X32-LABEL: t7: -; X32: # %bb.0: -; X32-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2] -; X32-NEXT: retl +; X86-LABEL: t7: +; X86: # %bb.0: +; X86-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2] +; X86-NEXT: retl ; ; X64-LABEL: t7: ; X64: # %bb.0: @@ -166,10 +166,10 @@ define <16 x i8> @t7(<16 x i8> %x) nounwind { } define <16 x i8> @t8(<16 x i8> %x) nounwind { -; X32-LABEL: t8: -; X32: # %bb.0: -; X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero -; X32-NEXT: retl +; X86-LABEL: t8: +; X86: # %bb.0: +; X86-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero +; X86-NEXT: retl ; ; X64-LABEL: t8: ; X64: # %bb.0: @@ -180,10 +180,10 @@ define <16 x i8> @t8(<16 x i8> %x) nounwind { } define <16 x i8> @t9(<16 x i8> %x) nounwind { -; X32-LABEL: t9: -; X32: # %bb.0: -; X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero -; X32-NEXT: retl +; X86-LABEL: t9: +; X86: # %bb.0: +; X86-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero +; X86-NEXT: retl ; ; X64-LABEL: t9: ; X64: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vec_insert-8.ll b/llvm/test/CodeGen/X86/vec_insert-8.ll index a89b967..aa3364b 100644 --- a/llvm/test/CodeGen/X86/vec_insert-8.ll +++ b/llvm/test/CodeGen/X86/vec_insert-8.ll @@ -1,25 +1,25 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64 ; tests variable insert and extract of a 4 x i32 define <4 x i32> @var_insert(<4 x i32> %x, i32 %val, i32 %idx) nounwind { -; X32-LABEL: var_insert: -; X32: # %bb.0: # %entry -; X32-NEXT: pushl %ebp -; X32-NEXT: movl %esp, %ebp -; X32-NEXT: andl $-16, %esp -; X32-NEXT: subl $32, %esp -; X32-NEXT: movl 12(%ebp), %eax -; X32-NEXT: andl $3, %eax -; X32-NEXT: movl 8(%ebp), %ecx -; X32-NEXT: movaps %xmm0, (%esp) -; X32-NEXT: movl %ecx, (%esp,%eax,4) -; X32-NEXT: movaps (%esp), %xmm0 -; X32-NEXT: movl %ebp, %esp -; X32-NEXT: popl %ebp -; X32-NEXT: retl +; X86-LABEL: var_insert: +; X86: # %bb.0: # %entry +; X86-NEXT: pushl %ebp +; X86-NEXT: movl %esp, %ebp +; X86-NEXT: andl $-16, %esp +; X86-NEXT: subl $32, %esp +; X86-NEXT: movl 12(%ebp), %eax +; X86-NEXT: andl $3, %eax +; X86-NEXT: movl 8(%ebp), %ecx +; X86-NEXT: movaps %xmm0, (%esp) +; X86-NEXT: movl %ecx, (%esp,%eax,4) +; X86-NEXT: movaps (%esp), %xmm0 +; X86-NEXT: movl %ebp, %esp +; X86-NEXT: popl %ebp +; X86-NEXT: retl ; ; X64-LABEL: var_insert: ; X64: # %bb.0: # %entry @@ -35,19 +35,19 @@ entry: } define i32 @var_extract(<4 x i32> %x, i32 %idx) nounwind { -; X32-LABEL: var_extract: -; X32: # %bb.0: # %entry -; X32-NEXT: pushl %ebp -; X32-NEXT: movl %esp, %ebp -; X32-NEXT: andl $-16, %esp -; X32-NEXT: subl $32, %esp -; X32-NEXT: movl 8(%ebp), %eax -; X32-NEXT: andl $3, %eax -; X32-NEXT: movaps %xmm0, (%esp) -; X32-NEXT: movl (%esp,%eax,4), %eax -; X32-NEXT: movl %ebp, %esp -; X32-NEXT: popl %ebp -; X32-NEXT: retl +; X86-LABEL: var_extract: +; X86: # %bb.0: # %entry +; X86-NEXT: pushl %ebp +; X86-NEXT: movl %esp, %ebp +; X86-NEXT: andl $-16, %esp +; X86-NEXT: subl $32, %esp +; X86-NEXT: movl 8(%ebp), %eax +; X86-NEXT: andl $3, %eax +; X86-NEXT: 
movaps %xmm0, (%esp) +; X86-NEXT: movl (%esp,%eax,4), %eax +; X86-NEXT: movl %ebp, %esp +; X86-NEXT: popl %ebp +; X86-NEXT: retl ; ; X64-LABEL: var_extract: ; X64: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/vec_insert-9.ll b/llvm/test/CodeGen/X86/vec_insert-9.ll index a750c6f..be77dc4 100644 --- a/llvm/test/CodeGen/X86/vec_insert-9.ll +++ b/llvm/test/CodeGen/X86/vec_insert-9.ll @@ -1,13 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64 define <4 x i32> @var_insert2(<4 x i32> %x, i32 %val, i32 %idx) nounwind { -; X32-LABEL: var_insert2: -; X32: # %bb.0: # %entry -; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0 -; X32-NEXT: retl +; X86-LABEL: var_insert2: +; X86: # %bb.0: # %entry +; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; X86-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0 +; X86-NEXT: retl ; ; X64-LABEL: var_insert2: ; X64: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/vec_insert-mmx.ll b/llvm/test/CodeGen/X86/vec_insert-mmx.ll index 6e7b939..72b71fa 100644 --- a/llvm/test/CodeGen/X86/vec_insert-mmx.ll +++ b/llvm/test/CodeGen/X86/vec_insert-mmx.ll @@ -1,15 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86 ; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse4.1 | FileCheck %s --check-prefix=X64 ; This is not an MMX operation; promoted to xmm. 
define x86_mmx @t0(i32 %A) nounwind { -; X32-LABEL: t0: -; X32: ## %bb.0: -; X32-NEXT: movd {{[0-9]+}}(%esp), %mm1 -; X32-NEXT: pxor %mm0, %mm0 -; X32-NEXT: punpckldq %mm1, %mm0 ## mm0 = mm0[0],mm1[0] -; X32-NEXT: retl +; X86-LABEL: t0: +; X86: ## %bb.0: +; X86-NEXT: movd {{[0-9]+}}(%esp), %mm1 +; X86-NEXT: pxor %mm0, %mm0 +; X86-NEXT: punpckldq %mm1, %mm0 ## mm0 = mm0[0],mm1[0] +; X86-NEXT: retl ; ; X64-LABEL: t0: ; X64: ## %bb.0: @@ -22,10 +22,10 @@ define x86_mmx @t0(i32 %A) nounwind { } define <8 x i8> @t1(i8 zeroext %x) nounwind { -; X32-LABEL: t1: -; X32: ## %bb.0: -; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X32-NEXT: retl +; X86-LABEL: t1: +; X86: ## %bb.0: +; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; X86-NEXT: retl ; ; X64-LABEL: t1: ; X64: ## %bb.0: @@ -37,10 +37,10 @@ define <8 x i8> @t1(i8 zeroext %x) nounwind { ; PR2574 define <2 x float> @t2(<2 x float> %a0) { -; X32-LABEL: t2: -; X32: ## %bb.0: -; X32-NEXT: xorps %xmm0, %xmm0 -; X32-NEXT: retl +; X86-LABEL: t2: +; X86: ## %bb.0: +; X86-NEXT: xorps %xmm0, %xmm0 +; X86-NEXT: retl ; ; X64-LABEL: t2: ; X64: ## %bb.0: @@ -56,14 +56,14 @@ define <2 x float> @t2(<2 x float> %a0) { ; PR2562 define void @t3() { -; X32-LABEL: t3: -; X32: ## %bb.0: -; X32-NEXT: movl L_g0$non_lazy_ptr, %eax -; X32-NEXT: movl L_g1$non_lazy_ptr, %ecx -; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero -; X32-NEXT: pinsrw $0, (%eax), %xmm0 -; X32-NEXT: movq %xmm0, (%ecx) -; X32-NEXT: retl +; X86-LABEL: t3: +; X86: ## %bb.0: +; X86-NEXT: movl L_g0$non_lazy_ptr, %eax +; X86-NEXT: movl L_g1$non_lazy_ptr, %ecx +; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-NEXT: pinsrw $0, (%eax), %xmm0 +; X86-NEXT: movq %xmm0, (%ecx) +; X86-NEXT: retl ; ; X64-LABEL: t3: ; X64: ## %bb.0: -- 2.7.4
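
For illustration only, a sketch of the prefix convention the commit message refers to (not part of the patch above, and the -mattr flags here are hypothetical): after this rename, X32 stays reserved for RUN lines targeting the gnux32 (x32 ABI) triple, while plain 32-bit x86 RUN lines use X86 and 64-bit ones use X64. A test exercising all three might declare:

; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=+sse2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64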