From: Simon Pilgrim
Date: Mon, 1 Feb 2016 21:46:12 +0000 (+0000)
Subject: [X86][SSE] Regenerated load vector + element extraction tests.
X-Git-Tag: llvmorg-3.9.0-rc1~15435
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e9848d4a8873d68d68bd444c944d42be9a92ef24;p=platform%2Fupstream%2Fllvm.git

[X86][SSE] Regenerated load vector + element extraction tests.

llvm-svn: 259416
---

diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
index e50d353..fca8465 100644
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -1,28 +1,48 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | FileCheck %s
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | FileCheck %s
-; RUN: llc < %s -march=x86-64 -mattr=+avx -mcpu=btver2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=X64-SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
 define i32 @t(<2 x i64>* %val) nounwind {
-; CHECK-LABEL: t:
-; CHECK-NOT: movd
-; CHECK: movl 8(
-; CHECK-NEXT: ret
-  %tmp2 = load <2 x i64>, <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
-  %tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
-  %tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
-  ret i32 %tmp4
+; X32-SSE2-LABEL: t:
+; X32-SSE2:       # BB#0:
+; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-SSE2-NEXT:    movl 8(%eax), %eax
+; X32-SSE2-NEXT:    retl
+;
+; X64-SSSE3-LABEL: t:
+; X64-SSSE3:       # BB#0:
+; X64-SSSE3-NEXT:    movl 8(%rdi), %eax
+; X64-SSSE3-NEXT:    retq
+;
+; X64-AVX-LABEL: t:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    movl 8(%rdi), %eax
+; X64-AVX-NEXT:    retq
+  %tmp2 = load <2 x i64>, <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
+  %tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
+  %tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
+  ret i32 %tmp4
 }
 
 ; Case where extractelement of load ends up as undef.
 ; (Making sure this doesn't crash.)
 define i32 @t2(<8 x i32>* %xp) {
-; CHECK-LABEL: t2:
-; CHECK: ret
+; X32-SSE2-LABEL: t2:
+; X32-SSE2:       # BB#0:
+; X32-SSE2-NEXT:    retl
+;
+; X64-SSSE3-LABEL: t2:
+; X64-SSSE3:       # BB#0:
+; X64-SSSE3-NEXT:    retq
+;
+; X64-AVX-LABEL: t2:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    retq
   %x = load <8 x i32>, <8 x i32>* %xp
-  %Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32
-undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
+  %Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
   %y = extractelement <8 x i32> %Shuff68, i32 0
   ret i32 %y
 }
@@ -36,10 +56,20 @@ undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
 ; need to special-case the checks.
 
 define void @t3() {
-; CHECK-LABEL: t3:
-; CHECK: movupd
-; CHECK: movhpd
-
+; X32-SSE2-LABEL: t3:
+; X32-SSE2:       # BB#0: # %bb
+; X32-SSE2-NEXT:    movupd (%eax), %xmm0
+; X32-SSE2-NEXT:    movhpd %xmm0, (%eax)
+;
+; X64-SSSE3-LABEL: t3:
+; X64-SSSE3:       # BB#0: # %bb
+; X64-SSSE3-NEXT:    movupd (%rax), %xmm0
+; X64-SSSE3-NEXT:    movhpd %xmm0, (%rax)
+;
+; X64-AVX-LABEL: t3:
+; X64-AVX:       # BB#0: # %bb
+; X64-AVX-NEXT:    vmovupd (%rax), %xmm0
+; X64-AVX-NEXT:    vmovhpd %xmm0, (%rax)
 bb:
   %tmp13 = load <2 x double>, <2 x double>* undef, align 1
   %.sroa.3.24.vec.extract = extractelement <2 x double> %tmp13, i32 1
@@ -52,9 +82,26 @@ bb:
 ; This is testing for an assertion - the extraction was assuming that the undef
 ; second shuffle operand was a post-bitcast type instead of a pre-bitcast type.
 define i64 @t4(<2 x double>* %a) {
-; CHECK-LABEL: t4:
-; CHECK: mov
-; CHECK: ret
+; X32-SSE2-LABEL: t4:
+; X32-SSE2:       # BB#0:
+; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-SSE2-NEXT:    movapd (%eax), %xmm0
+; X32-SSE2-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; X32-SSE2-NEXT:    movd %xmm1, %eax
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; X32-SSE2-NEXT:    movd %xmm0, %edx
+; X32-SSE2-NEXT:    retl
+;
+; X64-SSSE3-LABEL: t4:
+; X64-SSSE3:       # BB#0:
+; X64-SSSE3-NEXT:    movq (%rdi), %rax
+; X64-SSSE3-NEXT:    retq
+;
+; X64-AVX-LABEL: t4:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    movq (%rdi), %rax
+; X64-AVX-NEXT:    retq
   %b = load <2 x double>, <2 x double>* %a, align 16
   %c = shufflevector <2 x double> %b, <2 x double> %b, <2 x i32> <i32 1, i32 0>
   %d = bitcast <2 x double> %c to <2 x i64>
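
---
The transform these tests pin down: extracting a constant lane from a loaded
vector can be narrowed to a scalar load at the lane's byte offset, so no
vector instruction is emitted at all. A minimal IR sketch of the idea (this
function is illustrative only and not part of the commit; the in-tree
coverage for it is @t above):

    ; Lane i of a <4 x i32> sits at byte offset 4*i, so extracting lane 2
    ; folds to a single 4-byte scalar load at offset 8 -- the
    ; "movl 8(%rdi), %eax" seen in the X64-SSSE3 and X64-AVX checks above.
    define i32 @extract_lane2(<4 x i32>* %p) {
      %v = load <4 x i32>, <4 x i32>* %p, align 16
      %e = extractelement <4 x i32> %v, i32 2
      ret i32 %e
    }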
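The check lines themselves come from the script named in the NOTE line of the
regenerated test. A sketch of the usual regeneration workflow, assuming a
build tree at ./build (the script's option spelling and the paths here are
placeholders and vary across LLVM revisions):

    # Rewrite the FileCheck assertions in place with a freshly built llc,
    # then re-run the test to confirm the new checks pass.
    cd llvm
    python utils/update_llc_test_checks.py --llc ./build/bin/llc \
        test/CodeGen/X86/extractelement-load.ll
    ./build/bin/llvm-lit -v test/CodeGen/X86/extractelement-load.ll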