From: Simon Pilgrim
Date: Fri, 1 Apr 2016 21:00:00 +0000 (+0000)
Subject: [X86][SSE] Regenerated comparison mask and float immediate tests
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=275b2bcb7633329b6afdb45b111bec26d543031d;p=platform%2Fupstream%2Fllvm.git

[X86][SSE] Regenerated comparison mask and float immediate tests

llvm-svn: 265184
---

diff --git a/llvm/test/CodeGen/X86/v4f32-immediate.ll b/llvm/test/CodeGen/X86/v4f32-immediate.ll
index 68d20a0..7945b10 100644
--- a/llvm/test/CodeGen/X86/v4f32-immediate.ll
+++ b/llvm/test/CodeGen/X86/v4f32-immediate.ll
@@ -1,7 +1,16 @@
-; RUN: llc < %s -march=x86 -mattr=+sse | FileCheck %s
-
-; CHECK: movaps
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse | FileCheck %s --check-prefix=X64
 
 define <4 x float> @foo() {
+; X32-LABEL: foo:
+; X32:       # BB#0:
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [3.223542e+00,2.300000e+00,1.200000e+00,1.000000e-01]
+; X32-NEXT:    retl
+;
+; X64-LABEL: foo:
+; X64:       # BB#0:
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [3.223542e+00,2.300000e+00,1.200000e+00,1.000000e-01]
+; X64-NEXT:    retq
   ret <4 x float> <float 0x4009C9D0A0000000, float 0x4002666660000000, float 0x3FF3333340000000, float 0x3FB99999A0000000>
 }
diff --git a/llvm/test/CodeGen/X86/v8i1-masks.ll b/llvm/test/CodeGen/X86/v8i1-masks.ll
index 21fe963..d5c3150 100644
--- a/llvm/test/CodeGen/X86/v8i1-masks.ll
+++ b/llvm/test/CodeGen/X86/v8i1-masks.ll
@@ -1,15 +1,36 @@
-; RUN: llc -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -o - < %s | FileCheck %s
-
-;CHECK-LABEL: and_masks:
-;CHECK: vmovaps
-;CHECK: vcmpltp
-;CHECK: vcmpltp
-;CHECK: vandps
-;CHECK: vandps
-;CHECK: vmovaps
-;CHECK: ret
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X64
 
 define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
+; X32-LABEL: and_masks:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    vmovups (%edx), %ymm0
+; X32-NEXT:    vmovups (%ecx), %ymm1
+; X32-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
+; X32-NEXT:    vmovups (%eax), %ymm2
+; X32-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
+; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X32-NEXT:    vandps LCPI0_0, %ymm0, %ymm0
+; X32-NEXT:    vmovaps %ymm0, (%eax)
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
+;
+; X64-LABEL: and_masks:
+; X64:       ## BB#0:
+; X64-NEXT:    vmovups (%rdi), %ymm0
+; X64-NEXT:    vmovups (%rsi), %ymm1
+; X64-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
+; X64-NEXT:    vmovups (%rdx), %ymm2
+; X64-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
+; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    vmovaps %ymm0, (%rax)
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
   %v0 = load <8 x float>, <8 x float>* %a, align 16
   %v1 = load <8 x float>, <8 x float>* %b, align 16
   %m0 = fcmp olt <8 x float> %v1, %v0
@@ -21,13 +42,30 @@ define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
   ret void
 }
 
-;CHECK: neg_mask
-;CHECK: vcmpltps
-;CHECK: vxorps
-;CHECK: vandps
-;CHECK: vmovaps
-;CHECK: ret
 define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
+; X32-LABEL: neg_masks:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    vmovups (%ecx), %ymm0
+; X32-NEXT:    vcmpltps (%eax), %ymm0, %ymm0
+; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
+; X32-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X32-NEXT:    vmovaps %ymm0, (%eax)
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
+;
+; X64-LABEL: neg_masks:
+; X64:       ## BB#0:
+; X64-NEXT:    vmovups (%rsi), %ymm0
+; X64-NEXT:    vcmpltps (%rdi), %ymm0, %ymm0
+; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
+; X64-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X64-NEXT:    vmovaps %ymm0, (%rax)
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
   %v0 = load <8 x float>, <8 x float>* %a, align 16
   %v1 = load <8 x float>, <8 x float>* %b, align 16
   %m0 = fcmp olt <8 x float> %v1, %v0
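
Note: the X32/X64 check blocks added above come from the utils/update_llc_test_checks.py
script named in the NOTE lines: it runs the llc command from each RUN line and rewrites
the per-function CHECK lines from the resulting assembly, one check prefix per RUN line.
A sketch of a typical invocation (the build path, and the exact --llc-binary flag
spelling in the 2016 version of the script, are assumptions, not taken from this commit):

  $ llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/X86/v4f32-immediate.ll \
        llvm/test/CodeGen/X86/v8i1-masks.ll

Regenerating rather than hand-writing the checks is what makes the switch from the loose
"; CHECK: movaps"-style assertions to the exact instruction-by-instruction X32/X64
blocks in this diff practical to maintain.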