From: Simon Pilgrim
Date: Wed, 24 Aug 2016 11:56:15 +0000 (+0000)
Subject: [X86][F16C] Regenerated f16c tests
X-Git-Tag: llvmorg-4.0.0-rc1~11588
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3c8cd3df5e87e7549202af7f1b404ec0d106ecb1;p=platform%2Fupstream%2Fllvm.git

[X86][F16C] Regenerated f16c tests

llvm-svn: 279621
---

diff --git a/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
index 6b7d395..aabb79e 100644
--- a/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=X32
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=X64
 
-; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse3-builtins.c
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/f16c-builtins.c
 
 define float @test_cvtsh_ss(i16 %a0) nounwind {
 ; X32-LABEL: test_cvtsh_ss:
diff --git a/llvm/test/CodeGen/X86/f16c-intrinsics.ll b/llvm/test/CodeGen/X86/f16c-intrinsics.ll
index a78022a..712fe81 100644
--- a/llvm/test/CodeGen/X86/f16c-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/f16c-intrinsics.ll
@@ -1,10 +1,17 @@
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+f16c | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=X64
 
 define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
-  ; CHECK-LABEL: test_x86_vcvtph2ps_128:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtph2ps
+; X32-LABEL: test_x86_vcvtph2ps_128:
+; X32: # BB#0:
+; X32-NEXT: vcvtph2ps %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtph2ps_128:
+; X64: # BB#0:
+; X64-NEXT: vcvtph2ps %xmm0, %xmm0
+; X64-NEXT: retq
   %res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -12,47 +19,79 @@ declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>) nounwind readonly
 
 
 define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) {
-  ; CHECK-LABEL: test_x86_vcvtph2ps_256:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtph2ps
+; X32-LABEL: test_x86_vcvtph2ps_256:
+; X32: # BB#0:
+; X32-NEXT: vcvtph2ps %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtph2ps_256:
+; X64: # BB#0:
+; X64-NEXT: vcvtph2ps %xmm0, %ymm0
+; X64-NEXT: retq
   %res = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
 declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readonly
 
 define <8 x float> @test_x86_vcvtph2ps_256_m(<8 x i16>* nocapture %a) nounwind {
-entry:
-  ; CHECK-LABEL: test_x86_vcvtph2ps_256_m:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtph2ps (%
-  %tmp1 = load <8 x i16>, <8 x i16>* %a, align 16
-  %0 = tail call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %tmp1)
-  ret <8 x float> %0
+; X32-LABEL: test_x86_vcvtph2ps_256_m:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vcvtph2ps (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtph2ps_256_m:
+; X64: # BB#0:
+; X64-NEXT: vcvtph2ps (%rdi), %ymm0
+; X64-NEXT: retq
+  %load = load <8 x i16>, <8 x i16>* %a, align 16
+  %res = tail call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %load)
+  ret <8 x float> %res
 }
 
 define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) {
-  ; CHECK-LABEL: test_x86_vcvtps2ph_128:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtps2ph
+; X32-LABEL: test_x86_vcvtps2ph_128:
+; X32: # BB#0:
+; X32-NEXT: vcvtps2ph $0, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtps2ph_128:
+; X64: # BB#0:
+; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
+; X64-NEXT: retq
   %res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
 declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
 
 define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
-  ; CHECK-LABEL: test_x86_vcvtps2ph_256:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtps2ph
+; X32-LABEL: test_x86_vcvtps2ph_256:
+; X32: # BB#0:
+; X32-NEXT: vcvtps2ph $0, %ymm0, %xmm0
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtps2ph_256:
+; X64: # BB#0:
+; X64-NEXT: vcvtps2ph $0, %ymm0, %xmm0
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
   %res = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
 declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readonly
 
 define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
-; CHECK-LABEL: test_x86_vcvtps2ph_128_scalar:
-; CHECK-NOT: vmov
-; CHECK: vcvtph2ps (%
-
+; X32-LABEL: test_x86_vcvtps2ph_128_scalar:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vcvtph2ps (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtps2ph_128_scalar:
+; X64: # BB#0:
+; X64-NEXT: vcvtph2ps (%rdi), %xmm0
+; X64-NEXT: retq
   %load = load i64, i64* %ptr
   %ins1 = insertelement <2 x i64> undef, i64 %load, i32 0
   %ins2 = insertelement <2 x i64> %ins1, i64 0, i32 1
@@ -62,10 +101,16 @@ define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
 }
 
 define <4 x float> @test_x86_vcvtps2ph_128_scalar2(i64* %ptr) {
-; CHECK-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; CHECK-NOT: vmov
-; CHECK: vcvtph2ps (%
-
+; X32-LABEL: test_x86_vcvtps2ph_128_scalar2:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vcvtph2ps (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtps2ph_128_scalar2:
+; X64: # BB#0:
+; X64-NEXT: vcvtph2ps (%rdi), %xmm0
+; X64-NEXT: retq
   %load = load i64, i64* %ptr
   %ins = insertelement <2 x i64> undef, i64 %load, i32 0
   %bc = bitcast <2 x i64> %ins to <8 x i16>
@@ -74,20 +119,36 @@ define <4 x float> @test_x86_vcvtps2ph_128_scalar2(i64* %ptr) {
 }
 
 define void @test_x86_vcvtps2ph_256_m(<8 x i16>* nocapture %d, <8 x float> %a) nounwind {
+; X32-LABEL: test_x86_vcvtps2ph_256_m:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vcvtps2ph $3, %ymm0, (%eax)
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtps2ph_256_m:
+; X64: # BB#0: # %entry
+; X64-NEXT: vcvtps2ph $3, %ymm0, (%rdi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
 entry:
-  ; CHECK-LABEL: test_x86_vcvtps2ph_256_m:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtps2ph $3, %ymm0, (%
   %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a, i32 3)
   store <8 x i16> %0, <8 x i16>* %d, align 16
   ret void
 }
 
 define void @test_x86_vcvtps2ph_128_m(<4 x i16>* nocapture %d, <4 x float> %a) nounwind {
+; X32-LABEL: test_x86_vcvtps2ph_128_m:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtps2ph_128_m:
+; X64: # BB#0: # %entry
+; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-  ; CHECK-LABEL: test_x86_vcvtps2ph_128_m:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtps2ph $3, %xmm0, (%
   %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a, i32 3)
   %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   store <4 x i16> %1, <4 x i16>* %d, align 8
@@ -95,10 +156,17 @@ entry:
 }
 
 define void @test_x86_vcvtps2ph_128_m2(double* nocapture %hf4x16, <4 x float> %f4x32) #0 {
+; X32-LABEL: test_x86_vcvtps2ph_128_m2:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtps2ph_128_m2:
+; X64: # BB#0: # %entry
+; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-  ; CHECK-LABEL: test_x86_vcvtps2ph_128_m2:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtps2ph $3, %xmm0, (%
   %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4x32, i32 3)
   %1 = bitcast <8 x i16> %0 to <2 x double>
   %vecext = extractelement <2 x double> %1, i32 0
@@ -107,10 +175,17 @@ entry:
 }
 
 define void @test_x86_vcvtps2ph_128_m3(i64* nocapture %hf4x16, <4 x float> %f4x32) #0 {
+; X32-LABEL: test_x86_vcvtps2ph_128_m3:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: test_x86_vcvtps2ph_128_m3:
+; X64: # BB#0: # %entry
+; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-  ; CHECK-LABEL: test_x86_vcvtps2ph_128_m3:
-  ; CHECK-NOT: vmov
-  ; CHECK: vcvtps2ph $3, %xmm0, (%
   %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4x32, i32 3)
   %1 = bitcast <8 x i16> %0 to <2 x i64>
   %vecext = extractelement <2 x i64> %1, i32 0
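
The NOTE lines in both test files credit utils/update_llc_test_checks.py with producing the X32/X64 CHECK blocks shown in this patch. As a rough sketch of how such a regeneration is usually driven (the working directory, the test paths relative to the llvm source tree, and the assumption that the freshly built llc is first on PATH are mine, and option names can differ between LLVM revisions):

    # Re-run the updater so the CHECK lines match current llc output;
    # it rewrites the listed test files in place, assuming llc is on PATH.
    python utils/update_llc_test_checks.py \
        test/CodeGen/X86/f16c-intrinsics.ll \
        test/CodeGen/X86/f16c-intrinsics-fast-isel.ll

The rewritten files are then reviewed and committed, which is what this change records.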