From 5f643ad847de495a0d5ac98bc082d35facf8b56c Mon Sep 17 00:00:00 2001
From: "Andrew V. Tischenko"
Date: Fri, 16 Dec 2016 09:56:02 +0000
Subject: [PATCH] Extra coverage tests to demonstrate fixes in D72618 and
 D26855

llvm-svn: 289931
---
 llvm/test/CodeGen/X86/recip-fastmath2.ll | 274 +++++++++++++++++++++++++++++++
 llvm/test/CodeGen/X86/vector-sqrt.ll     |  60 +++++++
 2 files changed, 334 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/recip-fastmath2.ll
 create mode 100644 llvm/test/CodeGen/X86/vector-sqrt.ll

diff --git a/llvm/test/CodeGen/X86/recip-fastmath2.ll b/llvm/test/CodeGen/X86/recip-fastmath2.ll
new file mode 100644
index 0000000..0788b03
--- /dev/null
+++ b/llvm/test/CodeGen/X86/recip-fastmath2.ll
@@ -0,0 +1,274 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+
+; Extra test coverage for the reciprocal-estimate codegen discussed in D26855.
+
+define float @f32_no_step_2(float %x) #3 {
+; SSE-LABEL: f32_no_step_2:
+; SSE: # BB#0:
+; SSE-NEXT: rcpss %xmm0, %xmm0
+; SSE-NEXT: mulss {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: f32_no_step_2:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+  %div = fdiv fast float 1234.0, %x
+  ret float %div
+}
+
+define float @f32_one_step_2(float %x) #1 {
+; SSE-LABEL: f32_one_step_2:
+; SSE: # BB#0:
+; SSE-NEXT: rcpss %xmm0, %xmm2
+; SSE-NEXT: mulss %xmm2, %xmm0
+; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: subss %xmm0, %xmm1
+; SSE-NEXT: mulss %xmm2, %xmm1
+; SSE-NEXT: addss %xmm2, %xmm1
+; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: f32_one_step_2:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vsubss %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+  %div = fdiv fast float 3456.0, %x
+  ret float %div
+}
+
+define float @f32_two_step_2(float %x) #2 {
+; SSE-LABEL: f32_two_step_2:
+; SSE: # BB#0:
+; SSE-NEXT: rcpss %xmm0, %xmm2
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: mulss %xmm2, %xmm3
+; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movaps %xmm1, %xmm4
+; SSE-NEXT: subss %xmm3, %xmm4
+; SSE-NEXT: mulss %xmm2, %xmm4
+; SSE-NEXT: addss %xmm2, %xmm4
+; SSE-NEXT: mulss %xmm4, %xmm0
+; SSE-NEXT: subss %xmm0, %xmm1
+; SSE-NEXT: mulss %xmm4, %xmm1
+; SSE-NEXT: addss %xmm4, %xmm1
+; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: f32_two_step_2:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-NEXT: vsubss %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm2
+; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vsubss %xmm0, %xmm3, %xmm0
+; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+  %div = fdiv fast float 6789.0, %x
+  ret float %div
+}
+
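+; Note: the one_step/two_step sequences checked in this file correspond to
+; Newton-Raphson refinement of the hardware estimate y0 = rcp(x):
+;   y1 = y0 + y0*(1.0 - x*y0)   ; one step
+;   y2 = y1 + y1*(1.0 - x*y1)   ; two steps
+; with the non-unit numerator folded into the final mulss/mulps from memory.
+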
+define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
+; SSE-LABEL: v4f32_one_step2:
+; SSE: # BB#0:
+; SSE-NEXT: rcpps %xmm0, %xmm2
+; SSE-NEXT: mulps %xmm2, %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT: subps %xmm0, %xmm1
+; SSE-NEXT: mulps %xmm2, %xmm1
+; SSE-NEXT: addps %xmm2, %xmm1
+; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v4f32_one_step2:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpps %xmm0, %xmm1
+; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+  %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
+  ret <4 x float> %div
+}
+
+define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
+; SSE-LABEL: v4f32_two_step2:
+; SSE: # BB#0:
+; SSE-NEXT: rcpps %xmm0, %xmm2
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: mulps %xmm2, %xmm3
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT: movaps %xmm1, %xmm4
+; SSE-NEXT: subps %xmm3, %xmm4
+; SSE-NEXT: mulps %xmm2, %xmm4
+; SSE-NEXT: addps %xmm2, %xmm4
+; SSE-NEXT: mulps %xmm4, %xmm0
+; SSE-NEXT: subps %xmm0, %xmm1
+; SSE-NEXT: mulps %xmm4, %xmm1
+; SSE-NEXT: addps %xmm4, %xmm1
+; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v4f32_two_step2:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpps %xmm0, %xmm1
+; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT: vsubps %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vmulps %xmm2, %xmm1, %xmm2
+; AVX-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vsubps %xmm0, %xmm3, %xmm0
+; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+  %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
+  ret <4 x float> %div
+}
+
+define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
+; SSE-LABEL: v8f32_one_step2:
+; SSE: # BB#0:
+; SSE-NEXT: rcpps %xmm1, %xmm4
+; SSE-NEXT: mulps %xmm4, %xmm1
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: subps %xmm1, %xmm3
+; SSE-NEXT: mulps %xmm4, %xmm3
+; SSE-NEXT: addps %xmm4, %xmm3
+; SSE-NEXT: rcpps %xmm0, %xmm1
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: subps %xmm0, %xmm2
+; SSE-NEXT: mulps %xmm1, %xmm2
+; SSE-NEXT: addps %xmm1, %xmm2
+; SSE-NEXT: mulps {{.*}}(%rip), %xmm2
+; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: movaps %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v8f32_one_step2:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpps %ymm0, %ymm1
+; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT: vsubps %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
+  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
+  ret <8 x float> %div
+}
+
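+; Note: without AVX, the <8 x float> cases are legalized as two <4 x float>
+; halves, so the SSE checks repeat the rcpps + refinement sequence once per
+; xmm register, while AVX handles the whole vector in a single ymm sequence.
+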
+define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
+; SSE-LABEL: v8f32_two_step2:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: rcpps %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm1, %xmm4
+; SSE-NEXT: mulps %xmm3, %xmm4
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT: movaps %xmm0, %xmm5
+; SSE-NEXT: subps %xmm4, %xmm5
+; SSE-NEXT: mulps %xmm3, %xmm5
+; SSE-NEXT: addps %xmm3, %xmm5
+; SSE-NEXT: mulps %xmm5, %xmm1
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: subps %xmm1, %xmm3
+; SSE-NEXT: mulps %xmm5, %xmm3
+; SSE-NEXT: addps %xmm5, %xmm3
+; SSE-NEXT: rcpps %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm2, %xmm4
+; SSE-NEXT: mulps %xmm1, %xmm4
+; SSE-NEXT: movaps %xmm0, %xmm5
+; SSE-NEXT: subps %xmm4, %xmm5
+; SSE-NEXT: mulps %xmm1, %xmm5
+; SSE-NEXT: addps %xmm1, %xmm5
+; SSE-NEXT: mulps %xmm5, %xmm2
+; SSE-NEXT: subps %xmm2, %xmm0
+; SSE-NEXT: mulps %xmm5, %xmm0
+; SSE-NEXT: addps %xmm5, %xmm0
+; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
+; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v8f32_two_step2:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpps %ymm0, %ymm1
+; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm2
+; AVX-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT: vsubps %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vmulps %ymm2, %ymm1, %ymm2
+; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vsubps %ymm0, %ymm3, %ymm0
+; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
+  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
+  ret <8 x float> %div
+}
+
+define <8 x float> @v8f32_no_step(<8 x float> %x) #3 {
+; SSE-LABEL: v8f32_no_step:
+; SSE: # BB#0:
+; SSE-NEXT: rcpps %xmm0, %xmm0
+; SSE-NEXT: rcpps %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v8f32_no_step:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpps %ymm0, %ymm0
+; AVX-NEXT: retq
+  %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
+  ret <8 x float> %div
+}
+
+define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 {
+; SSE-LABEL: v8f32_no_step2:
+; SSE: # BB#0:
+; SSE-NEXT: rcpps %xmm1, %xmm1
+; SSE-NEXT: rcpps %xmm0, %xmm0
+; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
+; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v8f32_no_step2:
+; AVX: # BB#0:
+; AVX-NEXT: vrcpps %ymm0, %ymm0
+; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
+  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
+  ret <8 x float> %div
+}
+
+attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="!divf,!vec-divf" }
+attributes #1 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf,vec-divf" }
+attributes #2 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:2,vec-divf:2" }
+attributes #3 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:0,vec-divf:0" }
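+
+; Note on the "reciprocal-estimates" strings above: a bare name ("divf",
+; "vec-divf") enables the estimate with the default number of refinement
+; steps, "name:N" requests exactly N Newton-Raphson steps (so ":0" emits the
+; raw estimate), and a leading '!' disables the estimate for that operation.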
+
diff --git a/llvm/test/CodeGen/X86/vector-sqrt.ll b/llvm/test/CodeGen/X86/vector-sqrt.ll
new file mode 100644
index 0000000..03efa54
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-sqrt.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK
+
+; Function Attrs: nounwind readonly uwtable
+define <2 x double> @sqrtd2(double* nocapture readonly %v) local_unnamed_addr #0 {
+; CHECK-LABEL: sqrtd2:
+; CHECK: vsqrtsd (%rdi), %xmm0, %xmm0
+; CHECK-NEXT: vsqrtsd 8(%rdi), %xmm1, %xmm1
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+entry:
+  %0 = load double, double* %v, align 8
+  %call = tail call double @sqrt(double %0) #2
+  %arrayidx1 = getelementptr inbounds double, double* %v, i64 1
+  %1 = load double, double* %arrayidx1, align 8
+  %call2 = tail call double @sqrt(double %1) #2
+  %vecinit.i = insertelement <2 x double> undef, double %call, i32 0
+  %vecinit1.i = insertelement <2 x double> %vecinit.i, double %call2, i32 1
+  ret <2 x double> %vecinit1.i
+}
+
+; Function Attrs: nounwind readnone
+declare double @sqrt(double) local_unnamed_addr #1
+
+; Function Attrs: nounwind readonly uwtable
+define <4 x float> @sqrtf4(float* nocapture readonly %v) local_unnamed_addr #0 {
+; CHECK-LABEL: sqrtf4:
+; CHECK: vsqrtss (%rdi), %xmm0, %xmm0
+; CHECK-NEXT: vsqrtss 4(%rdi), %xmm1, %xmm1
+; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm2
+; CHECK-NEXT: vsqrtss 12(%rdi), %xmm3, %xmm3
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; CHECK-NEXT: retq
+entry:
+  %0 = load float, float* %v, align 4
+  %call = tail call float @sqrtf(float %0) #2
+  %arrayidx1 = getelementptr inbounds float, float* %v, i64 1
+  %1 = load float, float* %arrayidx1, align 4
+  %call2 = tail call float @sqrtf(float %1) #2
+  %arrayidx3 = getelementptr inbounds float, float* %v, i64 2
+  %2 = load float, float* %arrayidx3, align 4
+  %call4 = tail call float @sqrtf(float %2) #2
+  %arrayidx5 = getelementptr inbounds float, float* %v, i64 3
+  %3 = load float, float* %arrayidx5, align 4
+  %call6 = tail call float @sqrtf(float %3) #2
+  %vecinit.i = insertelement <4 x float> undef, float %call, i32 0
+  %vecinit1.i = insertelement <4 x float> %vecinit.i, float %call2, i32 1
+  %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %call4, i32 2
+  %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %call6, i32 3
+  ret <4 x float> %vecinit3.i
+}
+
+; Function Attrs: nounwind readnone
+declare float @sqrtf(float) local_unnamed_addr #1
+
+attributes #0 = { nounwind readonly uwtable "target-features"="+avx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone "target-features"="+avx2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
--
2.7.4
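For reference, a minimal C sketch of the kind of source that lowers to the
sqrtd2/sqrtf4 IR above. It is illustrative only: the original source is not
part of the commit, the vector typedefs are an assumption, and it must be
built with -mavx and -fno-math-errno so the libm calls match the readnone
sqrt/sqrtf declarations in the IR.

    #include <math.h>

    /* Clang/GCC vector-extension types standing in for <2 x double>
       and <4 x float>. */
    typedef double double2 __attribute__((vector_size(16)));
    typedef float  float4  __attribute__((vector_size(16)));

    /* Two scalar sqrt() calls on adjacent elements, reassembled into a
       vector; this can lower to vsqrtsd plus vunpcklpd, as the CHECK
       lines expect. */
    double2 sqrtd2(const double *v) {
      double2 r = { sqrt(v[0]), sqrt(v[1]) };
      return r;
    }

    /* Same pattern for floats: four vsqrtss results recombined with
       vinsertps. */
    float4 sqrtf4(const float *v) {
      float4 r = { sqrtf(v[0]), sqrtf(v[1]), sqrtf(v[2]), sqrtf(v[3]) };
      return r;
    }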