From 42b69dd961a008446a326c409b99f61cce632484 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 5 Sep 2016 02:20:53 +0000
Subject: [PATCH] [X86] Add AVX and AVX512 command lines to the vec_ss_load_fold test.

llvm-svn: 280645
---
 llvm/test/CodeGen/X86/vec_ss_load_fold.ll | 130 ++++++++++++++++++++++++++++++
 1 file changed, 130 insertions(+)

diff --git a/llvm/test/CodeGen/X86/vec_ss_load_fold.ll b/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
index 3d69f06..3d4fe11 100644
--- a/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
+++ b/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefix=X32
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=+avx | FileCheck %s --check-prefix=X32_AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+avx | FileCheck %s --check-prefix=X64_AVX
+; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=+avx512f | FileCheck %s --check-prefix=X32_AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+avx512f | FileCheck %s --check-prefix=X64_AVX
 
 define i16 @test1(float %f) nounwind {
 ; X32-LABEL: test1:
@@ -26,6 +30,30 @@ define i16 @test1(float %f) nounwind {
 ; X64-NEXT: cvttss2si %xmm0, %eax
 ; X64-NEXT: ## kill: %AX %AX %EAX
 ; X64-NEXT: retq
+;
+; X32_AVX-LABEL: test1:
+; X32_AVX: ## BB#0:
+; X32_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X32_AVX-NEXT: vsubss LCPI0_0, %xmm0, %xmm0
+; X32_AVX-NEXT: vmulss LCPI0_1, %xmm0, %xmm0
+; X32_AVX-NEXT: vminss LCPI0_2, %xmm0, %xmm0
+; X32_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
+; X32_AVX-NEXT: vcvttss2si %xmm0, %eax
+; X32_AVX-NEXT: ## kill: %AX %AX %EAX
+; X32_AVX-NEXT: retl
+;
+; X64_AVX-LABEL: test1:
+; X64_AVX: ## BB#0:
+; X64_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X64_AVX-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X64_AVX-NEXT: vsubss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
+; X64_AVX-NEXT: vcvttss2si %xmm0, %eax
+; X64_AVX-NEXT: ## kill: %AX %AX %EAX
+; X64_AVX-NEXT: retq
 %tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
 %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
 %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
@@ -62,6 +90,29 @@ define i16 @test2(float %f) nounwind {
 ; X64-NEXT: cvttss2si %xmm0, %eax
 ; X64-NEXT: ## kill: %AX %AX %EAX
 ; X64-NEXT: retq
+;
+; X32_AVX-LABEL: test2:
+; X32_AVX: ## BB#0:
+; X32_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32_AVX-NEXT: vaddss LCPI1_0, %xmm0, %xmm0
+; X32_AVX-NEXT: vmulss LCPI1_1, %xmm0, %xmm0
+; X32_AVX-NEXT: vminss LCPI1_2, %xmm0, %xmm0
+; X32_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X32_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
+; X32_AVX-NEXT: vcvttss2si %xmm0, %eax
+; X32_AVX-NEXT: ## kill: %AX %AX %EAX
+; X32_AVX-NEXT: retl
+;
+; X64_AVX-LABEL: test2:
+; X64_AVX: ## BB#0:
+; X64_AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
+; X64_AVX-NEXT: vcvttss2si %xmm0, %eax
+; X64_AVX-NEXT: ## kill: %AX %AX %EAX
+; X64_AVX-NEXT: retq
 %tmp28 = fsub float %f, 1.000000e+00 ; <float> [#uses=1]
 %tmp37 = fmul float %tmp28, 5.000000e-01 ; <float> [#uses=1]
 %tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0 ; <<4 x float>> [#uses=1]
@@ -97,6 +148,17 @@ define <4 x float> @test3(<4 x float> %A, float *%b, i32 %C) nounwind {
 ; X64: ## BB#0:
 ; X64-NEXT: roundss $4, (%rdi), %xmm0
 ; X64-NEXT: retq
+;
+; X32_AVX-LABEL: test3:
+; X32_AVX: ## BB#0:
+; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32_AVX-NEXT: vroundss $4, (%eax), %xmm0, %xmm0
+; X32_AVX-NEXT: retl
+;
+; X64_AVX-LABEL: test3:
+; X64_AVX: ## BB#0:
+; X64_AVX-NEXT: vroundss $4, (%rdi), %xmm0, %xmm0
+; X64_AVX-NEXT: retq
 %a = load float , float *%b
 %B = insertelement <4 x float> undef, float %a, i32 0
 %X = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %A, <4 x float> %B, i32 4)
@@ -126,6 +188,27 @@ define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
 ; X64-NEXT: roundss $4, %xmm1, %xmm0
 ; X64-NEXT: addq $24, %rsp
 ; X64-NEXT: retq
+;
+; X32_AVX-LABEL: test4:
+; X32_AVX: ## BB#0:
+; X32_AVX-NEXT: subl $28, %esp
+; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32_AVX-NEXT: vmovaps %xmm0, (%esp) ## 16-byte Spill
+; X32_AVX-NEXT: calll _f
+; X32_AVX-NEXT: vroundss $4, (%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; X32_AVX-NEXT: addl $28, %esp
+; X32_AVX-NEXT: retl
+;
+; X64_AVX-LABEL: test4:
+; X64_AVX: ## BB#0:
+; X64_AVX-NEXT: subq $24, %rsp
+; X64_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64_AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
+; X64_AVX-NEXT: callq _f
+; X64_AVX-NEXT: vroundss $4, (%rsp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; X64_AVX-NEXT: addq $24, %rsp
+; X64_AVX-NEXT: retq
 %a = load float , float *%b
 %B = insertelement <4 x float> undef, float %a, i32 0
 %q = call <4 x float> @f()
@@ -148,6 +231,20 @@ define <2 x double> @test5() nounwind uwtable readnone noinline {
 ; X64-NEXT: movl $128, %eax
 ; X64-NEXT: cvtsi2sdl %eax, %xmm0
 ; X64-NEXT: retq
+;
+; X32_AVX-LABEL: test5:
+; X32_AVX: ## BB#0: ## %entry
+; X32_AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
+; X32_AVX-NEXT: movl $128, %eax
+; X32_AVX-NEXT: vcvtsi2sdl %eax, %xmm0, %xmm0
+; X32_AVX-NEXT: retl
+;
+; X64_AVX-LABEL: test5:
+; X64_AVX: ## BB#0: ## %entry
+; X64_AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
+; X64_AVX-NEXT: movl $128, %eax
+; X64_AVX-NEXT: vcvtsi2sdl %eax, %xmm0, %xmm0
+; X64_AVX-NEXT: retq
 entry:
 %0 = tail call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> <double 4.569870e+02, double 1.233210e+02>, i32 128) nounwind readnone
 ret <2 x double> %0
@@ -166,6 +263,17 @@ define <4 x float> @minss_fold(float* %x, <4 x float> %y) {
 ; X64: ## BB#0: ## %entry
 ; X64-NEXT: minss (%rdi), %xmm0
 ; X64-NEXT: retq
+;
+; X32_AVX-LABEL: minss_fold:
+; X32_AVX: ## BB#0: ## %entry
+; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32_AVX-NEXT: vminss (%eax), %xmm0, %xmm0
+; X32_AVX-NEXT: retl
+;
+; X64_AVX-LABEL: minss_fold:
+; X64_AVX: ## BB#0: ## %entry
+; X64_AVX-NEXT: vminss (%rdi), %xmm0, %xmm0
+; X64_AVX-NEXT: retq
 entry:
 %0 = load float, float* %x, align 1
 %vecinit.i = insertelement <4 x float> undef, float %0, i32 0
@@ -187,6 +295,17 @@ define <4 x float> @maxss_fold(float* %x, <4 x float> %y) {
 ; X64: ## BB#0: ## %entry
 ; X64-NEXT: maxss (%rdi), %xmm0
 ; X64-NEXT: retq
+;
+; X32_AVX-LABEL: maxss_fold:
+; X32_AVX: ## BB#0: ## %entry
+; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32_AVX-NEXT: vmaxss (%eax), %xmm0, %xmm0
+; X32_AVX-NEXT: retl
+;
+; X64_AVX-LABEL: maxss_fold:
+; X64_AVX: ## BB#0: ## %entry
+; X64_AVX-NEXT: vmaxss (%rdi), %xmm0, %xmm0
+; X64_AVX-NEXT: retq
 entry:
 %0 = load float, float* %x, align 1
 %vecinit.i = insertelement <4 x float> undef, float %0, i32 0
@@ -208,6 +327,17 @@ define <4 x float> @cmpss_fold(float* %x, <4 x float> %y) {
 ; X64: ## BB#0: ## %entry
 ; X64-NEXT: cmpeqss (%rdi), %xmm0
 ; X64-NEXT: retq
+;
+; X32_AVX-LABEL: cmpss_fold:
+; X32_AVX: ## BB#0: ## %entry
+; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32_AVX-NEXT: vcmpeqss (%eax), %xmm0, %xmm0
+; X32_AVX-NEXT: retl
+;
+; X64_AVX-LABEL: cmpss_fold:
+; X64_AVX: ## BB#0: ## %entry
+; X64_AVX-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0
+; X64_AVX-NEXT: retq
 entry:
 %0 = load float, float* %x, align 1
 %vecinit.i = insertelement <4 x float> undef, float %0, i32 0
-- 
2.7.4