From: Sanjay Patel
Date: Thu, 10 May 2018 14:58:47 +0000 (+0000)
Subject: [x86] fix test names; NFC
X-Git-Tag: llvmorg-7.0.0-rc1~6337
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ec9b6be26fbd2444eb935fe2752fd45ef46f3b11;p=platform%2Fupstream%2Fllvm.git

[x86] fix test names; NFC

llvm-svn: 331989
---

diff --git a/llvm/test/CodeGen/X86/fmaxnum.ll b/llvm/test/CodeGen/X86/fmaxnum.ll
index beeefac..c6c9fa7 100644
--- a/llvm/test/CodeGen/X86/fmaxnum.ll
+++ b/llvm/test/CodeGen/X86/fmaxnum.ll
@@ -335,8 +335,8 @@ define <4 x float> @maxnum_intrinsic_nnan_fmf_v4f32(<4 x float> %a, <4 x float> %
 
 ; FIXME: Current (but legacy someday): a function-level attribute should also enable the fold.
 
-define float @maxnum_intrinsic_nnan_fmf_f32(float %a, float %b) #0 {
-; SSE-LABEL: maxnum_intrinsic_nnan_fmf_f32:
+define float @maxnum_intrinsic_nnan_attr_f32(float %a, float %b) #0 {
+; SSE-LABEL: maxnum_intrinsic_nnan_attr_f32:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordss %xmm0, %xmm2
@@ -348,7 +348,7 @@ define float @maxnum_intrinsic_nnan_fmf_f32(float %a, float %b) #0 {
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: maxnum_intrinsic_nnan_fmf_f32:
+; AVX-LABEL: maxnum_intrinsic_nnan_attr_f32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/fminnum.ll b/llvm/test/CodeGen/X86/fminnum.ll
index 7374f8e9..f520503 100644
--- a/llvm/test/CodeGen/X86/fminnum.ll
+++ b/llvm/test/CodeGen/X86/fminnum.ll
@@ -327,8 +327,8 @@ define <2 x double> @minnum_intrinsic_nnan_fmf_v2f64(<2 x double> %a, <2 x doubl
 
 ; FIXME: Current (but legacy someday): a function-level attribute should also enable the fold.
 
-define double @minnum_intrinsic_nnan_fmf_f64(double %a, double %b) #0 {
-; SSE-LABEL: minnum_intrinsic_nnan_fmf_f64:
+define double @minnum_intrinsic_nnan_attr_f64(double %a, double %b) #0 {
+; SSE-LABEL: minnum_intrinsic_nnan_attr_f64:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
@@ -340,13 +340,13 @@ define double @minnum_intrinsic_nnan_fmf_f64(double %a, double %b) #0 {
 ; SSE-NEXT:    movapd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: minnum_intrinsic_nnan_fmf_f64:
+; AVX-LABEL: minnum_intrinsic_nnan_attr_f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-  %r = tail call nnan double @llvm.minnum.f64(double %a, double %b)
+  %r = tail call double @llvm.minnum.f64(double %a, double %b)
   ret double %r
 }
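
Note on the rename: the _attr suffix marks tests that exercise a function-level fast-math attribute (the #0 attribute group, which this truncated diff does not show), while the _fmf suffix is reserved for tests that put the flag on the call instruction itself; the fminnum.ll hunk also drops the stray nnan flag so the test matches its new name. A minimal sketch of the distinction, assuming the attribute group uses "no-nans-fp-math" as the FIXME comment suggests:

    ; Assumed shape of the attribute group referenced by #0 (not part of this diff):
    attributes #0 = { "no-nans-fp-math"="true" }

    ; By contrast, an *_fmf_* test carries the flag on the call itself:
    ;   %r = tail call nnan double @llvm.minnum.f64(double %a, double %b)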