From dcecc7ea468d62dda89bd7666001a1412dae6345 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Tue, 3 Sep 2019 02:51:10 +0000
Subject: [PATCH] [X86] Custom promote i32->f80 uint_to_fp on AVX512 64-bit
 targets.

Reuse the same code to promote all i32 uint_to_fp on 64-bit targets to
simplify the X86ISelLowering constructor.

llvm-svn: 370693
---
 llvm/lib/Target/X86/X86ISelLowering.cpp   | 15 +++++++--------
 llvm/test/CodeGen/X86/scalar-int-to-fp.ll | 22 ++++++----------------
 2 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f29d50f..6f0fb3a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -221,14 +221,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
   setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
 
-  if (Subtarget.is64Bit()) {
-    if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
-      // f32/f64 are legal, f80 is custom.
-      setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
-    else
-      setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
-    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
-  } else if (!Subtarget.useSoftFloat()) {
+  if (!Subtarget.useSoftFloat()) {
     // We have an algorithm for SSE2->double, and we turn this into a
     // 64-bit FILD followed by conditional FADD for other targets.
     setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
@@ -18657,6 +18650,12 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
     return Op;
   }
 
+  // Promote i32 to i64 and use a signed conversion on 64-bit targets.
+  if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
+    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, N0);
+    return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, N0);
+  }
+
   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
     return V;
 
diff --git a/llvm/test/CodeGen/X86/scalar-int-to-fp.ll b/llvm/test/CodeGen/X86/scalar-int-to-fp.ll
index 8a46b96..580da15 100644
--- a/llvm/test/CodeGen/X86/scalar-int-to-fp.ll
+++ b/llvm/test/CodeGen/X86/scalar-int-to-fp.ll
@@ -230,15 +230,12 @@ define x86_fp80 @u32_to_x(i32 %a) nounwind {
 ; AVX512_32-NEXT:    popl %ebp
 ; AVX512_32-NEXT:    retl
 ;
-; AVX512_64-LABEL: u32_to_x:
-; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512_64-NEXT:    vmovd %edi, %xmm1
-; AVX512_64-NEXT:    vpor %xmm0, %xmm1, %xmm1
-; AVX512_64-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
-; AVX512_64-NEXT:    vmovsd %xmm0, -{{[0-9]+}}(%rsp)
-; AVX512_64-NEXT:    fldl -{{[0-9]+}}(%rsp)
-; AVX512_64-NEXT:    retq
+; CHECK64-LABEL: u32_to_x:
+; CHECK64:       # %bb.0:
+; CHECK64-NEXT:    movl %edi, %eax
+; CHECK64-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK64-NEXT:    fildll -{{[0-9]+}}(%rsp)
+; CHECK64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: u32_to_x:
 ; SSE2_32:       # %bb.0:
@@ -256,13 +253,6 @@ define x86_fp80 @u32_to_x(i32 %a) nounwind {
 ; SSE2_32-NEXT:    popl %ebp
 ; SSE2_32-NEXT:    retl
 ;
-; SSE2_64-LABEL: u32_to_x:
-; SSE2_64:       # %bb.0:
-; SSE2_64-NEXT:    movl %edi, %eax
-; SSE2_64-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
-; SSE2_64-NEXT:    fildll -{{[0-9]+}}(%rsp)
-; SSE2_64-NEXT:    retq
-;
 ; X87-LABEL: u32_to_x:
 ; X87:       # %bb.0:
 ; X87-NEXT:    pushl %ebp
-- 
2.7.4
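
Note (not part of the patch): the hunk added to LowerUINT_TO_FP relies on the fact that
every i32 value, once zero-extended to i64, falls in the non-negative range of a signed
64-bit integer, so a signed i64->f80 conversion yields the same value as an unsigned
i32->f80 conversion. A minimal standalone C++ sketch of that equivalence follows; the
helper name is hypothetical and only illustrates the reasoning.

// Sketch only: shows why zero-extend + signed i64 conversion matches an
// unsigned i32 conversion. Any uint32_t fits in the non-negative range of
// int64_t, so the sign bit of the wider type is never set.
#include <cassert>
#include <cstdint>

static long double u32_to_fp80_via_signed_i64(uint32_t X) {
  int64_t Wide = X;                       // zero-extend; value preserved
  return static_cast<long double>(Wide);  // signed i64 -> x86_fp80
}

int main() {
  const uint32_t Samples[] = {0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu};
  for (uint32_t X : Samples)
    assert(u32_to_fp80_via_signed_i64(X) == static_cast<long double>(X));
  return 0;
}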