From: Andrea Di Biagio
Date: Thu, 22 May 2014 16:21:39 +0000 (+0000)
Subject: [X86] Improve the lowering of BITCAST from MVT::f64 to MVT::v4i16/MVT::v8i8.
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c8dd1ad85b217880fa092eeb890c703de61be007;p=platform%2Fupstream%2Fllvm.git

[X86] Improve the lowering of BITCAST from MVT::f64 to MVT::v4i16/MVT::v8i8.

This patch teaches the x86 backend how to efficiently lower ISD::BITCAST DAG
nodes from MVT::f64 to MVT::v4i16 (and vice versa), and from MVT::f64 to
MVT::v8i8 (and vice versa).

It extends the logic from revision 208107 to also handle MVT::v4i16 and
MVT::v8i8. In addition, Undef values are now correctly propagated when
widening a vector: for example, when widening from v2i32 to v4i32, the upper
64 bits of the resulting vector are 'undef'.
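As an illustration, here is a minimal IR pattern of the kind the new custom
lowering handles (it mirrors the @test6 case added to lower-bitcast.ll below;
the function name and the constant operand are illustrative only):

  define double @add_v4i16_as_f64(double %A) {
    %1 = bitcast double %A to <4 x i16>
    %add = add <4 x i16> %1, <i16 1, i16 1, i16 1, i16 1>
    %2 = bitcast <4 x i16> %add to double
    ret double %2
  }

With this patch the v4i16 operand of the second bitcast is widened to v8i16
(the extra elements are explicitly marked as undef), bitcast to v2f64, and the
low f64 element is extracted, rather than falling back on the generic
expansion.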

llvm-svn: 209451
---

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6182875..c300637 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1040,6 +1040,8 @@ void X86TargetLowering::resetOperationActions() {
     setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
 
     setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
+    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
+    setOperationAction(ISD::BITCAST, MVT::v8i8,  Custom);
   }
 
   if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
@@ -14276,19 +14278,31 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
   MVT SrcVT = Op.getOperand(0).getSimpleValueType();
   MVT DstVT = Op.getSimpleValueType();
 
-  if (SrcVT == MVT::v2i32) {
+  if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
     assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
     if (DstVT != MVT::f64)
       // This conversion needs to be expanded.
       return SDValue();
 
+    SDValue InVec = Op->getOperand(0);
     SDLoc dl(Op);
-    SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-                               Op->getOperand(0), DAG.getIntPtrConstant(0));
-    SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-                               Op->getOperand(0), DAG.getIntPtrConstant(1));
-    SDValue Elts[] = {Elt0, Elt1, Elt0, Elt0};
-    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Elts);
+    unsigned NumElts = SrcVT.getVectorNumElements();
+    EVT SVT = SrcVT.getVectorElementType();
+
+    // Widen the vector in input in the case of MVT::v2i32.
+    // Example: from MVT::v2i32 to MVT::v4i32.
+    SmallVector<SDValue, 16> Elts;
+    for (unsigned i = 0, e = NumElts; i != e; ++i)
+      Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
+                                 DAG.getIntPtrConstant(i)));
+
+    // Explicitly mark the extra elements as Undef.
+    SDValue Undef = DAG.getUNDEF(SVT);
+    for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
+      Elts.push_back(Undef);
+
+    EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
+    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
     SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
                        DAG.getIntPtrConstant(0));
@@ -14758,17 +14772,23 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     EVT DstVT = N->getValueType(0);
     EVT SrcVT = N->getOperand(0)->getValueType(0);
 
-    if (SrcVT == MVT::f64 && DstVT == MVT::v2i32) {
-      SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
-                                     MVT::v2f64, N->getOperand(0));
-      SDValue ToV4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Expanded);
-      SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-                                 ToV4I32, DAG.getIntPtrConstant(0));
-      SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-                                 ToV4I32, DAG.getIntPtrConstant(1));
-      SDValue Elts[] = {Elt0, Elt1};
-      Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts));
-    }
+    if (SrcVT != MVT::f64 ||
+        (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
+      return;
+
+    unsigned NumElts = DstVT.getVectorNumElements();
+    EVT SVT = DstVT.getVectorElementType();
+    EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
+    SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
+                                   MVT::v2f64, N->getOperand(0));
+    SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
+
+    SmallVector<SDValue, 8> Elts;
+    for (unsigned i = 0, e = NumElts; i != e; ++i)
+      Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
+                                 ToVecInt, DAG.getIntPtrConstant(i)));
+
+    Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
   }
   }
 }
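For the opposite direction (an MVT::f64 value bitcast to one of these illegal
vector types), the result-replacement path above rebuilds the narrow vector by
extracting the elements of the widened integer vector. A minimal IR pattern of
the kind expected to reach ReplaceNodeResults (the function name is
illustrative; the illegal <4 x i16> result is legalized by the usual vector
widening):

  define <4 x i16> @f64_to_v4i16(double %A) {
    %1 = bitcast double %A to <4 x i16>
    ret <4 x i16> %1
  }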

diff --git a/llvm/test/CodeGen/X86/lower-bitcast-v2i32.ll b/llvm/test/CodeGen/X86/lower-bitcast-v2i32.ll
deleted file mode 100644
index 1c0de63..0000000
--- a/llvm/test/CodeGen/X86/lower-bitcast-v2i32.ll
+++ /dev/null
@@ -1,80 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
-
-
-define double @test1(double %A) {
-  %1 = bitcast double %A to <2 x i32>
-  %add = add <2 x i32> %1,
-  %2 = bitcast <2 x i32> %add to double
-  ret double %2
-}
-; FIXME: Ideally we should be able to fold the entire body of @test1 into a
-; single paddd instruction. At the moment we produce the sequence
-; pshufd+paddq+pshufd.
-
-; CHECK-LABEL: test1
-; CHECK-NOT: movsd
-; CHECK: pshufd
-; CHECK-NEXT: paddq
-; CHECK-NEXT: pshufd
-; CHECK-NEXT: ret
-
-
-define double @test2(double %A, double %B) {
-  %1 = bitcast double %A to <2 x i32>
-  %2 = bitcast double %B to <2 x i32>
-  %add = add <2 x i32> %1, %2
-  %3 = bitcast <2 x i32> %add to double
-  ret double %3
-}
-; FIXME: Ideally we should be able to fold the entire body of @test2 into a
-; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
-; sequence pshufd+pshufd+paddq+pshufd.
-
-; CHECK-LABEL: test2
-; CHECK-NOT: movsd
-; CHECK: pshufd
-; CHECK-NEXT: pshufd
-; CHECK-NEXT: paddq
-; CHECK-NEXT: pshufd
-; CHECK-NEXT: ret
-
-
-define i64 @test3(i64 %A) {
-  %1 = bitcast i64 %A to <2 x float>
-  %add = fadd <2 x float> %1,
-  %2 = bitcast <2 x float> %add to i64
-  ret i64 %2
-}
-; CHECK-LABEL: test3
-; CHECK-NOT: pshufd
-; CHECK: addps
-; CHECK-NOT: pshufd
-; CHECK: ret
-
-
-define i64 @test4(i64 %A) {
-  %1 = bitcast i64 %A to <2 x i32>
-  %add = add <2 x i32> %1,
-  %2 = bitcast <2 x i32> %add to i64
-  ret i64 %2
-}
-; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
-; Ideally, we should fold that sequence into a single paddd.
-
-; CHECK-LABEL: test4
-; CHECK: pshufd
-; CHECK-NEXT: paddq
-; CHECK-NEXT: pshufd
-; CHECK: ret
-
-
-define double @test5(double %A) {
-  %1 = bitcast double %A to <2 x float>
-  %add = fadd <2 x float> %1,
-  %2 = bitcast <2 x float> %add to double
-  ret double %2
-}
-; CHECK-LABEL: test5
-; CHECK: addps
-; CHECK-NEXT: ret
-
diff --git a/llvm/test/CodeGen/X86/lower-bitcast.ll b/llvm/test/CodeGen/X86/lower-bitcast.ll
new file mode 100644
index 0000000..b9b29a5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/lower-bitcast.ll
@@ -0,0 +1,155 @@
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
+
+
+define double @test1(double %A) {
+  %1 = bitcast double %A to <2 x i32>
+  %add = add <2 x i32> %1,
+  %2 = bitcast <2 x i32> %add to double
+  ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test1 into a
+; single paddd instruction. At the moment we produce the sequence
+; pshufd+paddq+pshufd.
+
+; CHECK-LABEL: test1
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define double @test2(double %A, double %B) {
+  %1 = bitcast double %A to <2 x i32>
+  %2 = bitcast double %B to <2 x i32>
+  %add = add <2 x i32> %1, %2
+  %3 = bitcast <2 x i32> %add to double
+  ret double %3
+}
+; FIXME: Ideally we should be able to fold the entire body of @test2 into a
+; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
+; sequence pshufd+pshufd+paddq+pshufd.
+
+; CHECK-LABEL: test2
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define i64 @test3(i64 %A) {
+  %1 = bitcast i64 %A to <2 x float>
+  %add = fadd <2 x float> %1,
+  %2 = bitcast <2 x float> %add to i64
+  ret i64 %2
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: pshufd
+; CHECK: addps
+; CHECK-NOT: pshufd
+; CHECK: ret
+
+
+define i64 @test4(i64 %A) {
+  %1 = bitcast i64 %A to <2 x i32>
+  %add = add <2 x i32> %1,
+  %2 = bitcast <2 x i32> %add to i64
+  ret i64 %2
+}
+; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
+; Ideally, we should fold that sequence into a single paddd.
+
+; CHECK-LABEL: test4
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK: ret
+
+
+define double @test5(double %A) {
+  %1 = bitcast double %A to <2 x float>
+  %add = fadd <2 x float> %1,
+  %2 = bitcast <2 x float> %add to double
+  ret double %2
+}
+; CHECK-LABEL: test5
+; CHECK: addps
+; CHECK-NEXT: ret
+
+
+define double @test6(double %A) {
+  %1 = bitcast double %A to <4 x i16>
+  %add = add <4 x i16> %1,
+  %2 = bitcast <4 x i16> %add to double
+  ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test6 into a
+; single paddw instruction.
+
+; CHECK-LABEL: test6
+; CHECK-NOT: movsd
+; CHECK: punpcklwd
+; CHECK-NEXT: paddd
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
+
+define double @test7(double %A, double %B) {
+  %1 = bitcast double %A to <4 x i16>
+  %2 = bitcast double %B to <4 x i16>
+  %add = add <4 x i16> %1, %2
+  %3 = bitcast <4 x i16> %add to double
+  ret double %3
+}
+; FIXME: Ideally we should be able to fold the entire body of @test7 into a
+; single 'paddw %xmm1, %xmm0' instruction. At the moment we produce the
+; sequence pshufd+pshufd+paddd+pshufd.
+
+; CHECK-LABEL: test7
+; CHECK-NOT: movsd
+; CHECK: punpcklwd
+; CHECK-NEXT: punpcklwd
+; CHECK-NEXT: paddd
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
+
+define double @test8(double %A) {
+  %1 = bitcast double %A to <8 x i8>
+  %add = add <8 x i8> %1,
+  %2 = bitcast <8 x i8> %add to double
+  ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test8 into a
+; single paddb instruction. At the moment we produce the sequence
+; pshufd+paddw+pshufd.
+
+; CHECK-LABEL: test8
+; CHECK-NOT: movsd
+; CHECK: punpcklbw
+; CHECK-NEXT: paddw
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
+
+define double @test9(double %A, double %B) {
+  %1 = bitcast double %A to <8 x i8>
+  %2 = bitcast double %B to <8 x i8>
+  %add = add <8 x i8> %1, %2
+  %3 = bitcast <8 x i8> %add to double
+  ret double %3
+}
+; FIXME: Ideally we should be able to fold the entire body of @test9 into a
+; single 'paddb %xmm1, %xmm0' instruction. At the moment we produce the
+; sequence pshufd+pshufd+paddw+pshufd.
+
+; CHECK-LABEL: test9
+; CHECK-NOT: movsd
+; CHECK: punpcklbw
+; CHECK-NEXT: punpcklbw
+; CHECK-NEXT: paddw
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
diff --git a/llvm/test/CodeGen/X86/ret-mmx.ll b/llvm/test/CodeGen/X86/ret-mmx.ll
index 7adf307..fc9c78d 100644
--- a/llvm/test/CodeGen/X86/ret-mmx.ll
+++ b/llvm/test/CodeGen/X86/ret-mmx.ll
@@ -33,8 +33,8 @@ define <2 x i32> @t3() nounwind {
 define double @t4() nounwind {
   ret double bitcast (<2 x i32> to double)
 ; CHECK-LABEL: t4:
-; CHECK-NOT: movl $1
+; CHECK: movl $1
 ; CHECK-NOT: pshufd
-; CHECK: movsd {{.*}}, %xmm0
+; CHECK: movd {{.*}}, %xmm0
 }
 