From 35585aff34837f227f9103704af48eaad810df13 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Tue, 4 Dec 2018 04:51:07 +0000
Subject: [PATCH] [X86] Remove custom DAG combine for
 SIGN_EXTEND_VECTOR_INREG/ZERO_EXTEND_VECTOR_INREG.

We only needed this because it provided really aggressive constant folding
even through constant pool entries created from build_vectors. The main case
was for vXi8 MULH legalization, which was happening as part of legalize DAG
instead of as part of legalize vector ops. Now it's part of vector op
legalization, and we've added special handling for build vectors of all
constants there. This has removed the need for this code on the lit tests we
have.

llvm-svn: 348237
---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 45 ---------------------------------
 1 file changed, 45 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b1a13f8..f810200 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1805,8 +1805,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setTargetDAGCombine(ISD::ANY_EXTEND);
   setTargetDAGCombine(ISD::SIGN_EXTEND);
   setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
-  setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
-  setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
   setTargetDAGCombine(ISD::SINT_TO_FP);
   setTargetDAGCombine(ISD::UINT_TO_FP);
   setTargetDAGCombine(ISD::SETCC);
@@ -40666,46 +40664,6 @@ static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
   return combineAddOrSubToADCOrSBB(N, DAG);
 }
 
-static SDValue combineExtendVectorInreg(SDNode *N, SelectionDAG &DAG,
-                                        TargetLowering::DAGCombinerInfo &DCI,
-                                        const X86Subtarget &Subtarget) {
-  if (DCI.isBeforeLegalize())
-    return SDValue();
-
-  SDLoc DL(N);
-  unsigned Opcode = N->getOpcode();
-  MVT VT = N->getSimpleValueType(0);
-  MVT SVT = VT.getVectorElementType();
-  unsigned NumElts = VT.getVectorNumElements();
-  unsigned EltSizeInBits = SVT.getSizeInBits();
-
-  SDValue Op = N->getOperand(0);
-  MVT OpVT = Op.getSimpleValueType();
-  MVT OpEltVT = OpVT.getVectorElementType();
-  unsigned OpEltSizeInBits = OpEltVT.getSizeInBits();
-
-  // Perform any constant folding.
-  // FIXME: Reduce constant pool usage and don't fold when OptSize is enabled.
-  APInt UndefElts;
-  SmallVector<APInt, 64> EltBits;
-  if (getTargetConstantBitsFromNode(Op, OpEltSizeInBits, UndefElts, EltBits)) {
-    APInt Undefs(NumElts, 0);
-    SmallVector<APInt, 4> Vals(NumElts, APInt(EltSizeInBits, 0));
-    bool IsZEXT = (Opcode == ISD::ZERO_EXTEND_VECTOR_INREG);
-    for (unsigned i = 0; i != NumElts; ++i) {
-      if (UndefElts[i]) {
-        Undefs.setBit(i);
-        continue;
-      }
-      Vals[i] = IsZEXT ? EltBits[i].zextOrTrunc(EltSizeInBits)
-                       : EltBits[i].sextOrTrunc(EltSizeInBits);
-    }
-    return getConstVector(Vals, Undefs, VT, DAG, DL);
-  }
-
-  return SDValue();
-}
-
 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
   MVT VT = N->getSimpleValueType(0);
@@ -41098,9 +41056,6 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case X86ISD::VSRAI:
   case X86ISD::VSRLI:
     return combineVectorShiftImm(N, DAG, DCI, Subtarget);
-  case ISD::SIGN_EXTEND_VECTOR_INREG:
-  case ISD::ZERO_EXTEND_VECTOR_INREG:
-    return combineExtendVectorInreg(N, DAG, DCI, Subtarget);
  case X86ISD::PINSRB:
  case X86ISD::PINSRW:      return combineVectorInsert(N, DAG, DCI, Subtarget);
  case X86ISD::SHUFP:       // Handle all target specific shuffles
-- 
2.7.4
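
The combine removed above folded a SIGN_EXTEND_VECTOR_INREG or
ZERO_EXTEND_VECTOR_INREG of an all-constant source vector element by element,
extending each low source element to the wider result element type. Below is
a minimal standalone sketch of that per-element folding, assuming only LLVM's
APInt and SmallVector; the helper name foldExtendInRegConstants and its
signature are hypothetical and are not part of this patch or of LLVM.

  // Illustrative sketch only: mimics the per-element constant folding that
  // the removed combineExtendVectorInreg performed. Not LLVM code.
  #include "llvm/ADT/APInt.h"
  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/SmallVector.h"

  using namespace llvm;

  // Take the low NumElts constant source elements and widen each one to
  // DstEltBits: zero-extend for ZERO_EXTEND_VECTOR_INREG, sign-extend for
  // SIGN_EXTEND_VECTOR_INREG.
  SmallVector<APInt, 16>
  foldExtendInRegConstants(ArrayRef<APInt> SrcElts, unsigned NumElts,
                           unsigned DstEltBits, bool IsZExt) {
    SmallVector<APInt, 16> Folded;
    Folded.reserve(NumElts);
    for (unsigned I = 0; I != NumElts; ++I)
      Folded.push_back(IsZExt ? SrcElts[I].zextOrTrunc(DstEltBits)
                              : SrcElts[I].sextOrTrunc(DstEltBits));
    return Folded;
  }

With this patch the target no longer performs that folding as a DAG combine;
per the commit message, vXi8 MULH lowering now happens during vector op
legalization, which has special handling for build vectors of all constants.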