From: Renato Golin
Date: Sun, 23 Apr 2017 12:15:30 +0000 (+0000)
Subject: Revert "[APInt] Fix a few places that use APInt::getRawData to operate within the...
X-Git-Tag: llvmorg-5.0.0-rc1~6926
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4abfb3d7414e616cd84034735893528f6ca0c35a;p=platform%2Fupstream%2Fllvm.git

Revert "[APInt] Fix a few places that use APInt::getRawData to operate within the normal API."

This reverts commits r301105, r301104, r301103 and r301101, as a follow-up
of the previous revert, which broke even more bots.

For reference:
Revert "[APInt] Use operator<<= where possible. NFC"
Revert "[APInt] Use operator<<= instead of shl where possible. NFC"
Revert "[APInt] Use ashrInPlace where possible."

PR32754.

llvm-svn: 301111
---

diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h
index 7a76a5b..c697402 100644
--- a/llvm/include/llvm/ADT/APInt.h
+++ b/llvm/include/llvm/ADT/APInt.h
@@ -876,13 +876,6 @@ public:
     return *this;
   }
 
-  /// \brief Left-shift assignment function.
-  ///
-  /// Shifts *this left by shiftAmt and assigns the result to *this.
-  ///
-  /// \returns *this after shifting left by ShiftAmt
-  APInt &operator<<=(const APInt &ShiftAmt);
-
   /// @}
   /// \name Binary Operators
   /// @{
@@ -964,11 +957,7 @@ public:
   /// \brief Left-shift function.
   ///
   /// Left-shift this APInt by shiftAmt.
-  APInt shl(const APInt &ShiftAmt) const {
-    APInt R(*this);
-    R <<= ShiftAmt;
-    return R;
-  }
+  APInt shl(const APInt &shiftAmt) const;
 
   /// \brief Rotate left by rotateAmt.
   APInt rotl(const APInt &rotateAmt) const;
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 00003b5..cac22af 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1949,7 +1949,8 @@ bool MIParser::getHexUint(APInt &Result) {
     return true;
   StringRef V = S.substr(2);
   APInt A(V.size()*4, V, 16);
-  Result = A.zextOrTrunc(A.getActiveBits());
+  Result = APInt(A.getActiveBits(),
+                 ArrayRef<uint64_t>(A.getRawData(), A.getNumWords()));
   return false;
 }
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 80df6b8..4702d63 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5343,7 +5343,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
       APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
       SDValue Shift;
       if (c2 > c1) {
-        Mask <<= c2 - c1;
+        Mask = Mask.shl(c2 - c1);
         SDLoc DL(N);
         Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                             DAG.getConstant(c2 - c1, DL, N1.getValueType()));
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 1961e28..3bae3bf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2589,7 +2589,7 @@ SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, const SDLoc &dl) {
         DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT));
 
     APInt Shift(Sz, 1);
-    Shift <<= J;
+    Shift = Shift.shl(J);
     Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT));
     Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2);
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index c8a9dcb..c1cb5d9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -158,7 +158,9 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(SDNode *N, unsigned ResNo) {
   // and the low 64 bits here.
   if (DAG.getDataLayout().isBigEndian() &&
       CN->getValueType(0).getSimpleVT() == llvm::MVT::ppcf128) {
-    APInt Val = CN->getValueAPF().bitcastToAPInt().rotl(64);
+    uint64_t words[2] = { CN->getValueAPF().bitcastToAPInt().getRawData()[1],
+                          CN->getValueAPF().bitcastToAPInt().getRawData()[0] };
+    APInt Val(128, words);
     return DAG.getConstant(Val, SDLoc(CN),
                            TLI.getTypeToTransformTo(*DAG.getContext(),
                                                     CN->getValueType(0)));
@@ -1058,10 +1060,10 @@ void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo,
   APInt C = cast<ConstantFPSDNode>(N)->getValueAPF().bitcastToAPInt();
   SDLoc dl(N);
   Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
-                                 C.extractBits(64, 64)),
+                                 APInt(64, C.getRawData()[1])),
                          dl, NVT);
   Hi = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
-                                 C.extractBits(64, 0)),
+                                 APInt(64, C.getRawData()[0])),
                          dl, NVT);
 }
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index eb0b0b9..523f409 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2323,8 +2323,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                        Depth + 1);
-      KnownZero <<= *ShAmt;
-      KnownOne <<= *ShAmt;
+      KnownZero = KnownZero << *ShAmt;
+      KnownOne = KnownOne << *ShAmt;
       // Low bits are known zero.
       KnownZero.setLowBits(ShAmt->getZExtValue());
     }
@@ -4160,7 +4160,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
       unsigned FromBits = EVT.getScalarSizeInBits();
       Val <<= Val.getBitWidth() - FromBits;
-      Val.ashrInPlace(Val.getBitWidth() - FromBits);
+      Val = Val.ashr(Val.getBitWidth() - FromBits);
       return getConstant(Val, DL, ConstantVT);
     };
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 16ad1a3..069fb5b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1714,7 +1714,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
             bestWidth = width;
             break;
           }
-          newMask <<= width;
+          newMask = newMask << width;
         }
       }
     }
@@ -2981,7 +2981,7 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
     Flags.setExact(true);
     Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
     Created.push_back(Op1.getNode());
-    d.ashrInPlace(ShAmt);
+    d = d.ashr(ShAmt);
   }
 
   // Calculate the multiplicative inverse, using Newton's method.
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 9684443..10b4e98 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1565,7 +1565,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
         Tmp = Tmp.zext(SrcBitSize);
         Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
         Tmp = Tmp.zext(DstBitSize);
-        Tmp <<= ShiftAmt;
+        Tmp = Tmp.shl(ShiftAmt);
         ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
         Elt.IntVal |= Tmp;
       }
diff --git a/llvm/lib/Support/APInt.cpp b/llvm/lib/Support/APInt.cpp
index 1c697d3..9bb364a 100644
--- a/llvm/lib/Support/APInt.cpp
+++ b/llvm/lib/Support/APInt.cpp
@@ -844,7 +844,7 @@ APInt llvm::APIntOps::RoundDoubleToAPInt(double Double, unsigned width) {
 
   // Otherwise, we have to shift the mantissa bits up to the right location
   APInt Tmp(width, mantissa);
-  Tmp <<= (unsigned)exp - 52;
+  Tmp = Tmp.shl((unsigned)exp - 52);
   return isNeg ? -Tmp : Tmp;
 }
 
@@ -1128,10 +1128,9 @@ void APInt::lshrSlowCase(unsigned ShiftAmt) {
 
 /// Left-shift this APInt by shiftAmt.
 /// @brief Left-shift function.
-APInt &APInt::operator<<=(const APInt &shiftAmt) {
+APInt APInt::shl(const APInt &shiftAmt) const {
   // It's undefined behavior in C to shift by BitWidth or greater.
-  *this <<= (unsigned)shiftAmt.getLimitedValue(BitWidth);
-  return *this;
+  return shl((unsigned)shiftAmt.getLimitedValue(BitWidth));
 }
 
 void APInt::shlSlowCase(unsigned ShiftAmt) {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 3097145..7141e77 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1852,17 +1852,17 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
   OpUsefulBits = 1;
 
   if (MSB >= Imm) {
-    OpUsefulBits <<= MSB - Imm + 1;
+    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
     --OpUsefulBits;
     // The interesting part will be in the lower part of the result
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was starting at Imm in the argument
-    OpUsefulBits <<= Imm;
+    OpUsefulBits = OpUsefulBits.shl(Imm);
   } else {
-    OpUsefulBits <<= MSB + 1;
+    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
     --OpUsefulBits;
     // The interesting part will be shifted in the result
-    OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
+    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was at zero in the argument
     OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
@@ -1892,7 +1892,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
   if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
     // Shift Left
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
-    Mask <<= ShiftAmt;
+    Mask = Mask.shl(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
     Mask.lshrInPlace(ShiftAmt);
   } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
@@ -1902,7 +1902,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
     Mask.lshrInPlace(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
-    Mask <<= ShiftAmt;
+    Mask = Mask.shl(ShiftAmt);
   } else
     return;
 
@@ -1930,13 +1930,13 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
     uint64_t Width = MSB - Imm + 1;
     uint64_t LSB = Imm;
 
-    OpUsefulBits <<= Width;
+    OpUsefulBits = OpUsefulBits.shl(Width);
     --OpUsefulBits;
 
     if (Op.getOperand(1) == Orig) {
       // Copy the low bits from the result to bits starting from LSB.
       Mask = ResultUsefulBits & OpUsefulBits;
-      Mask <<= LSB;
+      Mask = Mask.shl(LSB);
     }
 
     if (Op.getOperand(0) == Orig)
@@ -1947,9 +1947,9 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
     uint64_t Width = MSB + 1;
     uint64_t LSB = UsefulBits.getBitWidth() - Imm;
 
-    OpUsefulBits <<= Width;
+    OpUsefulBits = OpUsefulBits.shl(Width);
     --OpUsefulBits;
-    OpUsefulBits <<= LSB;
+    OpUsefulBits = OpUsefulBits.shl(LSB);
 
     if (Op.getOperand(1) == Orig) {
       // Copy the bits from the result to the zero bits.
diff --git a/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp b/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
index 39f4398..7189b5a 100644
--- a/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
@@ -124,7 +124,7 @@ void llvm::HexagonLowerToMC(const MCInstrInfo &MCII, const MachineInstr *MI,
       // FP immediates are used only when setting GPRs, so they may be dealt
       // with like regular immediates from this point on.
       auto Expr = HexagonMCExpr::create(
-          MCConstantExpr::create(Val.bitcastToAPInt().getZExtValue(),
+          MCConstantExpr::create(*Val.bitcastToAPInt().getRawData(),
                                  AP.OutContext),
           AP.OutContext);
       HexagonMCInstrInfo::setMustExtend(*Expr, MustExtend);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d5bc443..e3de304 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26717,8 +26717,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
     unsigned ShAmt = ShiftImm->getZExtValue();
     if (Opc == X86ISD::VSHLI) {
-      KnownZero <<= ShAmt;
-      KnownOne <<= ShAmt;
+      KnownZero = KnownZero << ShAmt;
+      KnownOne = KnownOne << ShAmt;
       // Low bits are known zero.
       KnownZero.setLowBits(ShAmt);
     } else {
@@ -31054,7 +31054,8 @@ static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
       N0.getOperand(1).getOpcode() == ISD::Constant) {
     SDValue N00 = N0.getOperand(0);
     APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
-    Mask <<= N1C->getAPIntValue();
+    const APInt &ShAmt = N1C->getAPIntValue();
+    Mask = Mask.shl(ShAmt);
     bool MaskOK = false;
     // We can handle cases concerning bit-widening nodes containing setcc_c if
     // we carefully interrogate the mask to make sure we are semantics
@@ -31264,9 +31265,9 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
     unsigned ShiftImm = ShiftVal.getZExtValue();
     for (APInt &Elt : EltBits) {
       if (X86ISD::VSHLI == Opcode)
-        Elt <<= ShiftImm;
+        Elt = Elt.shl(ShiftImm);
       else if (X86ISD::VSRAI == Opcode)
-        Elt.ashrInPlace(ShiftImm);
+        Elt = Elt.ashr(ShiftImm);
       else
         Elt.lshrInPlace(ShiftImm);
     }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 20179bc..e7aa1a4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -378,7 +378,7 @@ static Value *simplifyX86immShift(const IntrinsicInst &II,
     for (unsigned i = 0; i != NumSubElts; ++i) {
       unsigned SubEltIdx = (NumSubElts - 1) - i;
       auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
-      Count <<= BitWidth;
+      Count = Count.shl(BitWidth);
      Count |= SubElt->getValue().zextOrTrunc(64);
     }
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index c5812ab..2f6e411 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -1562,7 +1562,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
       for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
         APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
         LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
-        LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
+        LaneElts = LaneElts.shl(InnerVWidthPerLane * (2 * Lane + OpNum));
         UndefElts |= LaneElts;
       }
     }
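
Usage note for downstream callers (illustrative sketch, not part of the patch):
after this revert, shifting an APInt by another APInt goes through the
value-returning shl(...) overloads again, since operator<<=(const APInt &) is
removed from the public header. The standalone snippet below shows what call
sites look like against the reverted API; it assumes an LLVM tree at this
revision and uses only APInt methods present in it.

    // Sketch of the post-revert APInt shift API (hypothetical example code).
    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/raw_ostream.h"

    using llvm::APInt;

    int main() {
      APInt Mask(64, 0xff);        // 64-bit APInt holding 0xff
      Mask = Mask.shl(8);          // value-returning shift; the in-place
                                   // operator<<=(const APInt &) is gone
      APInt Amt(64, 4);            // APInt-typed shift amount
      APInt Wide = Mask.shl(Amt);  // routed through APInt::shl(const APInt &)
      llvm::outs() << Mask << " " << Wide << "\n";  // prints "65280 1044480"
      return 0;
    }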