From: Craig Topper
Date: Sun, 23 Apr 2017 05:18:31 +0000 (+0000)
Subject: [APInt] Use operator<<= instead of shl where possible. NFC
X-Git-Tag: llvmorg-5.0.0-rc1~6934
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5f68af08066b28be2cb12befe0067cbafa0f8ac2;p=platform%2Fupstream%2Fllvm.git

[APInt] Use operator<<= instead of shl where possible. NFC

llvm-svn: 301103
---

diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h
index 022cfc9..0e6d2c9 100644
--- a/llvm/include/llvm/ADT/APInt.h
+++ b/llvm/include/llvm/ADT/APInt.h
@@ -879,6 +879,13 @@ public:
     return *this;
   }
 
+  /// \brief Left-shift assignment function.
+  ///
+  /// Shifts *this left by ShiftAmt and assigns the result to *this.
+  ///
+  /// \returns *this after shifting left by ShiftAmt
+  APInt &operator<<=(const APInt &ShiftAmt);
+
   /// @}
   /// \name Binary Operators
   /// @{
@@ -986,7 +993,11 @@ public:
   /// \brief Left-shift function.
   ///
   /// Left-shift this APInt by shiftAmt.
-  APInt shl(const APInt &shiftAmt) const;
+  APInt shl(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R <<= ShiftAmt;
+    return R;
+  }
 
   /// \brief Rotate left by rotateAmt.
   APInt rotl(const APInt &rotateAmt) const;
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4702d63..80df6b8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5343,7 +5343,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
       APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
       SDValue Shift;
       if (c2 > c1) {
-        Mask = Mask.shl(c2 - c1);
+        Mask <<= c2 - c1;
         SDLoc DL(N);
         Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                             DAG.getConstant(c2 - c1, DL, N1.getValueType()));
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 3bae3bf..1961e28 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2589,7 +2589,7 @@ SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, const SDLoc &dl) {
       DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT));
 
     APInt Shift(Sz, 1);
-    Shift = Shift.shl(J);
+    Shift <<= J;
     Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT));
     Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2);
   }
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 10b4e98..9684443 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1565,7 +1565,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
         Tmp = Tmp.zext(SrcBitSize);
         Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
         Tmp = Tmp.zext(DstBitSize);
-        Tmp = Tmp.shl(ShiftAmt);
+        Tmp <<= ShiftAmt;
         ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
         Elt.IntVal |= Tmp;
       }
diff --git a/llvm/lib/Support/APInt.cpp b/llvm/lib/Support/APInt.cpp
index c72e3bf..5f3358d 100644
--- a/llvm/lib/Support/APInt.cpp
+++ b/llvm/lib/Support/APInt.cpp
@@ -844,7 +844,7 @@ APInt llvm::APIntOps::RoundDoubleToAPInt(double Double, unsigned width) {
 
   // Otherwise, we have to shift the mantissa bits up to the right location
   APInt Tmp(width, mantissa);
-  Tmp = Tmp.shl((unsigned)exp - 52);
+  Tmp <<= (unsigned)exp - 52;
   return isNeg ? -Tmp : Tmp;
 }
 
@@ -1081,9 +1081,10 @@ void APInt::lshrSlowCase(unsigned ShiftAmt) {
 
 /// Left-shift this APInt by shiftAmt.
 /// @brief Left-shift function.
-APInt APInt::shl(const APInt &shiftAmt) const {
+APInt &APInt::operator<<=(const APInt &shiftAmt) {
   // It's undefined behavior in C to shift by BitWidth or greater.
-  return shl((unsigned)shiftAmt.getLimitedValue(BitWidth));
+  *this <<= (unsigned)shiftAmt.getLimitedValue(BitWidth);
+  return *this;
 }
 
 void APInt::shlSlowCase(unsigned ShiftAmt) {
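The pattern being replaced is mechanical: X = X.shl(N) constructs a temporary
APInt and copies it back into X, while X <<= N shifts in place. The new
operator<<=(const APInt &) above also clamps the shift amount with
getLimitedValue(BitWidth): APInt defines a shift by BitWidth or more to
produce zero, whereas shifting a native integer by its full width is
undefined behavior in C++. A minimal sketch of both points, standalone and
with illustrative values only:

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using llvm::APInt;

  void sketch() {
    APInt Mask(64, 0xFF);
    Mask = Mask.shl(8); // old form: shl builds a temporary that is copied back
    Mask <<= 8;         // new form: shifts in place, no temporary

    // Shift amounts >= BitWidth are well-defined for APInt: the
    // getLimitedValue(BitWidth) clamp above makes the result zero.
    APInt V(8, 0x81);
    V <<= APInt(32, 1000); // exercises the new operator<<=(const APInt &)
    assert(V == 0);
  }
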
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 7141e77..3097145 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1852,17 +1852,17 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
   OpUsefulBits = 1;
 
   if (MSB >= Imm) {
-    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
+    OpUsefulBits <<= MSB - Imm + 1;
     --OpUsefulBits;
     // The interesting part will be in the lower part of the result
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was starting at Imm in the argument
-    OpUsefulBits = OpUsefulBits.shl(Imm);
+    OpUsefulBits <<= Imm;
   } else {
-    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
+    OpUsefulBits <<= MSB + 1;
     --OpUsefulBits;
     // The interesting part will be shifted in the result
-    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
+    OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was at zero in the argument
     OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
@@ -1892,7 +1892,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
   if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
     // Shift Left
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
-    Mask = Mask.shl(ShiftAmt);
+    Mask <<= ShiftAmt;
     getUsefulBits(Op, Mask, Depth + 1);
     Mask.lshrInPlace(ShiftAmt);
   } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
@@ -1902,7 +1902,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
     Mask.lshrInPlace(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
-    Mask = Mask.shl(ShiftAmt);
+    Mask <<= ShiftAmt;
   } else
     return;
 
@@ -1930,13 +1930,13 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
     uint64_t Width = MSB - Imm + 1;
     uint64_t LSB = Imm;
 
-    OpUsefulBits = OpUsefulBits.shl(Width);
+    OpUsefulBits <<= Width;
     --OpUsefulBits;
 
     if (Op.getOperand(1) == Orig) {
       // Copy the low bits from the result to bits starting from LSB.
       Mask = ResultUsefulBits & OpUsefulBits;
-      Mask = Mask.shl(LSB);
+      Mask <<= LSB;
     }
 
     if (Op.getOperand(0) == Orig)
@@ -1947,9 +1947,9 @@
     uint64_t Width = MSB + 1;
     uint64_t LSB = UsefulBits.getBitWidth() - Imm;
 
-    OpUsefulBits = OpUsefulBits.shl(Width);
+    OpUsefulBits <<= Width;
     --OpUsefulBits;
-    OpUsefulBits = OpUsefulBits.shl(LSB);
+    OpUsefulBits <<= LSB;
 
     if (Op.getOperand(1) == Orig) {
       // Copy the bits from the result to the zero bits.
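The AArch64 hunks lean on a small idiom that is easy to miss: starting from
the value 1, shifting left by Width and then decrementing yields a mask with
the low Width bits set, i.e. what APInt::getLowBitsSet computes. A standalone
illustration with a made-up width:

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using llvm::APInt;

  void maskIdiom() {
    APInt OpUsefulBits(32, 1); // start at 1, as the code above does
    unsigned Width = 5;        // illustrative width
    OpUsefulBits <<= Width;    // 0b100000
    --OpUsefulBits;            // 0b011111: low Width bits set
    assert(OpUsefulBits == APInt::getLowBitsSet(32, Width));
  }
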
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a664cb1..5c2b134 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -31054,8 +31054,7 @@ static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
       N0.getOperand(1).getOpcode() == ISD::Constant) {
     SDValue N00 = N0.getOperand(0);
     APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
-    const APInt &ShAmt = N1C->getAPIntValue();
-    Mask = Mask.shl(ShAmt);
+    Mask <<= N1C->getAPIntValue();
     bool MaskOK = false;
     // We can handle cases concerning bit-widening nodes containing setcc_c if
     // we carefully interrogate the mask to make sure we are semantics
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index e7aa1a4..20179bc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -378,7 +378,7 @@ static Value *simplifyX86immShift(const IntrinsicInst &II,
     for (unsigned i = 0; i != NumSubElts; ++i) {
       unsigned SubEltIdx = (NumSubElts - 1) - i;
       auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
-      Count = Count.shl(BitWidth);
+      Count <<= BitWidth;
       Count |= SubElt->getValue().zextOrTrunc(64);
     }
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 2f6e411..c5812ab 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -1562,7 +1562,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
       for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
         APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
         LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
-        LaneElts = LaneElts.shl(InnerVWidthPerLane * (2 * Lane + OpNum));
+        LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
         UndefElts |= LaneElts;
       }
     }
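The InstCombineCalls hunk is where the in-place operator reads most
naturally: the loop folds vector sub-elements into a single 64-bit count,
shifting the accumulator left to make room and OR-ing each element in, most
significant first. A simplified standalone sketch of that pattern; the helper
name and its arguments are invented for illustration and assume
NumElts * EltBits <= 64:

  #include "llvm/ADT/APInt.h"
  #include <cstdint>
  using llvm::APInt;

  // Hypothetical helper mirroring the accumulation in simplifyX86immShift:
  // packs NumElts values of EltBits bits each into one APInt, with the
  // first element ending up in the most significant position.
  APInt packElements(const uint64_t *Elts, unsigned NumElts, unsigned EltBits) {
    APInt Count(64, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      Count <<= EltBits;           // make room for the next element
      Count |= APInt(64, Elts[i]); // splice it into the low bits
    }
    return Count;
  }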