ArgChains.push_back(Chain);
// Add a chain value for each stack argument corresponding
- for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
- UE = DAG.getEntryNode().getNode()->use_end();
- U != UE; ++U)
- if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
+ for (SDNode *U : DAG.getEntryNode().getNode()->uses())
+ if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
if (FI->getIndex() < 0) {
int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
ArgChains.push_back(Chain);
// Add a chain value for each stack argument corresponding
- for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
- UE = DAG.getEntryNode().getNode()->use_end();
- U != UE; ++U) {
- if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
+ for (SDNode *U : DAG.getEntryNode().getNode()->uses()) {
+ if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) {
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
if (FI->getIndex() < 0) {
int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
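All of these hunks apply the same mechanical rewrite: `SDNode::uses()` returns a range whose iterator dereferences to the using `SDNode *` (the removed lines rely on the same fact through `*UI` and `UI->`), so the explicit `use_begin()`/`use_end()` pair and the `SDNode *User = *UI;` dereference in the body both disappear. A minimal before/after sketch, compiling only against LLVM's SelectionDAG headers; the helper names are invented for illustration and are not part of the patch:

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical helper: count the users of N that have opcode Opc.
// Old form, with explicit iterators; operator* and operator-> on
// SDNode::use_iterator yield the user node.
static unsigned countUsersWithOpcodeOld(SDNode *N, unsigned Opc) {
  unsigned Count = 0;
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
       UI != UE; ++UI)
    if (UI->getOpcode() == Opc)
      ++Count;
  return Count;
}

// Range-based form: uses() yields the user SDNode* directly.
static unsigned countUsersWithOpcode(SDNode *N, unsigned Opc) {
  unsigned Count = 0;
  for (const SDNode *U : N->uses())
    if (U->getOpcode() == Opc)
      ++Count;
  return Count;
}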
SDNode *VMov = Copy;
// f64 returned in a pair of GPRs.
SmallPtrSet<SDNode*, 2> Copies;
- for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
- UI != UE; ++UI) {
- if (UI->getOpcode() != ISD::CopyToReg)
+ for (SDNode *U : VMov->uses()) {
+ if (U->getOpcode() != ISD::CopyToReg)
return false;
- Copies.insert(*UI);
+ Copies.insert(U);
}
if (Copies.size() > 2)
return false;
- for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
- UI != UE; ++UI) {
- SDValue UseChain = UI->getOperand(0);
+ for (SDNode *U : VMov->uses()) {
+ SDValue UseChain = U->getOperand(0);
if (Copies.count(UseChain.getNode()))
// Second CopyToReg
- Copy = *UI;
+ Copy = U;
else {
// We are at the top of this chain.
// If the copy has a glue operand, we conservatively assume it
// isn't safe to perform a tail call.
- if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
+ if (U->getOperand(U->getNumOperands() - 1).getValueType() == MVT::Glue)
return false;
// First CopyToReg
TCChain = UseChain;
}
bool HasRet = false;
- for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
- UI != UE; ++UI) {
- if (UI->getOpcode() != ARMISD::RET_FLAG &&
- UI->getOpcode() != ARMISD::INTRET_FLAG)
+ for (const SDNode *U : Copy->uses()) {
+ if (U->getOpcode() != ARMISD::RET_FLAG &&
+ U->getOpcode() != ARMISD::INTRET_FLAG)
return false;
HasRet = true;
}
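The ARM hunk above (and the X86 RET_FLAG hunk further down) share one shape: walk every user of the final CopyToReg, reject the tail call on any user that is not a return, and record via HasRet that at least one return was actually seen. A minimal sketch of that shape, assuming a hypothetical free-standing helper (allUsersAreReturns is not a real LLVM function); the real code also checks operand counts and glue, which is omitted here:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical helper: true iff Copy has at least one user and every user's
// opcode is one of the target's return opcodes (e.g. ARMISD::RET_FLAG and
// ARMISD::INTRET_FLAG, or X86ISD::RET_FLAG).
static bool allUsersAreReturns(SDNode *Copy, ArrayRef<unsigned> RetOpcodes) {
  bool HasRet = false;
  for (const SDNode *U : Copy->uses()) {
    if (!is_contained(RetOpcodes, U->getOpcode()))
      return false; // some other user consumes the value: no tail call
    HasRet = true;  // at least one return really uses Copy
  }
  return HasRet;
}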
//
int numUses = 0;
int nonAddCount = 0;
- for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
- UE = N0.getNode()->use_end();
- UI != UE; ++UI) {
+ for (const SDNode *User : N0.getNode()->uses()) {
numUses++;
- SDNode *User = *UI;
if (User->getOpcode() != ISD::FADD)
++nonAddCount;
}
opIsLive = true;
if (!opIsLive)
- for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
+ for (const SDNode *User : left->uses()) {
int orderNo3 = User->getIROrder();
if (orderNo3 > orderNo) {
opIsLive = true;
}
if (!opIsLive)
- for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
+ for (const SDNode *User : right->uses()) {
int orderNo3 = User->getIROrder();
if (orderNo3 > orderNo) {
opIsLive = true;
Comparison &C) {
if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
C.CCMask == SystemZ::CCMASK_CMP_NE) {
- for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
- SDNode *N = *I;
+ for (SDNode *N : C.Op0->uses()) {
if (N->getOpcode() == ISD::SUB &&
((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
(N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
return;
auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
if (C1 && C1->isZero()) {
- for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
- SDNode *N = *I;
+ for (SDNode *N : C.Op0->uses()) {
if (N->getOpcode() == ISD::FNEG) {
C.Op0 = SDValue(N, 0);
C.CCMask = SystemZ::reverseCCMask(C.CCMask);
if (C1 && C1->getZExtValue() == 32) {
SDValue ShlOp0 = C.Op0.getOperand(0);
// See whether X has any SIGN_EXTEND_INREG uses.
- for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
- SDNode *N = *I;
+ for (SDNode *N : ShlOp0->uses()) {
if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
C.Op0 = SDValue(N, 0);
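The three SystemZ hunks above all scan the users of C.Op0 looking for one node of a specific shape (a matching SUB, an FNEG, or a SIGN_EXTEND_INREG of the shifted value) and, if one is found, point the comparison at that node instead. Reduced to its core, the scan looks like the sketch below; findUserWithOpcode is an invented name, and the real loops add operand and type checks before rewriting C.Op0:

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical helper: return the first user of V's node with opcode Opc,
// or null if there is none.
static SDNode *findUserWithOpcode(SDValue V, unsigned Opc) {
  for (SDNode *N : V->uses())
    if (N->getOpcode() == Opc)
      return N;
  return nullptr;
}

The FNEG case above, for instance, amounts to finding such a user, pointing C.Op0 at its result, and reversing the CC mask.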
case ISD::CopyToReg:
// Check all use of selections, bit operations, and copies. If all of them
// are safe, optimize truncate to extract_subreg.
- for (SDNode::use_iterator UI = User->use_begin(), UE = User->use_end();
- UI != UE; ++UI) {
- switch ((*UI)->getOpcode()) {
+ for (const SDNode *U : User->uses()) {
+ switch (U->getOpcode()) {
default:
// If the use is an instruction which treats the source operand as i32,
// it is safe to avoid truncate here.
return SDValue();
// Check all use of this TRUNCATE.
- for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE;
- ++UI) {
- SDNode *User = *UI;
-
+ for (const SDNode *User : N->uses()) {
// Make sure that we're not going to replace TRUNCATE for non i32
// instructions.
//
return false;
// Walk all the users of the immediate.
- for (SDNode::use_iterator UI = N->use_begin(),
- UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {
-
- SDNode *User = *UI;
+ for (const SDNode *User : N->uses()) {
+ if (UseCount >= 2)
+ break;
// This user is already selected. Count it as a legitimate use and
// move on.
return false;
bool HasRet = false;
- for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
- UI != UE; ++UI) {
- if (UI->getOpcode() != X86ISD::RET_FLAG)
+ for (const SDNode *U : Copy->uses()) {
+ if (U->getOpcode() != X86ISD::RET_FLAG)
return false;
// If we are returning more than one value, we can definitely
// not make a tail call see PR19530
- if (UI->getNumOperands() > 4)
+ if (U->getNumOperands() > 4)
return false;
- if (UI->getNumOperands() == 4 &&
- UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
+ if (U->getNumOperands() == 4 &&
+ U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
return false;
HasRet = true;
}
(VT == MVT::f16 && Subtarget.hasFP16())) {
bool ExpectingFlags = false;
// Check for any users that want flags:
- for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
- !ExpectingFlags && UI != UE; ++UI)
- switch (UI->getOpcode()) {
+ for (const SDNode *U : N->uses()) {
+ if (ExpectingFlags)
+ break;
+
+ switch (U->getOpcode()) {
default:
case ISD::BR_CC:
case ISD::BRCOND:
case ISD::ANY_EXTEND:
break;
}
+ }
if (!ExpectingFlags) {
enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
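Two of the converted loops, the Hexagon immediate walk and the X86 ExpectingFlags scan just above, had an extra predicate folded into the old loop condition ("(UI != UE) && (UseCount < 2)" and "!ExpectingFlags && UI != UE"). In the range-based form that predicate becomes an explicit break at the top of the body, which is also why the X86 loop gains braces and the extra closing "}". A sketch of the shape, assuming a hypothetical helper built around the Hexagon logic (hasFewSelectedUsers is not a real function):

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical helper: scan N's users, counting already-selected ones, and
// stop as soon as the cap is reached. The check that used to live in the
// for-loop condition now sits at the top of the body.
static bool hasFewSelectedUsers(SDNode *N, unsigned Limit = 2) {
  unsigned UseCount = 0;
  for (const SDNode *User : N->uses()) {
    if (UseCount >= Limit)
      break; // was: "(UI != UE) && (UseCount < Limit)"
    if (User->isMachineOpcode()) // this user is already selected
      ++UseCount;
  }
  return UseCount < Limit;
}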
static bool needCarryOrOverflowFlag(SDValue Flags) {
assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
- for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
- UI != UE; ++UI) {
- SDNode *User = *UI;
-
+ for (const SDNode *User : Flags->uses()) {
X86::CondCode CC;
switch (User->getOpcode()) {
default:
static bool onlyZeroFlagUsed(SDValue Flags) {
assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
- for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
- UI != UE; ++UI) {
- SDNode *User = *UI;
-
+ for (const SDNode *User : Flags->uses()) {
unsigned CCOpNo;
switch (User->getOpcode()) {
default: