// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
bool Changed = false;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
- MachineInstr &MI = *I++;
+ for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
if (isProfitableToTransform(MI)) {
transformInstruction(MI);
Changed = true;
}
// uncheckUsesOf - For every use of TaggedReg: if the user is an unchecked
// load/store opcode, rewrite its address operand into a direct frame-index
// reference carrying the MO_TAGGED target flag; if the user is a COPY into a
// virtual register, recurse so uses reachable through that copy are handled
// as well.
void AArch64StackTaggingPreRA::uncheckUsesOf(unsigned TaggedReg, int FI) {
- for (auto UI = MRI->use_instr_begin(TaggedReg), E = MRI->use_instr_end();
- UI != E;) {
- MachineInstr *UseI = &*(UI++);
- if (isUncheckedLoadOrStoreOpcode(UseI->getOpcode())) {
// Early-increment iteration: the operand rewrite below removes UseI from
// TaggedReg's use list, so the iterator is advanced before each instruction
// is inspected (the old code did this manually via the post-increment).
+ for (MachineInstr &UseI :
+ llvm::make_early_inc_range(MRI->use_instructions(TaggedReg))) {
+ if (isUncheckedLoadOrStoreOpcode(UseI.getOpcode())) {
// FI operand is always the one before the immediate offset.
- unsigned OpIdx = TII->getLoadStoreImmIdx(UseI->getOpcode()) - 1;
- if (UseI->getOperand(OpIdx).isReg() &&
- UseI->getOperand(OpIdx).getReg() == TaggedReg) {
- UseI->getOperand(OpIdx).ChangeToFrameIndex(FI);
- UseI->getOperand(OpIdx).setTargetFlags(AArch64II::MO_TAGGED);
// Only rewrite when the operand really is TaggedReg itself (the index
// computed above may point at a different operand for some opcodes).
+ unsigned OpIdx = TII->getLoadStoreImmIdx(UseI.getOpcode()) - 1;
+ if (UseI.getOperand(OpIdx).isReg() &&
+ UseI.getOperand(OpIdx).getReg() == TaggedReg) {
+ UseI.getOperand(OpIdx).ChangeToFrameIndex(FI);
+ UseI.getOperand(OpIdx).setTargetFlags(AArch64II::MO_TAGGED);
}
// Follow copies into virtual registers recursively; physical-register
// destinations are deliberately left alone.
- } else if (UseI->isCopy() &&
- Register::isVirtualRegister(UseI->getOperand(0).getReg())) {
- uncheckUsesOf(UseI->getOperand(0).getReg(), FI);
+ } else if (UseI.isCopy() &&
+ Register::isVirtualRegister(UseI.getOperand(0).getReg())) {
+ uncheckUsesOf(UseI.getOperand(0).getReg(), FI);
}
}
}
bool Changed = false;
for (auto &BB : F)
- for (auto BI = BB.begin(), BE = BB.end(); BI != BE; /*EMPTY*/) {
- Instruction *I = &*BI++;
- Changed |= visit(*I);
- }
+ for (Instruction &I : llvm::make_early_inc_range(BB))
+ Changed |= visit(I);
return Changed;
}
Intrinsic::ID ID = F.getIntrinsicID();
bool Changed = false;
- for (auto I = F.user_begin(), E = F.user_end(); I != E;) {
- Instruction *Inst = cast<Instruction>(*I);
- ++I;
+ for (User *U : llvm::make_early_inc_range(F.users())) {
+ Instruction *Inst = cast<Instruction>(U);
switch (ID) {
case Intrinsic::memcpy: {
bool Changed = false;
for (auto &MBB : MF) {
- for (auto I = MBB.rbegin(), E = MBB.rend(); I != E;) {
- auto &MI = *I++;
+ for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
if (MI.getOpcode() == AMDGPU::V_MOV_B32_dpp && combineDPPMov(MI)) {
Changed = true;
++NumDPPMovsCombined;
bool SeenDbgInstr = false;
for (MachineBasicBlock &MBB : MF) {
- MachineBasicBlock::iterator Next;
- for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
- MachineInstr &MI = *I;
- Next = std::next(I);
-
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
if (MI.isDebugInstr())
SeenDbgInstr = true;
bool MadeChange = false;
for (MachineBasicBlock &MBB : MF) {
- MachineBasicBlock::iterator I, Next;
- for (I = MBB.begin(); I != MBB.end(); I = Next) {
- Next = std::next(I);
- MachineInstr &MI = *I;
-
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
switch (MI.getOpcode()) {
case AMDGPU::S_BRANCH:
// Optimize out branches to the next block.
BitVector SpillFIs(MFI.getObjectIndexEnd(), false);
for (MachineBasicBlock &MBB : MF) {
- MachineBasicBlock::iterator Next;
- for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
- MachineInstr &MI = *I;
- Next = std::next(I);
-
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
if (!TII->isSGPRSpill(MI))
continue;