From b9810988b23c96d1628371868336cb27e2daacca Mon Sep 17 00:00:00 2001 From: Guillaume Chatelet Date: Tue, 31 Mar 2020 09:43:50 +0000 Subject: [PATCH] [Alignment][NFC] Transitioning more getMachineMemOperand call sites Summary: This patch is part of a series to introduce an Alignment type. See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html See this patch for the introduction of the type: https://reviews.llvm.org/D64790 Reviewers: courbet Subscribers: hiraditya, llvm-commits Tags: #llvm Differential Revision: https://reviews.llvm.org/D77127 --- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 40 +++++++++++----------- llvm/lib/CodeGen/MIRParser/MIParser.cpp | 4 +-- llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 12 +++---- .../CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 27 +++++++-------- llvm/lib/CodeGen/TargetInstrInfo.cpp | 6 ++-- llvm/lib/CodeGen/TargetLoweringBase.cpp | 2 +- llvm/lib/Target/AArch64/AArch64FastISel.cpp | 4 +-- llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 12 +++---- llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp | 26 +++++++------- .../lib/Target/AArch64/AArch64SelectionDAGInfo.cpp | 2 +- llvm/lib/Target/ARC/ARCInstrInfo.cpp | 8 ++--- llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 12 +++---- llvm/lib/Target/ARM/ARMCallLowering.cpp | 2 +- llvm/lib/Target/ARM/ARMFastISel.cpp | 19 ++++------ llvm/lib/Target/ARM/ARMISelLowering.cpp | 12 ++++--- llvm/lib/Target/ARM/ARMInstrInfo.cpp | 2 +- llvm/lib/Target/ARM/Thumb1InstrInfo.cpp | 4 +-- llvm/lib/Target/ARM/Thumb2InstrInfo.cpp | 4 +-- 18 files changed, 96 insertions(+), 102 deletions(-) diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 98fbdd1..58888c4 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -883,12 +883,12 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { MIRBuilder.materializePtrAdd(Addr, Base, 
OffsetTy, Offsets[i] / 8); MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); - unsigned BaseAlign = getMemOpAlignment(LI); + Align BaseAlign = getMemOpAlign(LI); AAMDNodes AAMetadata; LI.getAAMetadata(AAMetadata); auto MMO = MF->getMachineMemOperand( - Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8, - MinAlign(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges, + Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(), + commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges, LI.getSyncScopeID(), LI.getOrdering()); MIRBuilder.buildLoad(Regs[i], Addr, *MMO); } @@ -925,12 +925,12 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); - unsigned BaseAlign = getMemOpAlignment(SI); + Align BaseAlign = getMemOpAlign(SI); AAMDNodes AAMetadata; SI.getAAMetadata(AAMetadata); auto MMO = MF->getMachineMemOperand( - Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8, - MinAlign(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr, + Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(), + commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr, SI.getSyncScopeID(), SI.getOrdering()); MIRBuilder.buildStore(Vals[i], Addr, *MMO); } @@ -1145,20 +1145,21 @@ bool IRTranslator::translateMemFunc(const CallInst &CI, for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) ICall.addUse(getOrCreateVReg(**AI)); - unsigned DstAlign = 0, SrcAlign = 0; + Align DstAlign; + Align SrcAlign; unsigned IsVol = cast(CI.getArgOperand(CI.getNumArgOperands() - 1)) ->getZExtValue(); if (auto *MCI = dyn_cast(&CI)) { - DstAlign = std::max(MCI->getDestAlignment(), 1); - SrcAlign = std::max(MCI->getSourceAlignment(), 1); + DstAlign = MCI->getDestAlign().valueOrOne(); + SrcAlign = MCI->getSourceAlign().valueOrOne(); } else if (auto *MMI = dyn_cast(&CI)) { - DstAlign = 
std::max(MMI->getDestAlignment(), 1); - SrcAlign = std::max(MMI->getSourceAlignment(), 1); + DstAlign = MMI->getDestAlign().valueOrOne(); + SrcAlign = MMI->getSourceAlign().valueOrOne(); } else { auto *MSI = cast(&CI); - DstAlign = std::max(MSI->getDestAlignment(), 1); + DstAlign = MSI->getDestAlign().valueOrOne(); } // We need to propagate the tail call flag from the IR inst as an argument. @@ -1490,7 +1491,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, - PtrTy.getSizeInBits() / 8, 8)); + PtrTy.getSizeInBits() / 8, Align(8))); return true; } case Intrinsic::stacksave: { @@ -2021,11 +2022,10 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U, MIRBuilder.buildAtomicCmpXchgWithSuccess( OldValRes, SuccessRes, Addr, Cmp, NewVal, - *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), - Flags, DL->getTypeStoreSize(ValType), - getMemOpAlignment(I), AAMetadata, nullptr, - I.getSyncScopeID(), I.getSuccessOrdering(), - I.getFailureOrdering())); + *MF->getMachineMemOperand( + MachinePointerInfo(I.getPointerOperand()), Flags, + DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr, + I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering())); return true; } @@ -2093,8 +2093,8 @@ bool IRTranslator::translateAtomicRMW(const User &U, Opcode, Res, Addr, Val, *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags, DL->getTypeStoreSize(ResType), - getMemOpAlignment(I), AAMetadata, - nullptr, I.getSyncScopeID(), I.getOrdering())); + getMemOpAlign(I), AAMetadata, nullptr, + I.getSyncScopeID(), I.getOrdering())); return true; } diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index 689e5af..c948fba 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -3095,8 
+3095,8 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) { } if (expectAndConsume(MIToken::rparen)) return true; - Dest = MF.getMachineMemOperand(Ptr, Flags, Size, BaseAlignment, AAInfo, Range, - SSID, Order, FailureOrder); + Dest = MF.getMachineMemOperand(Ptr, Flags, Size, Align(BaseAlignment), AAInfo, + Range, SSID, Order, FailureOrder); return false; } diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp index 80069e5..7ed8097 100644 --- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -2433,18 +2433,18 @@ MachineMemOperand * FastISel::createMachineMemOperandFor(const Instruction *I) const { const Value *Ptr; Type *ValTy; - unsigned Alignment; + MaybeAlign Alignment; MachineMemOperand::Flags Flags; bool IsVolatile; if (const auto *LI = dyn_cast(I)) { - Alignment = LI->getAlignment(); + Alignment = LI->getAlign(); IsVolatile = LI->isVolatile(); Flags = MachineMemOperand::MOLoad; Ptr = LI->getPointerOperand(); ValTy = LI->getType(); } else if (const auto *SI = dyn_cast(I)) { - Alignment = SI->getAlignment(); + Alignment = SI->getAlign(); IsVolatile = SI->isVolatile(); Flags = MachineMemOperand::MOStore; Ptr = SI->getPointerOperand(); @@ -2460,8 +2460,8 @@ FastISel::createMachineMemOperandFor(const Instruction *I) const { AAMDNodes AAInfo; I->getAAMetadata(AAInfo); - if (Alignment == 0) // Ensure that codegen never sees alignment 0. - Alignment = DL.getABITypeAlignment(ValTy); + if (!Alignment) // Ensure that codegen never sees alignment 0. 
+ Alignment = DL.getABITypeAlign(ValTy); unsigned Size = DL.getTypeStoreSize(ValTy); @@ -2475,7 +2475,7 @@ FastISel::createMachineMemOperandFor(const Instruction *I) const { Flags |= MachineMemOperand::MOInvariant; return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size, - Alignment, AAInfo, Ranges); + *Alignment, AAInfo, Ranges); } CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const { diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 45c7a4f..c761d4b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -337,8 +337,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) { N->getValueType(0).getVectorElementType(), SDLoc(N), N->getChain(), N->getBasePtr(), DAG.getUNDEF(N->getBasePtr().getValueType()), N->getPointerInfo(), N->getMemoryVT().getVectorElementType(), - N->getOriginalAlign().value(), N->getMemOperand()->getFlags(), - N->getAAInfo()); + N->getOriginalAlign(), N->getMemOperand()->getFlags(), N->getAAInfo()); // Legalize the chain result - switch anything that used the old chain to // use the new one. 
@@ -755,8 +754,8 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){ return DAG.getStore(N->getChain(), dl, GetScalarizedVector(N->getOperand(1)), N->getBasePtr(), N->getPointerInfo(), - N->getOriginalAlign().value(), - N->getMemOperand()->getFlags(), N->getAAInfo()); + N->getOriginalAlign(), N->getMemOperand()->getFlags(), + N->getAAInfo()); } /// If the value to round is a vector that needs to be scalarized, it must be @@ -1525,14 +1524,13 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo, } Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset, - LD->getPointerInfo(), LoMemVT, Alignment.value(), MMOFlags, - AAInfo); + LD->getPointerInfo(), LoMemVT, Alignment, MMOFlags, AAInfo); unsigned IncrementSize = LoMemVT.getSizeInBits()/8; Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset, LD->getPointerInfo().getWithOffset(IncrementSize), HiMemVT, - Alignment.value(), MMOFlags, AAInfo); + Alignment, MMOFlags, AAInfo); // Build a factor node to remember that this load is independent of the // other one. 
@@ -1651,7 +1649,7 @@ void DAGTypeLegalizer::SplitVecRes_MGATHER(MaskedGatherSDNode *MGT, MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( MGT->getPointerInfo(), MachineMemOperand::MOLoad, - MemoryLocation::UnknownSize, Alignment.value(), MGT->getAAInfo(), + MemoryLocation::UnknownSize, Alignment, MGT->getAAInfo(), MGT->getRanges()); SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Scale}; @@ -2410,8 +2408,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSCATTER(MaskedScatterSDNode *N, SDValue Lo; MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( N->getPointerInfo(), MachineMemOperand::MOStore, - MemoryLocation::UnknownSize, Alignment.value(), N->getAAInfo(), - N->getRanges()); + MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges()); SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Scale}; Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(), @@ -2451,10 +2448,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { if (isTruncating) Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT, - Alignment.value(), MMOFlags, AAInfo); + Alignment, MMOFlags, AAInfo); else - Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment.value(), - MMOFlags, AAInfo); + Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags, + AAInfo); // Increment the pointer to the other half. 
Ptr = DAG.getObjectPtrOffset(DL, Ptr, IncrementSize); @@ -2462,11 +2459,11 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { if (isTruncating) Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, N->getPointerInfo().getWithOffset(IncrementSize), - HiMemVT, Alignment.value(), MMOFlags, AAInfo); + HiMemVT, Alignment, MMOFlags, AAInfo); else Hi = DAG.getStore(Ch, DL, Hi, Ptr, N->getPointerInfo().getWithOffset(IncrementSize), - Alignment.value(), MMOFlags, AAInfo); + Alignment, MMOFlags, AAInfo); return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi); } diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp index cc390cf..39c47ce 100644 --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -591,9 +591,9 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI, NewMI->mayLoad()) && "Folded a use to a non-load!"); assert(MFI.getObjectOffset(FI) != -1); - MachineMemOperand *MMO = MF.getMachineMemOperand( - MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize, - MFI.getObjectAlignment(FI)); + MachineMemOperand *MMO = + MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI), + Flags, MemSize, MFI.getObjectAlign(FI)); NewMI->addMemOperand(MF, MMO); return NewMI; diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp index a84b9d1..69db1d9 100644 --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -1066,7 +1066,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI, auto Flags = MachineMemOperand::MOLoad; MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), Flags, - MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI)); + MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI)); MIB->addMemOperand(MF, MMO); } diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp 
index 7e9c68f..c7cfccd 100644 --- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp +++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp @@ -1130,7 +1130,7 @@ void AArch64FastISel::addLoadStoreOperands(Address &Addr, // and alignment should be based on the VT. MMO = FuncInfo.MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags, - MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); // Now add the rest of the operands. MIB.addFrameIndex(FI).addImm(Offset); } else { @@ -3137,7 +3137,7 @@ bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI, Addr.setReg(AArch64::SP); Addr.setOffset(VA.getLocMemOffset() + BEAlign); - unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType()); + Align Alignment = DL.getABITypeAlign(ArgVal->getType()); MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()), MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment); diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 436b26c..718012e 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -2922,11 +2922,11 @@ void AArch64InstrInfo::storeRegToStackSlot( const TargetRegisterInfo *TRI) const { MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned Align = MFI.getObjectAlignment(FI); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI); - MachineMemOperand *MMO = MF.getMachineMemOperand( - PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align); + MachineMemOperand *MMO = + MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); unsigned Opc = 0; bool Offset = true; switch (TRI->getSpillSize(*RC)) { @@ -3064,10 +3064,10 @@ void AArch64InstrInfo::loadRegFromStackSlot( const TargetRegisterInfo *TRI) 
const { MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned Align = MFI.getObjectAlignment(FI); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI); - MachineMemOperand *MMO = MF.getMachineMemOperand( - PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align); + MachineMemOperand *MMO = + MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad, + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); unsigned Opc = 0; bool Offset = true; diff --git a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp index d3d43ad..60ccb36 100644 --- a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp @@ -724,7 +724,7 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const { MIRBuilder.setInstr(MI); MachineFunction &MF = MIRBuilder.getMF(); - unsigned Align = MI.getOperand(2).getImm(); + Align Alignment(MI.getOperand(2).getImm()); Register Dst = MI.getOperand(0).getReg(); Register ListPtr = MI.getOperand(1).getReg(); @@ -732,19 +732,19 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI, LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits()); const unsigned PtrSize = PtrTy.getSizeInBits() / 8; + const Align PtrAlign = Align(PtrSize); auto List = MIRBuilder.buildLoad( PtrTy, ListPtr, *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad, - PtrSize, /* Align = */ PtrSize)); + PtrSize, PtrAlign)); MachineInstrBuilder DstPtr; - if (Align > PtrSize) { + if (Alignment > PtrAlign) { // Realign the list to the actual required alignment. 
- auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1); - + auto AlignMinus1 = + MIRBuilder.buildConstant(IntPtrTy, Alignment.value() - 1); auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0)); - - DstPtr = MIRBuilder.buildPtrMask(PtrTy, ListTmp, Log2_64(Align)); + DstPtr = MIRBuilder.buildPtrMask(PtrTy, ListTmp, Log2(Alignment)); } else DstPtr = List; @@ -752,16 +752,16 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI, MIRBuilder.buildLoad( Dst, DstPtr, *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad, - ValSize, std::max(Align, PtrSize))); + ValSize, std::max(Alignment, PtrAlign))); - auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrSize)); + auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrAlign)); auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0)); - MIRBuilder.buildStore( - NewList, ListPtr, - *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore, - PtrSize, /* Align = */ PtrSize)); + MIRBuilder.buildStore(NewList, ListPtr, + *MF.getMachineMemOperand(MachinePointerInfo(), + MachineMemOperand::MOStore, + PtrSize, PtrAlign)); MI.eraseFromParent(); return true; diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp index 65c089a..ea7b87f 100644 --- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp @@ -117,7 +117,7 @@ SDValue AArch64SelectionDAGInfo::EmitTargetCodeForSetTag( MachineFunction &MF = DAG.getMachineFunction(); MachineMemOperand *BaseMemOperand = MF.getMachineMemOperand( - DstPtrInfo, MachineMemOperand::MOStore, ObjSize, 16); + DstPtrInfo, MachineMemOperand::MOStore, ObjSize, Align(16)); bool UseSetTagRangeLoop = kSetTagLoopThreshold >= 0 && (int)ObjSize >= kSetTagLoopThreshold; diff --git a/llvm/lib/Target/ARC/ARCInstrInfo.cpp b/llvm/lib/Target/ARC/ARCInstrInfo.cpp index 8261655..527f239 
100644 --- a/llvm/lib/Target/ARC/ARCInstrInfo.cpp +++ b/llvm/lib/Target/ARC/ARCInstrInfo.cpp @@ -299,11 +299,11 @@ void ARCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, DebugLoc dl = MBB.findDebugLoc(I); MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned Align = MFI.getObjectAlignment(FrameIndex); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FrameIndex), - MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex), Align); + MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex), + MFI.getObjectAlign(FrameIndex)); assert(MMO && "Couldn't get MachineMemOperand for store to stack."); assert(TRI->getSpillSize(*RC) == 4 && @@ -327,10 +327,10 @@ void ARCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, DebugLoc dl = MBB.findDebugLoc(I); MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned Align = MFI.getObjectAlignment(FrameIndex); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FrameIndex), - MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex), Align); + MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex), + MFI.getObjectAlign(FrameIndex)); assert(MMO && "Couldn't get MachineMemOperand for store to stack."); assert(TRI->getSpillSize(*RC) == 4 && diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp index 3a190b5..8650ca3 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1319,10 +1319,10 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, if (I != MBB.end()) DL = I->getDebugLoc(); MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned Align = MFI.getObjectAlignment(FI); + const Align Alignment = MFI.getObjectAlign(FI); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), 
MachineMemOperand::MOLoad, - MFI.getObjectSize(FI), Align); + MFI.getObjectSize(FI), Alignment); switch (TRI->getSpillSize(*RC)) { case 2: @@ -1391,7 +1391,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, break; case 16: if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) { - if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { + if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) { BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg) .addFrameIndex(FI) .addImm(16) @@ -1415,7 +1415,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, break; case 24: if (ARM::DTripleRegClass.hasSubClassEq(RC)) { - if (Align >= 16 && getRegisterInfo().canRealignStack(MF) && + if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) && Subtarget.hasNEON()) { BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg) .addFrameIndex(FI) @@ -1438,7 +1438,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, break; case 32: if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) { - if (Align >= 16 && getRegisterInfo().canRealignStack(MF) && + if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) && Subtarget.hasNEON()) { BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg) .addFrameIndex(FI) @@ -4832,7 +4832,7 @@ void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI, MachineMemOperand::MODereferenceable | MachineMemOperand::MOInvariant; MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( - MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4); + MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, Align(4)); MIB.addMemOperand(MMO).add(predOps(ARMCC::AL)); } diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp index 6ba2c43..6a8ad3f 100644 --- a/llvm/lib/Target/ARM/ARMCallLowering.cpp +++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp @@ -130,7 +130,7 @@ struct 
OutgoingValueHandler : public CallLowering::ValueHandler { Register ExtReg = extendRegister(ValVReg, VA); auto MMO = MIRBuilder.getMF().getMachineMemOperand( MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(), - /* Alignment */ 1); + Align(1)); MIRBuilder.buildStore(ExtReg, Addr, *MMO); } diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp index 6e19db3..11bff74 100644 --- a/llvm/lib/Target/ARM/ARMFastISel.cpp +++ b/llvm/lib/Target/ARM/ARMFastISel.cpp @@ -209,7 +209,7 @@ class ARMFastISel final : public FastISel { unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg); unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg); unsigned ARMSelectCallOp(bool UseReg); - unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT); + unsigned ARMLowerPICELF(const GlobalValue *GV, MVT VT); const TargetLowering *getTargetLowering() { return &TLI; } @@ -570,14 +570,10 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) { TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF)); } else { // MachineConstantPool wants an explicit alignment. - unsigned Align = DL.getPrefTypeAlignment(GV->getType()); - if (Align == 0) { - // TODO: Figure out if this is correct. - Align = DL.getTypeAllocSize(GV->getType()); - } + Align Alignment = DL.getPrefTypeAlign(GV->getType()); if (Subtarget->isTargetELF() && IsPositionIndependent) - return ARMLowerPICELF(GV, Align, VT); + return ARMLowerPICELF(GV, VT); // Grab index. unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; @@ -585,7 +581,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) { ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, ARMCP::CPValue, PCAdj); - unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); + unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment.value()); // Load value. 
MachineInstrBuilder MIB; @@ -882,7 +878,7 @@ void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr, int Offset = Addr.Offset; MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags, - MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); // Now add the rest of the operands. MIB.addFrameIndex(FI); @@ -2949,8 +2945,7 @@ bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, return true; } -unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, - unsigned Align, MVT VT) { +unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) { bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); LLVMContext *Context = &MF->getFunction().getContext(); @@ -2966,7 +2961,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign); MachineMemOperand *CPMMO = MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), - MachineMemOperand::MOLoad, 4, 4); + MachineMemOperand::MOLoad, 4, Align(4)); Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass); unsigned Opc = isThumb2 ? 
ARM::t2LDRpci : ARM::LDRcp; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index c49a16d..a2de0ad 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -9768,7 +9768,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), - MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4); + MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4)); MachineInstrBuilder MIB; MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); @@ -9899,8 +9899,9 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addReg(NewVReg3) .add(predOps(ARMCC::AL)); - MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( - MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); + MachineMemOperand *JTMMOLd = + MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF), + MachineMemOperand::MOLoad, 4, Align(4)); Register NewVReg5 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) @@ -9993,8 +9994,9 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addJumpTableIndex(MJTI) .add(predOps(ARMCC::AL)); - MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( - MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); + MachineMemOperand *JTMMOLd = + MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF), + MachineMemOperand::MOLoad, 4, Align(4)); Register NewVReg5 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) .addReg(NewVReg3, RegState::Kill) diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/llvm/lib/Target/ARM/ARMInstrInfo.cpp index a802d5a..2790ac2 100644 --- a/llvm/lib/Target/ARM/ARMInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMInstrInfo.cpp @@ -126,7 +126,7 @@ void 
ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI) const { MachineMemOperand::MODereferenceable | MachineMemOperand::MOInvariant; MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( - MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4); + MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, Align(4)); MIB.addMemOperand(MMO); BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg) .addReg(Reg, RegState::Kill) diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp index 1a054a4..79afa37 100644 --- a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp +++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp @@ -92,7 +92,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineFrameInfo &MFI = MF.getFrameInfo(); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, - MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); BuildMI(MBB, I, DL, get(ARM::tSTRspi)) .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI) @@ -121,7 +121,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineFrameInfo &MFI = MF.getFrameInfo(); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, - MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); BuildMI(MBB, I, DL, get(ARM::tLDRspi), DestReg) .addFrameIndex(FI) .addImm(0) diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp index f4d1107..0719b85 100644 --- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -143,7 +143,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineFrameInfo &MFI = MF.getFrameInfo(); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, 
FI), MachineMemOperand::MOStore, - MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); if (ARM::GPRRegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(ARM::t2STRi12)) @@ -183,7 +183,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineFrameInfo &MFI = MF.getFrameInfo(); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, - MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); -- 2.7.4