From c9d5c1959767d17809157a59c99d5132128bab2a Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet
Date: Tue, 31 Mar 2020 08:05:00 +0000
Subject: [PATCH] [Alignment][NFC] Transitioning more getMachineMemOperand call sites

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: arsenm, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, jrtc27, atanasyan, Jim, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77121
---
 .../CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 20 ++++++++--------
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 27 +++++++++++-----------
 llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 6 ++---
 llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | 20 ++++++++--------
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 ++---
 llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 13 ++++-------
 llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 24 +++++++++----------
 llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 10 ++++----
 llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 2 +-
 llvm/lib/Target/ARM/ARMISelLowering.cpp | 22 ++++++++----------
 llvm/lib/Target/AVR/AVRInstrInfo.cpp | 4 ++--
 llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp | 13 +++++------
 llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 8 +++----
 llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 9 ++++----
 llvm/lib/Target/MSP430/MSP430InstrInfo.cpp | 2 +-
 llvm/lib/Target/Mips/MipsCallLowering.cpp | 10 ++++----
 llvm/lib/Target/Mips/MipsFastISel.cpp | 8 +++----
 llvm/lib/Target/Mips/MipsInstrInfo.cpp | 4 ++--
 llvm/lib/Target/Mips/MipsInstructionSelector.cpp | 10 ++++----
 llvm/lib/Target/Mips/MipsLegalizerInfo.cpp | 9 ++++----
 llvm/lib/Target/PowerPC/PPCFastISel.cpp | 12 +++++-----
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 6 ++---
 llvm/lib/Target/PowerPC/PPCISelLowering.h | 2 +-
 llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 4 ++--
 llvm/test/TableGen/address-space-patfrags.td | 2 +-
 llvm/utils/TableGen/CodeGenDAGPatterns.cpp | 4 ++--
 26 files changed, 125 insertions(+), 132 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 552406d..45c7a4f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1525,13 +1525,14 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
   }
 
   Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
-                   LD->getPointerInfo(), LoMemVT, Alignment, MMOFlags, AAInfo);
+                   LD->getPointerInfo(), LoMemVT, Alignment.value(), MMOFlags,
+                   AAInfo);
 
   unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
   Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
   Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset,
                    LD->getPointerInfo().getWithOffset(IncrementSize), HiMemVT,
-                   Alignment, MMOFlags, AAInfo);
+                   Alignment.value(), MMOFlags, AAInfo);
 
   // Build a factor node to remember that this load is independent of the
   // other one.
@@ -1650,7 +1651,7 @@ void DAGTypeLegalizer::SplitVecRes_MGATHER(MaskedGatherSDNode *MGT, MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( MGT->getPointerInfo(), MachineMemOperand::MOLoad, - MemoryLocation::UnknownSize, Alignment, MGT->getAAInfo(), + MemoryLocation::UnknownSize, Alignment.value(), MGT->getAAInfo(), MGT->getRanges()); SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Scale}; @@ -2409,7 +2410,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSCATTER(MaskedScatterSDNode *N, SDValue Lo; MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( N->getPointerInfo(), MachineMemOperand::MOStore, - MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges()); + MemoryLocation::UnknownSize, Alignment.value(), N->getAAInfo(), + N->getRanges()); SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Scale}; Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(), @@ -2449,10 +2451,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { if (isTruncating) Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT, - Alignment, MMOFlags, AAInfo); + Alignment.value(), MMOFlags, AAInfo); else - Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags, - AAInfo); + Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment.value(), + MMOFlags, AAInfo); // Increment the pointer to the other half. Ptr = DAG.getObjectPtrOffset(DL, Ptr, IncrementSize); @@ -2460,11 +2462,11 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { if (isTruncating) Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, N->getPointerInfo().getWithOffset(IncrementSize), - HiMemVT, Alignment, MMOFlags, AAInfo); + HiMemVT, Alignment.value(), MMOFlags, AAInfo); else Hi = DAG.getStore(Ch, DL, Hi, Ptr, N->getPointerInfo().getWithOffset(IncrementSize), - Alignment, MMOFlags, AAInfo); + Alignment.value(), MMOFlags, AAInfo); return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi); } diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp index e8c950c..c885696 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -1464,12 +1464,10 @@ Register AMDGPULegalizerInfo::getSegmentAperture( // TODO: can we be smarter about machine pointer info? 
MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); MachineMemOperand *MMO = MF.getMachineMemOperand( - PtrInfo, - MachineMemOperand::MOLoad | - MachineMemOperand::MODereferenceable | - MachineMemOperand::MOInvariant, - 4, - MinAlign(64, StructOffset)); + PtrInfo, + MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | + MachineMemOperand::MOInvariant, + 4, commonAlignment(Align(64), StructOffset)); Register LoadAddr; @@ -2028,10 +2026,10 @@ bool AMDGPULegalizerInfo::legalizeGlobalValue( Register GOTAddr = MRI.createGenericVirtualRegister(PtrTy); MachineMemOperand *GOTMMO = MF.getMachineMemOperand( - MachinePointerInfo::getGOT(MF), - MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | - MachineMemOperand::MOInvariant, - 8 /*Size*/, 8 /*Align*/); + MachinePointerInfo::getGOT(MF), + MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | + MachineMemOperand::MOInvariant, + 8 /*Size*/, Align(8)); buildPCRelGlobalAddress(GOTAddr, PtrTy, B, GV, 0, SIInstrInfo::MO_GOTPCREL32); @@ -4003,11 +4001,12 @@ bool AMDGPULegalizerInfo::legalizeSBufferLoad( // FIXME: When intrinsic definition is fixed, this should have an MMO already. // TODO: Should this use datalayout alignment? const unsigned MemSize = (Size + 7) / 8; - const unsigned MemAlign = 4; + const Align MemAlign(4); MachineMemOperand *MMO = MF.getMachineMemOperand( - MachinePointerInfo(), - MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | - MachineMemOperand::MOInvariant, MemSize, MemAlign); + MachinePointerInfo(), + MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | + MachineMemOperand::MOInvariant, + MemSize, MemAlign); MI.addMemOperand(MF, MMO); // There are no 96-bit result scalar loads, but widening to 128-bit should diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp index 8070bcb..adfcecd 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -1352,7 +1352,7 @@ bool AMDGPURegisterBankInfo::applyMappingSBufferLoad( // Use the alignment to ensure that the required offsets will fit into the // immediate offsets. - const unsigned Align = NumLoads > 1 ? 16 * NumLoads : 1; + const unsigned Alignment = NumLoads > 1 ? 16 * NumLoads : 1; MachineIRBuilder B(MI); MachineFunction &MF = B.getMF(); @@ -1362,12 +1362,12 @@ bool AMDGPURegisterBankInfo::applyMappingSBufferLoad( int64_t ImmOffset = 0; unsigned MMOOffset = setBufferOffsets(B, *this, MI.getOperand(2).getReg(), - VOffset, SOffset, ImmOffset, Align); + VOffset, SOffset, ImmOffset, Alignment); // TODO: 96-bit loads were widened to 128-bit results. Shrink the result if we // can, but we neeed to track an MMO for that. const unsigned MemSize = (Ty.getSizeInBits() + 7) / 8; - const unsigned MemAlign = 4; // FIXME: ABI type alignment? + const Align MemAlign(4); // FIXME: ABI type alignment? 
MachineMemOperand *BaseMMO = MF.getMachineMemOperand( MachinePointerInfo(), MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp index 929ae8c..46515ae 100644 --- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp @@ -94,7 +94,7 @@ static void buildPrologSpill(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB, MachineMemOperand *MMO = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, 4, - MFI.getObjectAlignment(FI)); + MFI.getObjectAlign(FI)); if (isUInt<12>(Offset)) { BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET)) @@ -141,7 +141,7 @@ static void buildEpilogReload(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB, MachineMemOperand *MMO = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, 4, - MFI.getObjectAlignment(FI)); + MFI.getObjectAlign(FI)); if (isUInt<12>(Offset)) { BuildMI(MBB, I, DebugLoc(), @@ -462,9 +462,9 @@ void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup( const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM); auto MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad | - MachineMemOperand::MOInvariant | - MachineMemOperand::MODereferenceable, - 16, 4); + MachineMemOperand::MOInvariant | + MachineMemOperand::MODereferenceable, + 16, Align(4)); unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0; const GCNSubtarget &Subtarget = MF.getSubtarget(); unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset); @@ -499,11 +499,11 @@ void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup( const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM); MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); - auto MMO = MF.getMachineMemOperand(PtrInfo, - MachineMemOperand::MOLoad | - MachineMemOperand::MOInvariant | - MachineMemOperand::MODereferenceable, - 8, 4); + auto MMO = MF.getMachineMemOperand( + PtrInfo, + MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | + MachineMemOperand::MODereferenceable, + 8, Align(4)); BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01) .addReg(MFI->getImplicitBufferPtrUserSGPR()) .addImm(0) // offset diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 3e04e58..e4fff83 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -5705,14 +5705,14 @@ SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, MachineFunction &MF = DAG.getMachineFunction(); const DataLayout &DataLayout = DAG.getDataLayout(); - unsigned Align = - DataLayout.getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); + Align Alignment = + DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext())); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo(), MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | MachineMemOperand::MOInvariant, - VT.getStoreSize(), Align); + VT.getStoreSize(), Alignment); if (!Offset->isDivergent()) { SDValue Ops[] = { diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 2e0b78b..b8a121b 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -1132,13 +1132,11 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, MachineFrameInfo &FrameInfo = 
MF->getFrameInfo(); const DebugLoc &DL = MBB.findDebugLoc(MI); - unsigned Size = FrameInfo.getObjectSize(FrameIndex); - unsigned Align = FrameInfo.getObjectAlignment(FrameIndex); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, FrameIndex); - MachineMemOperand *MMO - = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, - Size, Align); + MachineMemOperand *MMO = MF->getMachineMemOperand( + PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex), + FrameInfo.getObjectAlign(FrameIndex)); unsigned SpillSize = TRI->getSpillSize(*RC); if (RI.isSGPRClass(RC)) { @@ -1260,15 +1258,14 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI = MF->getInfo(); MachineFrameInfo &FrameInfo = MF->getFrameInfo(); const DebugLoc &DL = MBB.findDebugLoc(MI); - unsigned Align = FrameInfo.getObjectAlignment(FrameIndex); - unsigned Size = FrameInfo.getObjectSize(FrameIndex); unsigned SpillSize = TRI->getSpillSize(*RC); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, FrameIndex); MachineMemOperand *MMO = MF->getMachineMemOperand( - PtrInfo, MachineMemOperand::MOLoad, Size, Align); + PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex), + FrameInfo.getObjectAlign(FrameIndex)); if (RI.isSGPRClass(RC)) { MFI->setHasSpilledSGPRs(); diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp index 93fde90..39fcec1 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -673,7 +673,7 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI, int64_t Offset = InstOffset + MFI.getObjectOffset(Index); int64_t ScratchOffsetRegDelta = 0; - unsigned Align = MFI.getObjectAlignment(Index); + Align Alignment = MFI.getObjectAlign(Index); const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo(); Register TmpReg = @@ -749,9 +749,9 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI, } MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i); - MachineMemOperand *NewMMO - = MF->getMachineMemOperand(PInfo, MMO->getFlags(), - EltSize, MinAlign(Align, EltSize * i)); + MachineMemOperand *NewMMO = + MF->getMachineMemOperand(PInfo, MMO->getFlags(), EltSize, + commonAlignment(Alignment, EltSize * i)); MIB = BuildMI(*MBB, MI, DL, Desc) .addReg(SubReg, @@ -877,12 +877,12 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, Mov.addReg(SuperReg, RegState::Implicit | SuperKillState); } - unsigned Align = FrameInfo.getObjectAlignment(Index); + Align Alignment = FrameInfo.getObjectAlign(Index); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i); - MachineMemOperand *MMO - = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, - EltSize, MinAlign(Align, EltSize * i)); + MachineMemOperand *MMO = + MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, EltSize, + commonAlignment(Alignment, EltSize * i)); BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE)) .addReg(TmpVGPR, RegState::Kill) // src .addFrameIndex(Index) // vaddr @@ -951,14 +951,14 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, // FIXME: We should use S_LOAD_DWORD here for VI. 
if (!TmpVGPR.isValid()) TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); - unsigned Align = FrameInfo.getObjectAlignment(Index); + Align Alignment = FrameInfo.getObjectAlign(Index); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i); - MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo, - MachineMemOperand::MOLoad, EltSize, - MinAlign(Align, EltSize * i)); + MachineMemOperand *MMO = + MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad, EltSize, + commonAlignment(Alignment, EltSize * i)); BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpVGPR) .addFrameIndex(Index) // vaddr diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp index 3af7dff..3a190b5 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1076,11 +1076,11 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const TargetRegisterInfo *TRI) const { MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned Align = MFI.getObjectAlignment(FI); + Align Alignment = MFI.getObjectAlign(FI); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, - MFI.getObjectSize(FI), Align); + MFI.getObjectSize(FI), Alignment); switch (TRI->getSpillSize(*RC)) { case 2: @@ -1150,7 +1150,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, case 16: if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) { // Use aligned spills if the stack can be realigned. - if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { + if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) { BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64)) .addFrameIndex(FI) .addImm(16) @@ -1178,7 +1178,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, case 24: if (ARM::DTripleRegClass.hasSubClassEq(RC)) { // Use aligned spills if the stack can be realigned. - if (Align >= 16 && getRegisterInfo().canRealignStack(MF) && + if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) && Subtarget.hasNEON()) { BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo)) .addFrameIndex(FI) @@ -1201,7 +1201,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, break; case 32: if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) { - if (Align >= 16 && getRegisterInfo().canRealignStack(MF) && + if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) && Subtarget.hasNEON()) { // FIXME: It's possible to only store part of the QQ register if the // spilled def has a sub-register index. 
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp index b334f41..f424f22 100644 --- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -3400,7 +3400,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) { MachineFunction& MF = CurDAG->getMachineFunction(); MachineMemOperand *MemOp = MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF), - MachineMemOperand::MOLoad, 4, 4); + MachineMemOperand::MOLoad, 4, Align(4)); CurDAG->setNodeMemRefs(cast(ResNode), {MemOp}); diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 0c9fff6..c49a16d 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -9578,11 +9578,11 @@ void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, // Grab constant pool and fixed stack memory operands. MachineMemOperand *CPMMO = MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), - MachineMemOperand::MOLoad, 4, 4); + MachineMemOperand::MOLoad, 4, Align(4)); MachineMemOperand *FIMMOSt = MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), - MachineMemOperand::MOStore, 4, 4); + MachineMemOperand::MOStore, 4, Align(4)); // Load the address of the dispatch MBB into the jump buffer. if (isThumb2) { @@ -10233,7 +10233,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, Register dest = MI.getOperand(0).getReg(); Register src = MI.getOperand(1).getReg(); unsigned SizeVal = MI.getOperand(2).getImm(); - unsigned Align = MI.getOperand(3).getImm(); + unsigned Alignment = MI.getOperand(3).getImm(); DebugLoc dl = MI.getDebugLoc(); MachineFunction *MF = BB->getParent(); @@ -10246,17 +10246,17 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, bool IsThumb2 = Subtarget->isThumb2(); bool IsThumb = Subtarget->isThumb(); - if (Align & 1) { + if (Alignment & 1) { UnitSize = 1; - } else if (Align & 2) { + } else if (Alignment & 2) { UnitSize = 2; } else { // Check whether we can use NEON instructions. if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) && Subtarget->hasNEON()) { - if ((Align % 16 == 0) && SizeVal >= 16) + if ((Alignment % 16 == 0) && SizeVal >= 16) UnitSize = 16; - else if ((Align % 8 == 0) && SizeVal >= 8) + else if ((Alignment % 8 == 0) && SizeVal >= 8) UnitSize = 8; } // Can't use NEON instructions. @@ -10362,13 +10362,11 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, const Constant *C = ConstantInt::get(Int32Ty, LoopSize); // MachineConstantPool wants an explicit alignment. 
- unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); - if (Align == 0) - Align = MF->getDataLayout().getTypeAllocSize(C->getType()); - unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); + Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); + unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment.value()); MachineMemOperand *CPMMO = MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), - MachineMemOperand::MOLoad, 4, 4); + MachineMemOperand::MOLoad, 4, Align(4)); if (IsThumb) BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)) diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/llvm/lib/Target/AVR/AVRInstrInfo.cpp index ba12df9..ae268b1 100644 --- a/llvm/lib/Target/AVR/AVRInstrInfo.cpp +++ b/llvm/lib/Target/AVR/AVRInstrInfo.cpp @@ -138,7 +138,7 @@ void AVRInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FrameIndex), MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex), - MFI.getObjectAlignment(FrameIndex)); + MFI.getObjectAlign(FrameIndex)); unsigned Opcode = 0; if (TRI->isTypeLegalForClass(*RC, MVT::i8)) { @@ -172,7 +172,7 @@ void AVRInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FrameIndex), MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex), - MFI.getObjectAlignment(FrameIndex)); + MFI.getObjectAlign(FrameIndex)); unsigned Opcode = 0; if (TRI->isTypeLegalForClass(*RC, MVT::i8)) { diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp index d41c1e4..9c39d0b 100644 --- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -893,7 +893,7 @@ void HexagonFrameLowering::insertAllocframe(MachineBasicBlock &MBB, // Create a dummy memory operand to avoid allocframe from being treated as // a volatile memory reference. 
auto *MMO = MF.getMachineMemOperand(MachinePointerInfo::getStack(MF, 0), - MachineMemOperand::MOStore, 4, 4); + MachineMemOperand::MOStore, 4, Align(4)); DebugLoc dl = MBB.findDebugLoc(InsertPt); unsigned SP = HRI.getStackRegister(); @@ -1547,12 +1547,11 @@ void HexagonFrameLowering::processFunctionBeforeFrameFinalized( if (auto *FS = dyn_cast_or_null(PV)) { int FI = FS->getFrameIndex(); if (DealignSlots.count(FI)) { - unsigned A = MFI.getObjectAlignment(FI); - auto *NewMMO = MF.getMachineMemOperand(MMO->getPointerInfo(), - MMO->getFlags(), MMO->getSize(), A, - MMO->getAAInfo(), MMO->getRanges(), - MMO->getSyncScopeID(), MMO->getOrdering(), - MMO->getFailureOrdering()); + auto *NewMMO = MF.getMachineMemOperand( + MMO->getPointerInfo(), MMO->getFlags(), MMO->getSize(), + MFI.getObjectAlign(FI), MMO->getAAInfo(), MMO->getRanges(), + MMO->getSyncScopeID(), MMO->getOrdering(), + MMO->getFailureOrdering()); new_memops.push_back(NewMMO); KeepOld = false; continue; diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index b403d9c..15f6b0d 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -2909,10 +2909,10 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) MachineMemOperand *WideMMO = nullptr; if (MachineMemOperand *MMO = LN->getMemOperand()) { MachineFunction &MF = DAG.getMachineFunction(); - WideMMO = MF.getMachineMemOperand(MMO->getPointerInfo(), MMO->getFlags(), - 2*LoadLen, LoadLen, MMO->getAAInfo(), MMO->getRanges(), - MMO->getSyncScopeID(), MMO->getOrdering(), - MMO->getFailureOrdering()); + WideMMO = MF.getMachineMemOperand( + MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen), + MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(), + MMO->getOrdering(), MMO->getFailureOrdering()); } SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO); diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp index 1b890ef..58bf3fe 100644 --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -916,12 +916,11 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, DebugLoc DL = MBB.findDebugLoc(I); MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned SlotAlign = MFI.getObjectAlignment(FI); unsigned KillFlag = getKillRegState(isKill); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, - MFI.getObjectSize(FI), SlotAlign); + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io)) @@ -963,11 +962,10 @@ void HexagonInstrInfo::loadRegFromStackSlot( DebugLoc DL = MBB.findDebugLoc(I); MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned SlotAlign = MFI.getObjectAlignment(FI); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, - MFI.getObjectSize(FI), SlotAlign); + MFI.getObjectSize(FI), MFI.getObjectAlign(FI)); if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg) @@ -1373,7 +1371,8 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { static const CrashPseudoSourceValue CrashPSV(*this); MachineMemOperand *MMO = MF.getMachineMemOperand( 
MachinePointerInfo(&CrashPSV), - MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 8, 1); + MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 8, + Align(1)); BuildMI(MBB, MI, DL, get(Hexagon::PS_loadrdabs), Hexagon::D13) .addImm(0xBADC0FEE) // Misaligned load. .addMemOperand(MMO); diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp index f183145..a07ec4e 100644 --- a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp +++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp @@ -46,7 +46,7 @@ void MSP430InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FrameIdx), MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx), - MFI.getObjectAlignment(FrameIdx)); + MFI.getObjectAlign(FrameIdx)); if (RC == &MSP430::GR16RegClass) BuildMI(MBB, MI, DL, get(MSP430::MOV16mr)) diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp index 0096170..b81ba54 100644 --- a/llvm/lib/Target/Mips/MipsCallLowering.cpp +++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp @@ -271,8 +271,9 @@ Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA, MachinePointerInfo MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8; - unsigned Align = MinAlign(TFL->getStackAlignment(), Offset); - MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align); + Align Alignment = commonAlignment(TFL->getStackAlign(), Offset); + MMO = + MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Alignment); return AddrReg.getReg(0); } @@ -485,9 +486,8 @@ bool MipsCallLowering::lowerFormalArguments( MachinePointerInfo MPO = MachinePointerInfo::getFixedStack(MF, FI); MachineInstrBuilder FrameIndex = MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI); - MachineMemOperand *MMO = - MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, RegSize, - /* Alignment */ RegSize); + MachineMemOperand *MMO = MF.getMachineMemOperand( + MPO, MachineMemOperand::MOStore, RegSize, Align(RegSize)); MIRBuilder.buildStore(Copy, FrameIndex, *MMO); } } diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp index 80f288a..1b6a75e 100644 --- a/llvm/lib/Target/Mips/MipsFastISel.cpp +++ b/llvm/lib/Target/Mips/MipsFastISel.cpp @@ -795,12 +795,11 @@ bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr, } if (Addr.isFIBase()) { unsigned FI = Addr.getFI(); - unsigned Align = 4; int64_t Offset = Addr.getOffset(); MachineFrameInfo &MFI = MF->getFrameInfo(); MachineMemOperand *MMO = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, - MFI.getObjectSize(FI), Align); + MFI.getObjectSize(FI), Align(4)); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) .addFrameIndex(FI) .addImm(Offset) @@ -846,12 +845,11 @@ bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr, } if (Addr.isFIBase()) { unsigned FI = Addr.getFI(); - unsigned Align = 4; int64_t Offset = Addr.getOffset(); MachineFrameInfo &MFI = MF->getFrameInfo(); MachineMemOperand *MMO = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, - MFI.getObjectSize(FI), Align); + MFI.getObjectSize(FI), Align(4)); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) .addReg(SrcReg) .addFrameIndex(FI) @@ -1263,7 +1261,7 @@ bool 
MipsFastISel::processCallArgs(CallLoweringInfo &CLI, Addr.setReg(Mips::SP); Addr.setOffset(VA.getLocMemOffset() + BEAlign); - unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType()); + Align Alignment = DL.getABITypeAlign(ArgVal->getType()); MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()), MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment); diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp index 25bbe59..3eab57c 100644 --- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp +++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp @@ -66,10 +66,10 @@ MipsInstrInfo::GetMemOperand(MachineBasicBlock &MBB, int FI, MachineMemOperand::Flags Flags) const { MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); - unsigned Align = MFI.getObjectAlignment(FI); return MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI), - Flags, MFI.getObjectSize(FI), Align); + Flags, MFI.getObjectSize(FI), + MFI.getObjectAlign(FI)); } //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp index 445ef85..3f63f40 100644 --- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp +++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp @@ -393,7 +393,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) { .addUse(DestAddress) .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO) .addMemOperand(MF.getMachineMemOperand( - MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4)); + MachinePointerInfo(), MachineMemOperand::MOLoad, 4, Align(4))); if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI)) return false; @@ -681,7 +681,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) { LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT); LWGOT->addMemOperand( MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF), - MachineMemOperand::MOLoad, 4, 4)); + MachineMemOperand::MOLoad, 4, Align(4))); if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI)) return false; @@ -727,9 +727,9 @@ bool MipsInstructionSelector::select(MachineInstr &I) { .addReg(MF.getInfo() ->getGlobalBaseRegForGlobalISel()) .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT) - .addMemOperand( - MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF), - MachineMemOperand::MOLoad, 4, 4)); + .addMemOperand(MF.getMachineMemOperand( + MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, 4, + Align(4))); } else { MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi)) diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp index e71ae23..6388e8d 100644 --- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp +++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp @@ -525,12 +525,13 @@ bool MipsLegalizerInfo::legalizeIntrinsic(MachineInstr &MI, } case Intrinsic::vacopy: { MachinePointerInfo MPO; - auto Tmp = MIRBuilder.buildLoad(LLT::pointer(0, 32), MI.getOperand(2), - *MI.getMF()->getMachineMemOperand( - MPO, MachineMemOperand::MOLoad, 4, 4)); + auto Tmp = + MIRBuilder.buildLoad(LLT::pointer(0, 32), MI.getOperand(2), + *MI.getMF()->getMachineMemOperand( + MPO, MachineMemOperand::MOLoad, 4, Align(4))); MIRBuilder.buildStore(Tmp, MI.getOperand(1), *MI.getMF()->getMachineMemOperand( - MPO, MachineMemOperand::MOStore, 4, 4)); + MPO, MachineMemOperand::MOStore, 4, Align(4))); MI.eraseFromParent(); return true; 
} diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp index d8425d8..6215ddb 100644 --- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp +++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp @@ -536,7 +536,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, Register &ResultReg, Address &Addr, MachinePointerInfo::getFixedStack(*FuncInfo.MF, Addr.Base.FI, Addr.Offset), MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI), - MFI.getObjectAlignment(Addr.Base.FI)); + MFI.getObjectAlign(Addr.Base.FI)); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO); @@ -682,7 +682,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) { MachinePointerInfo::getFixedStack(*FuncInfo.MF, Addr.Base.FI, Addr.Offset), MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI), - MFI.getObjectAlignment(Addr.Base.FI)); + MFI.getObjectAlign(Addr.Base.FI)); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) .addReg(SrcReg) @@ -1996,9 +1996,9 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) { return 0; // All FP constants are loaded from the constant pool. - unsigned Align = DL.getPrefTypeAlignment(CFP->getType()); - assert(Align > 0 && "Unexpectedly missing alignment information!"); - unsigned Idx = MCP.getConstantPoolIndex(cast(CFP), Align); + Align Alignment = DL.getPrefTypeAlign(CFP->getType()); + unsigned Idx = + MCP.getConstantPoolIndex(cast(CFP), Alignment.value()); const bool HasSPE = PPCSubTarget->hasSPE(); const TargetRegisterClass *RC; if (HasSPE) @@ -2011,7 +2011,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) { MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( MachinePointerInfo::getConstantPool(*FuncInfo.MF), - MachineMemOperand::MOLoad, (VT == MVT::f32) ? 4 : 8, Align); + MachineMemOperand::MOLoad, (VT == MVT::f32) ? 
4 : 8, Alignment); unsigned Opc; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 18e8ded..6f596f8 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -8054,7 +8054,7 @@ bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, RLI.MPI = LD->getPointerInfo(); RLI.IsDereferenceable = LD->isDereferenceable(); RLI.IsInvariant = LD->isInvariant(); - RLI.Alignment = LD->getAlignment(); + RLI.Alignment = LD->getAlign(); RLI.AAInfo = LD->getAAInfo(); RLI.Ranges = LD->getRanges(); @@ -8380,7 +8380,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, RLI.Chain = Store; RLI.MPI = MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); - RLI.Alignment = 4; + RLI.Alignment = Align(4); MachineMemOperand *MMO = MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, @@ -8432,7 +8432,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, RLI.Chain = Store; RLI.MPI = MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); - RLI.Alignment = 4; + RLI.Alignment = Align(4); } MachineMemOperand *MMO = diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h index 70bf4fb..4107d2b 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -991,7 +991,7 @@ namespace llvm { MachinePointerInfo MPI; bool IsDereferenceable = false; bool IsInvariant = false; - unsigned Alignment = 0; + Align Alignment; AAMDNodes AAInfo; const MDNode *Ranges = nullptr; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp index f56df56..670c0ce 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -1238,7 +1238,7 @@ void PPCInstrInfo::storeRegToStackSlotNoUpd( MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FrameIdx), MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx), - MFI.getObjectAlignment(FrameIdx)); + MFI.getObjectAlign(FrameIdx)); NewMIs.back()->addMemOperand(MF, MMO); } @@ -1301,7 +1301,7 @@ void PPCInstrInfo::loadRegFromStackSlotNoUpd( MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FrameIdx), MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx), - MFI.getObjectAlignment(FrameIdx)); + MFI.getObjectAlign(FrameIdx)); NewMIs.back()->addMemOperand(MF, MMO); } diff --git a/llvm/test/TableGen/address-space-patfrags.td b/llvm/test/TableGen/address-space-patfrags.td index cf31294..232cf79 100644 --- a/llvm/test/TableGen/address-space-patfrags.td +++ b/llvm/test/TableGen/address-space-patfrags.td @@ -81,7 +81,7 @@ def : Pat < // SDAG-NEXT: if (AddrSpace != 999) // SDAG-NEXT: return false; -// SDAG-NEXT: if (cast(N)->getAlignment() < 2) +// SDAG-NEXT: if (cast(N)->getAlign() < Align(2)) // SDAG-NEXT: return false; // SDAG-NEXT: if (cast(N)->getMemoryVT() != MVT::i32) return false; // SDAG-NEXT: return true; diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp index db61139..6fdc116 100644 --- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp +++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp @@ -999,9 +999,9 @@ std::string TreePredicateFn::getPredCode() const { int64_t MinAlign = getMinAlignment(); if (MinAlign > 0) { - Code += "if (cast(N)->getAlignment() < "; + Code += "if (cast(N)->getAlign() < Align("; Code += utostr(MinAlign); - Code += ")\nreturn 
false;\n"; + Code += "))\nreturn false;\n"; } Record *MemoryVT = getMemoryVT(); -- 2.7.4
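
For illustration, here is a minimal sketch of the call-site pattern this series migrates. It is hypothetical code written for this note, not code taken from the patch; it assumes the getMachineMemOperand overload that accepts llvm::Align and the MachineFrameInfo::getObjectAlign accessor available in D77121-era LLVM, and the helper name is invented.

// Hypothetical illustration of the migration; not part of the patch.
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Before the transition, the alignment travelled as a raw unsigned, so zero
// or non-power-of-two values could slip through unchecked:
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
//       MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); // unsigned
//
// After the transition, the same call site passes an llvm::Align, which the
// type guarantees is a valid power of two.
MachineMemOperand *buildSpillMMO(MachineFunction &MF, int FI) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  return MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI),
      MFI.getObjectAlign(FI)); // Align, not unsigned
}

Where the old code combined an alignment with an offset via MinAlign(A, Offset) on unsigneds, the patch uses commonAlignment(Align(A), Offset), which has the same semantics but stays in the Align domain.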