From 8dbafd24d6dad9ace4447084a517823ea1d6e6b4 Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet@google.com>
Date: Thu, 2 Jul 2020 11:28:01 +0000
Subject: [PATCH] [Alignment][NFC] Transition and simplify calls to
 DL::getABITypeAlignment

This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Differential Revision: https://reviews.llvm.org/D82977
---
 llvm/lib/Target/Hexagon/HexagonISelLowering.cpp      |  6 +++---
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp          | 13 +++++++------
 llvm/lib/Target/X86/X86FastISel.cpp                  | 13 +++----------
 llvm/lib/Transforms/IPO/GlobalOpt.cpp                |  9 ++++-----
 llvm/lib/Transforms/IPO/Inliner.cpp                  | 18 ++++--------------
 llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp |  4 ++--
 .../Transforms/Instrumentation/DataFlowSanitizer.cpp | 12 +++---------
 .../Transforms/Instrumentation/MemorySanitizer.cpp   |  2 +-
 llvm/lib/Transforms/Scalar/LICM.cpp                  | 17 ++++++-----------
 llvm/lib/Transforms/Scalar/SROA.cpp                  |  2 +-
 llvm/lib/Transforms/Utils/SimplifyCFG.cpp            | 20 +------------------
 11 files changed, 35 insertions(+), 81 deletions(-)

diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index d570e3f..768fea6 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3277,12 +3277,12 @@ bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
     // The type Ty passed here would then be "void". Skip the alignment
     // checks, but do not return false right away, since that confuses
     // LSR into crashing.
-    unsigned A = DL.getABITypeAlignment(Ty);
+    Align A = DL.getABITypeAlign(Ty);
     // The base offset must be a multiple of the alignment.
-    if ((AM.BaseOffs % A) != 0)
+    if (!isAligned(A, AM.BaseOffs))
       return false;
     // The shifted offset must fit in 11 bits.
-    if (!isInt<11>(AM.BaseOffs >> Log2_32(A)))
+    if (!isInt<11>(AM.BaseOffs >> Log2(A)))
       return false;
   }
 
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 46d5522..0743a5a 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -14772,17 +14772,18 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
 
     EVT MemVT = LD->getMemoryVT();
     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
-    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
+    Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
-    unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
+    Align ScalarABIAlignment = DAG.getDataLayout().getABITypeAlign(STy);
     if (LD->isUnindexed() && VT.isVector() &&
         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
           // P8 and later hardware should just use LOAD.
-          !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
-                                       VT == MVT::v4i32 || VT == MVT::v4f32)) ||
+          !Subtarget.hasP8Vector() &&
+          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
+           VT == MVT::v4f32)) ||
          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
-          LD->getAlignment() >= ScalarABIAlignment)) &&
-        LD->getAlignment() < ABIAlignment) {
+          LD->getAlign() >= ScalarABIAlignment)) &&
+        LD->getAlign() < ABIAlignment) {
       // This is a type-legal unaligned Altivec or QPX load.
       SDValue Chain = LD->getChain();
       SDValue Ptr = LD->getBasePtr();
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 84f8200..b305940 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1127,10 +1127,8 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
   if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
     return false;
 
-  unsigned Alignment = S->getAlignment();
-  unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
-  if (Alignment == 0) // Ensure that codegen never sees alignment 0
-    Alignment = ABIAlignment;
+  Align Alignment = S->getAlign();
+  Align ABIAlignment = DL.getABITypeAlign(Val->getType());
   bool Aligned = Alignment >= ABIAlignment;
 
   X86AddressMode AM;
@@ -1321,14 +1319,9 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
   if (!X86SelectAddress(Ptr, AM))
     return false;
 
-  unsigned Alignment = LI->getAlignment();
-  unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType());
-  if (Alignment == 0) // Ensure that codegen never sees alignment 0
-    Alignment = ABIAlignment;
-
   unsigned ResultReg = 0;
   if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
-                       Alignment))
+                       LI->getAlign().value()))
     return false;
 
   updateValueMap(I, ResultReg);
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 372d87e..437451b 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -509,9 +509,8 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
   std::map<unsigned, GlobalVariable *> NewGlobals;
 
   // Get the alignment of the global, either explicit or target-specific.
-  unsigned StartAlignment = GV->getAlignment();
-  if (StartAlignment == 0)
-    StartAlignment = DL.getABITypeAlignment(GV->getType());
+  Align StartAlignment =
+      DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getType());
 
   // Loop over all users and create replacement variables for used aggregate
   // elements.
@@ -554,7 +553,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
       // had 256 byte alignment for example, something might depend on that:
       // propagate info to each field.
       uint64_t FieldOffset = Layout.getElementOffset(ElementIdx);
-      Align NewAlign(MinAlign(StartAlignment, FieldOffset));
+      Align NewAlign = commonAlignment(StartAlignment, FieldOffset);
       if (NewAlign > DL.getABITypeAlign(STy->getElementType(ElementIdx)))
         NGV->setAlignment(NewAlign);
 
@@ -570,7 +569,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
       // Calculate the known alignment of the field. If the original aggregate
       // had 256 byte alignment for example, something might depend on that:
       // propagate info to each field.
-      Align NewAlign(MinAlign(StartAlignment, EltSize * ElementIdx));
+      Align NewAlign = commonAlignment(StartAlignment, EltSize * ElementIdx);
       if (NewAlign > EltAlign)
         NGV->setAlignment(NewAlign);
       transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
diff --git a/llvm/lib/Transforms/IPO/Inliner.cpp b/llvm/lib/Transforms/IPO/Inliner.cpp
index b095819..876ad88 100644
--- a/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -191,8 +191,8 @@ static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
     // function. Also, AllocasForType can be empty of course!
     bool MergedAwayAlloca = false;
     for (AllocaInst *AvailableAlloca : AllocasForType) {
-      unsigned Align1 = AI->getAlignment(),
-               Align2 = AvailableAlloca->getAlignment();
+      Align Align1 = AI->getAlign();
+      Align Align2 = AvailableAlloca->getAlign();
 
       // The available alloca has to be in the right function, not in some other
       // function in this SCC.
@@ -219,18 +219,8 @@ static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
 
       AI->replaceAllUsesWith(AvailableAlloca);
 
-      if (Align1 != Align2) {
-        if (!Align1 || !Align2) {
-          const DataLayout &DL = Caller->getParent()->getDataLayout();
-          unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());
-
-          Align1 = Align1 ? Align1 : TypeAlign;
-          Align2 = Align2 ? Align2 : TypeAlign;
-        }
-
-        if (Align1 > Align2)
-          AvailableAlloca->setAlignment(AI->getAlign());
-      }
+      if (Align1 > Align2)
+        AvailableAlloca->setAlignment(AI->getAlign());
 
       AI->eraseFromParent();
       MergedAwayAlloca = true;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index a8c87ea..5e8842d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -93,8 +93,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
   Type *CastElTy = PTy->getElementType();
   if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;
 
-  unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
-  unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
+  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
+  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
   if (CastElTyAlign < AllocElTyAlign) return nullptr;
 
   // If the allocation has multiple uses, only promote it if we are strictly
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index f1f94df..2846319 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1369,15 +1369,9 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
     return;
   }
 
-  uint64_t Align;
-  if (ClPreserveAlignment) {
-    Align = LI.getAlignment();
-    if (Align == 0)
-      Align = DL.getABITypeAlignment(LI.getType());
-  } else {
-    Align = 1;
-  }
-  Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
+  Align Alignment = ClPreserveAlignment ? LI.getAlign() : Align(1);
+  Value *Shadow =
+      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), &LI);
   if (ClCombinePointerLabelsOnLoad) {
     Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
     Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 9bf3a9e..f825cf9 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1109,7 +1109,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                    unsigned Size, Align Alignment) {
     const DataLayout &DL = F.getParent()->getDataLayout();
-    const Align IntptrAlignment = Align(DL.getABITypeAlignment(MS.IntptrTy));
+    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
     assert(IntptrAlignment >= kMinOriginAlignment);
     assert(IntptrSize >= kOriginSize);
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 5597266..1a22eda 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -1894,7 +1894,7 @@ bool llvm::promoteLoopAccessesToScalars(
 
   // We start with an alignment of one and try to find instructions that allow
   // us to prove better alignment.
-  unsigned Alignment = 1;
+  Align Alignment;
   // Keep track of which types of access we see
   bool SawUnorderedAtomic = false;
   bool SawNotAtomic = false;
@@ -1942,10 +1942,7 @@ bool llvm::promoteLoopAccessesToScalars(
       SawUnorderedAtomic |= Load->isAtomic();
       SawNotAtomic |= !Load->isAtomic();
 
-      unsigned InstAlignment = Load->getAlignment();
-      if (!InstAlignment)
-        InstAlignment =
-            MDL.getABITypeAlignment(Load->getType());
+      Align InstAlignment = Load->getAlign();
 
       // Note that proving a load safe to speculate requires proving
       // sufficient alignment at the target location. Proving it guaranteed
@@ -1973,10 +1970,7 @@ bool llvm::promoteLoopAccessesToScalars(
       // already know that promotion is safe, since it may have higher
       // alignment than any other guaranteed stores, in which case we can
      // raise the alignment on the promoted store.
-      unsigned InstAlignment = Store->getAlignment();
-      if (!InstAlignment)
-        InstAlignment =
-            MDL.getABITypeAlignment(Store->getValueOperand()->getType());
+      Align InstAlignment = Store->getAlign();
 
       if (!DereferenceableInPH || !SafeToInsertStore ||
           (InstAlignment > Alignment)) {
@@ -2079,7 +2073,8 @@ bool llvm::promoteLoopAccessesToScalars(
   SSAUpdater SSA(&NewPHIs);
   LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                         InsertPts, MSSAInsertPts, PIC, *CurAST, MSSAU, *LI, DL,
-                        Alignment, SawUnorderedAtomic, AATags, *SafetyInfo);
+                        Alignment.value(), SawUnorderedAtomic, AATags,
+                        *SafetyInfo);
 
   // Set up the preheader to have a definition of the value.  It is the live-out
   // value from the preheader that uses in the loop will use.
@@ -2088,7 +2083,7 @@ bool llvm::promoteLoopAccessesToScalars(
       SomePtr->getName() + ".promoted", Preheader->getTerminator());
   if (SawUnorderedAtomic)
     PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
-  PreheaderLoad->setAlignment(Align(Alignment));
+  PreheaderLoad->setAlignment(Alignment);
   PreheaderLoad->setDebugLoc(DebugLoc());
   if (AATags)
     PreheaderLoad->setAAMetadata(AATags);
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 68c0162..fea1079 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -4267,7 +4267,7 @@ AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
   const Align Alignment = commonAlignment(AI.getAlign(), P.beginOffset());
   // If we will get at least this much alignment from the type alone, leave
   // the alloca's alignment unconstrained.
-  const bool IsUnconstrained = Alignment <= DL.getABITypeAlignment(SliceTy);
+  const bool IsUnconstrained = Alignment <= DL.getABITypeAlign(SliceTy);
   NewAI = new AllocaInst(
       SliceTy, AI.getType()->getAddressSpace(), nullptr,
       IsUnconstrained ? DL.getPrefTypeAlign(SliceTy) : Alignment,
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index e6ca52a..b450d71 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -3147,29 +3147,11 @@ static bool mergeConditionalStoreToAddress(BasicBlock *PTB, BasicBlock *PFB,
     AAMDNodes AAMD;
     PStore->getAAMetadata(AAMD, /*Merge=*/false);
     PStore->getAAMetadata(AAMD, /*Merge=*/true);
     SI->setAAMetadata(AAMD);
-    unsigned PAlignment = PStore->getAlignment();
-    unsigned QAlignment = QStore->getAlignment();
-    unsigned TypeAlignment =
-        DL.getABITypeAlignment(SI->getValueOperand()->getType());
-    unsigned MinAlignment;
-    unsigned MaxAlignment;
-    std::tie(MinAlignment, MaxAlignment) = std::minmax(PAlignment, QAlignment);
     // Choose the minimum alignment. If we could prove both stores execute, we
     // could use biggest one. In this case, though, we only know that one of the
     // stores executes. And we don't know it's safe to take the alignment from a
    // store that doesn't execute.
-    if (MinAlignment != 0) {
-      // Choose the minimum of all non-zero alignments.
-      SI->setAlignment(Align(MinAlignment));
-    } else if (MaxAlignment != 0) {
-      // Choose the minimal alignment between the non-zero alignment and the ABI
-      // default alignment for the type of the stored value.
-      SI->setAlignment(Align(std::min(MaxAlignment, TypeAlignment)));
-    } else {
-      // If both alignments are zero, use ABI default alignment for the type of
-      // the stored value.
-      SI->setAlignment(Align(TypeAlignment));
-    }
+    SI->setAlignment(std::min(PStore->getAlign(), QStore->getAlign()));
     QStore->eraseFromParent();
     PStore->eraseFromParent();
-- 
2.7.4
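
For readers following the Alignment series, a minimal sketch of the before/after shape of the pattern this patch applies throughout. The DataLayout, LoadInst and Alignment APIs shown are the ones the patch itself touches; the two wrapper functions are illustrative only and not code from the patch:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // Old style: alignment is a raw unsigned where 0 means "unspecified",
    // so every consumer must restate the ABI-type-alignment fallback.
    static unsigned loadAlignOldStyle(const DataLayout &DL, const LoadInst *LI) {
      unsigned A = LI->getAlignment();             // may be 0
      if (A == 0)
        A = DL.getABITypeAlignment(LI->getType()); // unsigned fallback
      return A;
    }

    // New style: LoadInst::getAlign() and DataLayout::getABITypeAlign()
    // return Align, a non-zero power of two by construction, so the
    // zero-check boilerplate disappears and helpers such as isAligned(),
    // Log2() and commonAlignment() replace arithmetic on raw integers.
    static Align loadAlignNewStyle(const LoadInst *LI) {
      return LI->getAlign();
    }

The .value() calls seen in the X86FastISel, DataFlowSanitizer and LICM hunks are the escape hatch that converts an Align back to a raw integer for interfaces that have not yet been migrated.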