From: Matt Arsenault
Date: Fri, 24 Mar 2017 18:56:43 +0000 (+0000)
Subject: TTI: Split IsSimple in MemIntrinsicInfo
X-Git-Tag: llvmorg-5.0.0-rc1~9226
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=18bb24a1be07772afd77c0bc8c33a0822fe22956;p=platform%2Fupstream%2Fllvm.git

TTI: Split IsSimple in MemIntrinsicInfo

All this did before was assert in EarlyCSE.

llvm-svn: 298724
---

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 2f4f336..450f799 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -44,23 +44,26 @@ class Value;
 
 /// \brief Information about a load/store intrinsic defined by the target.
 struct MemIntrinsicInfo {
-  MemIntrinsicInfo()
-      : ReadMem(false), WriteMem(false), IsSimple(false), MatchingId(0),
-        NumMemRefs(0), PtrVal(nullptr) {}
-  bool ReadMem;
-  bool WriteMem;
-  /// True only if this memory operation is non-volatile, non-atomic, and
-  /// unordered. (See LoadInst/StoreInst for details on each)
-  bool IsSimple;
-  // Same Id is set by the target for corresponding load/store intrinsics.
-  unsigned short MatchingId;
-  int NumMemRefs;
-
   /// This is the pointer that the intrinsic is loading from or storing to.
   /// If this is non-null, then analysis/optimization passes can assume that
   /// this intrinsic is functionally equivalent to a load/store from this
   /// pointer.
-  Value *PtrVal;
+  Value *PtrVal = nullptr;
+
+  // Ordering for atomic operations.
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+
+  // Same Id is set by the target for corresponding load/store intrinsics.
+  unsigned short MatchingId = 0;
+
+  bool ReadMem = false;
+  bool WriteMem = false;
+  bool IsVolatile = false;
+
+  bool isUnordered() const {
+    return (Ordering == AtomicOrdering::NotAtomic ||
+            Ordering == AtomicOrdering::Unordered) && !IsVolatile;
+  }
 };
 
 /// \brief This pass provides access to the codegen interfaces that are needed
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 97df0961..be14a83 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -596,8 +596,6 @@ bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
   case Intrinsic::aarch64_neon_ld4:
     Info.ReadMem = true;
     Info.WriteMem = false;
-    Info.IsSimple = true;
-    Info.NumMemRefs = 1;
     Info.PtrVal = Inst->getArgOperand(0);
     break;
   case Intrinsic::aarch64_neon_st2:
@@ -605,8 +603,6 @@ bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
   case Intrinsic::aarch64_neon_st4:
     Info.ReadMem = false;
     Info.WriteMem = true;
-    Info.IsSimple = true;
-    Info.NumMemRefs = 1;
     Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
     break;
   }
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index a60097c..50141e4 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -392,7 +392,7 @@ private:
     ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
       : IsTargetMemInst(false), Inst(Inst) {
       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
-        if (TTI.getTgtMemIntrinsic(II, Info) && Info.NumMemRefs == 1)
+        if (TTI.getTgtMemIntrinsic(II, Info))
           IsTargetMemInst = true;
     }
     bool isLoad() const {
@@ -404,17 +404,14 @@
       return isa<StoreInst>(Inst);
     }
     bool isAtomic() const {
-      if (IsTargetMemInst) {
-        assert(Info.IsSimple && "need to refine IsSimple in TTI");
-        return false;
-      }
+      if (IsTargetMemInst)
+        return Info.Ordering != AtomicOrdering::NotAtomic;
       return Inst->isAtomic();
     }
     bool isUnordered() const {
-      if (IsTargetMemInst) {
-        assert(Info.IsSimple && "need to refine IsSimple in TTI");
-        return true;
-      }
+      if (IsTargetMemInst)
+        return Info.isUnordered();
+
       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
         return LI->isUnordered();
       } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
@@ -425,10 +422,9 @@ private:
     }
 
     bool isVolatile() const {
-      if (IsTargetMemInst) {
-        assert(Info.IsSimple && "need to refine IsSimple in TTI");
-        return false;
-      }
+      if (IsTargetMemInst)
+        return Info.IsVolatile;
+
       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
         return LI->isVolatile();
       } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
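
After this change, a target's getTgtMemIntrinsic hook can describe atomic or
volatile memory intrinsics through the new Ordering and IsVolatile fields
instead of tripping the removed assert in EarlyCSE. A minimal sketch of such
a hook, assuming a hypothetical target class and intrinsic whose only operand
is the pointer (MyTargetTTIImpl, Intrinsic::mytarget_atomic_ld, the operand
layout, and the chosen ordering below are illustrative, not part of this
patch):

// Sketch only: MyTargetTTIImpl and Intrinsic::mytarget_atomic_ld are
// hypothetical. The MemIntrinsicInfo fields are the ones added above.
bool MyTargetTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                         MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::mytarget_atomic_ld:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.IsVolatile = false;
    // Monotonic is neither NotAtomic nor Unordered, so Info.isUnordered()
    // reports false for this intrinsic.
    Info.Ordering = AtomicOrdering::Monotonic;
    Info.PtrVal = Inst->getArgOperand(0);
    return true;
  }
}

With Info filled in this way, ParseMemoryInst::isAtomic() in EarlyCSE returns
true and isUnordered() returns false, so the pass conservatively declines to
CSE the intrinsic as if it were a simple load, where the old IsSimple flag
left targets no way to report such an operation at all.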