From: Alina Sbirlea Date: Wed, 29 Apr 2020 05:57:19 +0000 (-0700) Subject: [MemorySSA] Pass DT to the upward iterator for proper PhiTranslation. X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=161ccfe5bad52aba7518034cea4814aea0906c69;p=platform%2Fupstream%2Fllvm.git [MemorySSA] Pass DT to the upward iterator for proper PhiTranslation. Summary: A valid DominatorTree is needed to do PhiTranslation. Before this patch, a MemoryUse could be optimized to an access outside a loop, while the address it loads from is modified in the loop. This can lead to a miscompile. Reviewers: george.burgess.iv Subscribers: Prazek, hiraditya, llvm-commits Tags: #llvm Differential Revision: https://reviews.llvm.org/D79068 --- diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h index cc79b7e..24b806b 100644 --- a/llvm/include/llvm/Analysis/MemorySSA.h +++ b/llvm/include/llvm/Analysis/MemorySSA.h @@ -723,6 +723,8 @@ public: return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB))); } + DominatorTree &getDomTree() const { return *DT; } + void dump() const; void print(raw_ostream &) const; @@ -1179,9 +1181,9 @@ class upward_defs_iterator using BaseT = upward_defs_iterator::iterator_facade_base; public: - upward_defs_iterator(const MemoryAccessPair &Info) + upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT) : DefIterator(Info.first), Location(Info.second), - OriginalAccess(Info.first) { + OriginalAccess(Info.first), DT(DT) { CurrentPair.first = nullptr; WalkingPhi = Info.first && isa<MemoryPhi>(Info.first); @@ -1220,12 +1222,13 @@ private: const_cast<Value *>(Location.Ptr), OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr); if (!Translator.PHITranslateValue(OriginalAccess->getBlock(), - DefIterator.getPhiArgBlock(), nullptr, - false)) + DefIterator.getPhiArgBlock(), DT, + false)) { if (Translator.getAddr() != Location.Ptr) { CurrentPair.second = Location.getWithNewPtr(Translator.getAddr()); return; } + } } CurrentPair.second 
= Location; } @@ -1235,17 +1238,19 @@ private: MemoryLocation Location; MemoryAccess *OriginalAccess = nullptr; bool WalkingPhi = false; + DominatorTree *DT = nullptr; }; -inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) { - return upward_defs_iterator(Pair); +inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair, + DominatorTree &DT) { + return upward_defs_iterator(Pair, &DT); } inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); } inline iterator_range<upward_defs_iterator> -upward_defs(const MemoryAccessPair &Pair) { - return make_range(upward_defs_begin(Pair), upward_defs_end()); +upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) { + return make_range(upward_defs_begin(Pair, DT), upward_defs_end()); } /// Walks the defining accesses of MemoryDefs. Stops after we hit something that diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp index aeb3fec..f2f5fd7 100644 --- a/llvm/lib/Analysis/MemorySSA.cpp +++ b/llvm/lib/Analysis/MemorySSA.cpp @@ -466,7 +466,8 @@ checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, assert(isa<MemoryPhi>(MA)); Worklist.append( - upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}), + upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}, + MSSA.getDomTree()), upward_defs_end()); } } @@ -595,8 +596,8 @@ template <class AliasAnalysisType> class ClobberWalker { void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches, ListIndex PriorNode) { - auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}), - upward_defs_end()); + auto UpwardDefs = make_range( + upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT), upward_defs_end()); for (const MemoryAccessPair &P : UpwardDefs) { PausedSearches.push_back(Paths.size()); Paths.emplace_back(P.second, P.first, PriorNode); diff --git a/llvm/test/Analysis/MemorySSA/phi-translation.ll b/llvm/test/Analysis/MemorySSA/phi-translation.ll index 4aa24f2..4d81116 100644 --- a/llvm/test/Analysis/MemorySSA/phi-translation.ll +++ 
b/llvm/test/Analysis/MemorySSA/phi-translation.ll @@ -193,3 +193,45 @@ if.end: br label %while.cond } +; CHECK-LABEL: define i32 @use_not_optimized_due_to_backedge +define i32 @use_not_optimized_due_to_backedge(i32* nocapture %m_i_strides, i32* nocapture readonly %eval_left_dims) { +entry: +; CHECK: 1 = MemoryDef(liveOnEntry) +; CHECK-NEXT: store i32 1, i32* %m_i_strides, align 4 + store i32 1, i32* %m_i_strides, align 4 + br label %for.body + +for.cond.cleanup: ; preds = %for.inc + ret i32 %m_i_size.1 + +for.body: ; preds = %entry, %for.inc +; CHECK: 4 = MemoryPhi({entry,1},{for.inc,3}) +; CHECK-NEXT: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ] + %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ] + %m_i_size.022 = phi i32 [ 1, %entry ], [ %m_i_size.1, %for.inc ] + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %cmp1 = icmp eq i64 %indvars.iv, 0 + %arrayidx2 = getelementptr inbounds i32, i32* %m_i_strides, i64 %indvars.iv +; CHECK: MemoryUse(4) MayAlias +; CHECK-NEXT: %0 = load i32, i32* %arrayidx2, align 4 + %0 = load i32, i32* %arrayidx2, align 4 + %arrayidx4 = getelementptr inbounds i32, i32* %eval_left_dims, i64 %indvars.iv +; CHECK: MemoryUse(4) MayAlias +; CHECK-NEXT: %1 = load i32, i32* %arrayidx4, align 4 + %1 = load i32, i32* %arrayidx4, align 4 + %mul = mul nsw i32 %1, %0 + br i1 %cmp1, label %if.then, label %for.inc + +if.then: ; preds = %for.body + %arrayidx7 = getelementptr inbounds i32, i32* %m_i_strides, i64 %indvars.iv.next +; CHECK: 2 = MemoryDef(4) +; CHECK-NEXT: store i32 %mul, i32* %arrayidx7, align 4 + store i32 %mul, i32* %arrayidx7, align 4 + br label %for.inc + +for.inc: ; preds = %for.body, %if.then +; CHECK: 3 = MemoryPhi({for.body,4},{if.then,2}) +; CHECK-NEXT: %m_i_size.1 = phi i32 [ %m_i_size.022, %if.then ], [ %mul, %for.body ] + %m_i_size.1 = phi i32 [ %m_i_size.022, %if.then ], [ %mul, %for.body ] + br i1 %cmp1, label %for.body, label %for.cond.cleanup +}