private:
// Whether the analysis should be performed by GPUDivergenceAnalysis.
- bool shouldUseGPUDivergenceAnalysis(const Function &F,
- const TargetTransformInfo &TTI) const;
+ bool shouldUseGPUDivergenceAnalysis(const Function &F) const;
// Optional handle to the new GPUDivergenceAnalysis.
std::unique_ptr<GPUDivergenceAnalysis> gpuDA;
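Once gpuDA is populated, the pass answers queries through it and otherwise falls back to the legacy result set. A minimal sketch of that dispatch, assuming the in-tree isDivergent() query and DivergentValues set keep their current shape:

  bool LegacyDivergenceAnalysis::isDivergent(const Value *V) const {
    if (gpuDA)
      return gpuDA->isDivergent(*V); // new analysis, when constructed
    return DivergentValues.count(V); // legacy result set
  }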
/// branches.
bool hasBranchDivergence() const;
- /// Return true if the target prefers to use GPU divergence analysis to
- /// replace the legacy version.
- bool useGPUDivergenceAnalysis() const;
-
/// Returns whether V is a source of divergence.
///
/// This function provides the target-dependent information for
virtual int
getUserCost(const User *U, ArrayRef<const Value *> Operands) = 0;
virtual bool hasBranchDivergence() = 0;
- virtual bool useGPUDivergenceAnalysis() = 0;
virtual bool isSourceOfDivergence(const Value *V) = 0;
virtual bool isAlwaysUniform(const Value *V) = 0;
virtual unsigned getFlatAddressSpace() = 0;
return Impl.getUserCost(U, Operands);
}
bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
- bool useGPUDivergenceAnalysis() override {
-   return Impl.useGPUDivergenceAnalysis();
- }
bool isSourceOfDivergence(const Value *V) override {
return Impl.isSourceOfDivergence(V);
}
bool hasBranchDivergence() { return false; }
- bool useGPUDivergenceAnalysis() { return false; }
-
bool isSourceOfDivergence(const Value *V) { return false; }
bool isAlwaysUniform(const Value *V) { return false; }
bool hasBranchDivergence() { return false; }
- bool useGPUDivergenceAnalysis() { return false; }
-
bool isSourceOfDivergence(const Value *V) { return false; }
bool isAlwaysUniform(const Value *V) { return false; }
void LegacyDivergenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<PostDominatorTreeWrapperPass>();
- AU.addRequired<LoopInfoWrapperPass>();
+ if (UseGPUDA)
+ AU.addRequired<LoopInfoWrapperPass>();
AU.setPreservesAll();
}
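Making the LoopInfo requirement conditional is safe only because the matching getAnalysis<LoopInfoWrapperPass>() call in runOnFunction (next hunk) sits behind the same UseGPUDA gate; fetching an analysis that was never declared via addRequired trips an assertion in the legacy pass manager. A sketch of the invariant, with MyPass and Flag as illustrative stand-ins:

  void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
    if (Flag) // declare the dependency only when it will be used...
      AU.addRequired<LoopInfoWrapperPass>();
  }

  bool MyPass::runOnFunction(Function &F) {
    if (Flag) { // ...and fetch it under the exact same condition.
      auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
      (void)LI;
    }
    return false;
  }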
bool LegacyDivergenceAnalysis::shouldUseGPUDivergenceAnalysis(
- const Function &F, const TargetTransformInfo &TTI) const {
- if (!(UseGPUDA || TTI.useGPUDivergenceAnalysis()))
+ const Function &F) const {
+ if (!UseGPUDA)
return false;
// GPUDivergenceAnalysis requires a reducible CFG.
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
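The body elided here also performs the reducibility test that the comment promises; in the in-tree implementation it is built from a reverse post-order traversal, roughly as follows (a sketch, not the verbatim patched code):

  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  using RPOTraversal = ReversePostOrderTraversal<const Function *>;
  RPOTraversal FuncRPOT(&F);
  return !containsIrreducibleCFG<const BasicBlock *, const RPOTraversal,
                                 const LoopInfo>(FuncRPOT, LI);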
- if (shouldUseGPUDivergenceAnalysis(F, TTI)) {
+ if (shouldUseGPUDivergenceAnalysis(F)) {
// Run the new GPU divergence analysis.
auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
gpuDA = std::make_unique<GPUDivergenceAnalysis>(F, DT, PDT, LI, TTI);
return TTIImpl->hasBranchDivergence();
}
-bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
- return TTIImpl->useGPUDivergenceAnalysis();
-}
-
bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
return TTIImpl->isSourceOfDivergence(V);
}
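These one-line bodies are the standard TargetTransformInfo shape: the public object forwards every query to a type-erased implementation supplied by the target. A self-contained sketch of the pattern, where Concept, GPUModel, and Facade are illustrative names rather than the real LLVM types:

  #include <iostream>
  #include <memory>

  struct Concept {                  // plays the role of TTI's internal Concept
    virtual ~Concept() = default;
    virtual bool hasBranchDivergence() = 0;
  };

  struct GPUModel final : Concept { // plays the role of a target's TTIImpl
    bool hasBranchDivergence() override { return true; }
  };

  struct Facade {                   // plays the role of TargetTransformInfo
    std::unique_ptr<Concept> Impl;
    bool hasBranchDivergence() const { return Impl->hasBranchDivergence(); }
  };

  int main() {
    Facade TTI{std::make_unique<GPUModel>()};
    std::cout << TTI.hasBranchDivergence() << '\n'; // prints 1
  }

Removing useGPUDivergenceAnalysis() deletes one virtual from the Concept, one override from the Model, and one forwarding method from the facade, which is why the same three-line pattern disappears from each layer above.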
cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
cl::init(150), cl::Hidden);
-static cl::opt<bool> UseLegacyDA(
- "amdgpu-use-legacy-divergence-analysis",
- cl::desc("Enable legacy divergence analysis for AMDGPU"),
- cl::init(false), cl::Hidden);
-
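With the AMDGPU-specific escape hatch gone, the only remaining switch is the target-independent UseGPUDA flag consulted in shouldUseGPUDivergenceAnalysis(). Its declaration in LegacyDivergenceAnalysis.cpp follows the same cl::opt pattern as the flag removed above, approximately:

  static cl::opt<bool>
      UseGPUDA("use-gpu-divergence-analysis", cl::init(false), cl::Hidden,
               cl::desc("turn the LegacyDivergenceAnalysis into "
                        "a wrapper for GPUDivergenceAnalysis"));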
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
unsigned Depth = 0) {
const Instruction *I = dyn_cast<Instruction>(Cond);
}
}
-/// \returns true if the new GPU divergence analysis is enabled.
-bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
- return !UseLegacyDA;
-}
-
/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
HasFP32Denormals(ST->hasFP32Denormals(F)) { }
bool hasBranchDivergence() { return true; }
- bool useGPUDivergenceAnalysis() const;
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
TTI::UnrollingPreferences &UP);