From c94d32a6b30263ea448ac6ae0f95641b8574c5f5 Mon Sep 17 00:00:00 2001
From: dfukalov
Date: Fri, 5 Jun 2020 02:18:18 +0300
Subject: [PATCH] [AMDGPU] Increase max iterations count to analyze complete
 unroll

Summary: In some cases inner loops may not get boosts, so try to analyze
them deeper.

Reviewers: rampitec, mzolotukhin

Reviewed By: rampitec

Subscribers: arsenm, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard,
tpr, t-tye, hiraditya, zzheng, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D81204
---
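NOTE: the new UnrollingPreferences field is meant to be set from a target's
getUnrollingPreferences() hook, as the AMDGPU hunk below does. A minimal
sketch of how another target could opt in (the MyTargetTTIImpl name and the
32-iteration cap are illustrative assumptions, not code from this patch):

    void MyTargetTTIImpl::getUnrollingPreferences(
        Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP) {
      // Innermost loops are cheap to simulate iteration by iteration, so
      // let the full-unroll cost model look at more iterations before it
      // gives up on complete unrolling.
      if (L->empty()) // no subloops, i.e. an innermost loop
        UP.MaxIterationsCountToAnalyze = 32;
    }

Per the LoopUnrollPass.cpp hunk, the target value is only a default: an
explicit -unroll-max-iterations-count-to-analyze=<n> passed to opt (the
cl::opt behind UnrollMaxIterationsCountToAnalyze) still overrides it.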
 llvm/include/llvm/Analysis/TargetTransformInfo.h   |  3 ++
 .../Target/AMDGPU/AMDGPUTargetTransformInfo.cpp    | 10 +++++
 llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp      | 14 ++++---
 .../AMDGPU/unroll-analyze-small-loops.ll           | 49 ++++++++++++++++++++++
 4 files changed, 71 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-analyze-small-loops.ll

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 9cb388c..0605155 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -503,6 +503,9 @@ public:
     /// This value is used in the same manner to limit the size of the inner
     /// loop.
     unsigned UnrollAndJamInnerLoopThreshold;
+    /// Don't allow loop unrolling to simulate more than this number of
+    /// iterations when checking full unroll profitability
+    unsigned MaxIterationsCountToAnalyze;
   };
 
   /// Get target-customized preferences for the generic loop unrolling
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 094fb5f7..085ba47 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -79,6 +79,11 @@ static cl::opt<bool> UseLegacyDA(
   cl::desc("Enable legacy divergence analysis for AMDGPU"),
   cl::init(false), cl::Hidden);
 
+static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
+    "amdgpu-unroll-max-block-to-analyze",
+    cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
+    cl::init(20), cl::Hidden);
+
 static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                               unsigned Depth = 0) {
   const Instruction *I = dyn_cast<Instruction>(Cond);
@@ -223,6 +228,11 @@ void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
       if (UP.Threshold >= MaxBoost)
         return;
     }
+
+    // If we got a GEP in a small BB from inner loop then increase max trip
+    // count to analyze for better estimation cost in unroll
+    if (L->empty() && BB->size() < UnrollMaxBlockToAnalyze)
+      UP.MaxIterationsCountToAnalyze = 32;
   }
 }
 
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index e94eb40..ec56610 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -223,6 +223,7 @@ TargetTransformInfo::UnrollingPreferences llvm::gatherUnrollingPreferences(
   UP.UnrollAndJam = false;
   UP.PeelProfiledIterations = true;
   UP.UnrollAndJamInnerLoopThreshold = 60;
+  UP.MaxIterationsCountToAnalyze = UnrollMaxIterationsCountToAnalyze;
 
   // Override with any target specific settings
   TTI.getUnrollingPreferences(L, SE, UP);
@@ -264,6 +265,8 @@ TargetTransformInfo::UnrollingPreferences llvm::gatherUnrollingPreferences(
     UP.AllowLoopNestsPeeling = UnrollAllowLoopNestsPeeling;
   if (UnrollUnrollRemainder.getNumOccurrences() > 0)
     UP.UnrollRemainder = UnrollUnrollRemainder;
+  if (UnrollMaxIterationsCountToAnalyze.getNumOccurrences() > 0)
+    UP.MaxIterationsCountToAnalyze = UnrollMaxIterationsCountToAnalyze;
 
   // Apply user values provided by argument
   if (UserThreshold.hasValue()) {
@@ -353,11 +356,12 @@ struct EstimatedUnrollCost {
 static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
     const Loop *L, unsigned TripCount, DominatorTree &DT, ScalarEvolution &SE,
     const SmallPtrSetImpl<const Value *> &EphValues,
-    const TargetTransformInfo &TTI, unsigned MaxUnrolledLoopSize) {
+    const TargetTransformInfo &TTI, unsigned MaxUnrolledLoopSize,
+    unsigned MaxIterationsCountToAnalyze) {
   // We want to be able to scale offsets by the trip count and add more offsets
   // to them without checking for overflows, and we already don't want to
   // analyze *massive* trip counts, so we force the max to be reasonably small.
-  assert(UnrollMaxIterationsCountToAnalyze <
+  assert(MaxIterationsCountToAnalyze <
              (unsigned)(std::numeric_limits<int>::max() / 2) &&
          "The unroll iterations max is too large!");
 
@@ -367,8 +371,7 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
     return None;
 
   // Don't simulate loops with a big or unknown tripcount
-  if (!UnrollMaxIterationsCountToAnalyze || !TripCount ||
-      TripCount > UnrollMaxIterationsCountToAnalyze)
+  if (!TripCount || TripCount > MaxIterationsCountToAnalyze)
     return None;
 
   SmallSetVector<BasicBlock *, 16> BBWorklist;
@@ -845,7 +848,8 @@ bool llvm::computeUnrollCount(
     // To check that, run additional analysis on the loop.
     if (Optional<EstimatedUnrollCost> Cost = analyzeLoopUnrollCost(
             L, FullUnrollTripCount, DT, SE, EphValues, TTI,
-            UP.Threshold * UP.MaxPercentThresholdBoost / 100)) {
+            UP.Threshold * UP.MaxPercentThresholdBoost / 100,
+            UP.MaxIterationsCountToAnalyze)) {
       unsigned Boost =
           getFullUnrollBoostingFactor(*Cost, UP.MaxPercentThresholdBoost);
       if (Cost->UnrolledCost < UP.Threshold * Boost / 100) {
diff --git a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-analyze-small-loops.ll b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-analyze-small-loops.ll
new file mode 100644
index 0000000..423d65c
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-analyze-small-loops.ll
@@ -0,0 +1,49 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -loop-unroll -unroll-threshold=150 < %s | FileCheck %s
+
+; Test that max iterations count to analyze (specific for the target)
+; is enough to make the inner loop completely unrolled
+define hidden void @foo(float addrspace(1)* %ptrG, float addrspace(3)* %ptrL, i32 %A, i32 %A2, i32 %M) {
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb7, %bb
+  %i = phi i32 [ 0, %bb ], [ %i8, %bb7 ]
+  br label %bb4
+
+bb3:                                              ; preds = %bb7
+  ret void
+
+bb4:                                              ; preds = %bb10, %bb2
+  %i5 = phi i32 [ 0, %bb2 ], [ %i11, %bb10 ]
+  %i6 = add nuw nsw i32 %i5, %i
+  br label %for.body
+
+bb7:                                              ; preds = %bb10
+  %i8 = add nuw nsw i32 %i, 1
+  %i9 = icmp eq i32 %i8, 8
+  br i1 %i9, label %bb3, label %bb2
+
+bb10:                                             ; preds = %for.body
+  %i11 = add nuw nsw i32 %i5, 1
+  %cmpj = icmp ult i32 %i11, 8
+  br i1 %cmpj, label %bb7, label %bb4
+
+; CHECK: for.body:
+; CHECK-NOT: %phi = phi {{.*}}
+for.body:                                         ; preds = %for.body, %bb4
+  %phi = phi i32 [ 0, %bb4 ], [ %k, %for.body ]
+  %mul = shl nuw nsw i32 %phi, 5
+  %add1 = add i32 %A, %mul
+  %add2 = add i32 %add1, %M
+  %arrayidx = getelementptr inbounds float, float addrspace(3)* %ptrL, i32 %add2
+  %bc = bitcast float addrspace(3)* %arrayidx to i32 addrspace(3)*
+  %ld = load i32, i32 addrspace(3)* %bc, align 4
+  %mul2 = shl nuw nsw i32 %phi, 3
+  %add3 = add nuw nsw i32 %mul2, %A2
+  %arrayidx2 = getelementptr inbounds float, float addrspace(1)* %ptrG, i32 %add3
+  %bc2 = bitcast float addrspace(1)* %arrayidx2 to i32 addrspace(1)*
+  store i32 %ld, i32 addrspace(1)* %bc2, align 4
+  %k = add nuw nsw i32 %phi, 1
+  %cmpk = icmp ult i32 %k, 32
+  br i1 %cmpk, label %for.body, label %bb10
+}
-- 
2.7.4