From 816097e4e5f32b59c2b6099f74356f1dbe55aabb Mon Sep 17 00:00:00 2001
From: David Green
Date: Thu, 20 Aug 2020 14:31:14 +0100
Subject: [PATCH] [LV] Allow tail folded reduction selects to remain in the loop

The normal scheme for tail folding reductions is to use:

loop:
  p = phi(0, a)
  mask = ...
  x = masked_load(..., mask)
  a = add(x, p)
  s = select(mask, a, p)

This means we need to keep the registers p and a, plus the mask, alive out
of the loop. On a target with predicated operations we can instead generate
the phi as p = phi(0, s). This keeps the select inside the loop, where we
can fold select(m, add(a, b), c) into something like a vaddt c, a, b using
the m predicate. This in turn allows us to tail predicate the entire loop.

Differential Revision: https://reviews.llvm.org/D84741
---
 llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 17 +++++
 .../LoopVectorize/reduction-predselect.ll       | 86 +++++++++++-----------
 2 files changed, 60 insertions(+), 43 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index e1a82f5..21d650ca 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -271,6 +271,11 @@ static cl::opt
     cl::desc("Prefer in-loop vector reductions, "
              "overriding the targets preference."));
 
+static cl::opt<bool> PreferPredicatedReductionSelect(
+    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
+    cl::desc(
+        "Prefer predicating a reduction operation over an after loop select."));
+
 cl::opt<bool> EnableVPlanNativePath(
     "enable-vplan-native-path", cl::init(false), cl::Hidden,
     cl::desc("Enable VPlan-native vectorization path with "
@@ -3917,6 +3922,18 @@ void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
       }
       assert(Sel && "Reduction exit feeds no select");
       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
+
+      // If the target can create a predicated operator for the reduction at no
+      // extra cost in the loop (for example a predicated vadd), it can be
+      // cheaper for the select to remain in the loop than be sunk out of it,
+      // and so use the select value for the phi instead of the old
+      // LoopExitValue.
+      RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
+      if (PreferPredicatedReductionSelect) {
+        auto *VecRdxPhi = cast<PHINode>(getOrCreateVectorValue(Phi, Part));
+        VecRdxPhi->setIncomingValueForBlock(
+            LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
+      }
     }
   }
 
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
index aaae03b..cb29395 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
@@ -1,16 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -prefer-predicate-over-epilog -force-reduction-intrinsics -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -prefer-predicate-over-epilog -prefer-predicated-reduction-select -force-reduction-intrinsics -dce -instcombine -S | FileCheck %s
 
 target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
 
 define i32 @reduction_sum_single(i32* noalias nocapture %A) {
 ; CHECK-LABEL: @reduction_sum_single(
 ; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP24:%.*]], %pred.load.continue6 ]
-; CHECK: [[TMP24]] = add <4 x i32> [[VEC_PHI]], [[TMP23:%.*]]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP25:%.*]], %pred.load.continue6 ]
+; CHECK: [[TMP24:%.*]] = select <4 x i1> [[TMP0:%.*]], <4 x i32> [[TMP23:%.*]], <4 x i32> zeroinitializer
+; CHECK: [[TMP25]] = add <4 x i32> [[VEC_PHI]], [[TMP24]]
 ; CHECK: middle.block:
-; CHECK: [[TMP26:%.*]] = select <4 x i1> [[TMP0:%.*]], <4 x i32> [[TMP24]], <4 x i32> [[VEC_PHI]]
-; CHECK: [[TMP27:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[TMP26]])
+; CHECK: [[TMP27:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[TMP25]])
 ;
 entry:
   br label %.lr.ph
@@ -32,14 +32,13 @@ entry:
 
 define i32 @reduction_sum(i32* noalias nocapture %A, i32* noalias nocapture %B) {
 ; CHECK-LABEL: @reduction_sum(
-; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP46:%.*]], %pred.load.continue14 ]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP47:%.*]], %pred.load.continue14 ]
 ; CHECK: [[TMP44:%.*]] = add <4 x i32> [[VEC_PHI]], [[VEC_IND:%.*]]
 ; CHECK: [[TMP45:%.*]] = add <4 x i32> [[TMP44]], [[TMP23:%.*]]
-; CHECK: [[TMP46]] = add <4 x i32> [[TMP45]], [[TMP43:%.*]]
+; CHECK: [[TMP46:%.*]] = add <4 x i32> [[TMP45]], [[TMP43:%.*]]
+; CHECK: [[TMP47]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP46]], <4 x i32> [[VEC_PHI]]
 ; CHECK: middle.block:
-; CHECK: [[TMP48:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP46]], <4 x i32> [[VEC_PHI]]
-; CHECK: [[TMP49:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[TMP48]])
+; CHECK: [[TMP49:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[TMP47]])
 ;
 entry:
   br label %.lr.ph
@@ -66,12 +65,12 @@ entry:
 define i32 @reduction_prod(i32* noalias nocapture %A, i32* noalias nocapture %B) {
 ; CHECK-LABEL: @reduction_prod(
 ; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %vector.ph ], [ [[TMP45:%.*]], %pred.load.continue14 ]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %vector.ph ], [ [[TMP46:%.*]], %pred.load.continue14 ]
 ; CHECK: [[TMP44:%.*]] = mul <4 x i32> [[VEC_PHI]], [[TMP23:%.*]]
-; CHECK: [[TMP45]] = mul <4 x i32> [[TMP44]], [[TMP43:%.*]]
+; CHECK: [[TMP45:%.*]] = mul <4 x i32> [[TMP44]], [[TMP43:%.*]]
+; CHECK: [[TMP46]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP45]], <4 x i32> [[VEC_PHI]]
 ; CHECK: middle.block:
-; CHECK: [[TMP47:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP45]], <4 x i32> [[VEC_PHI]]
-; CHECK: [[TMP48:%.*]] = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> [[TMP47]])
+; CHECK: [[TMP48:%.*]] = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> [[TMP46]])
 ;
 entry:
   br label %.lr.ph
@@ -97,12 +96,12 @@ entry:
 define i32 @reduction_and(i32* nocapture %A, i32* nocapture %B) {
 ; CHECK-LABEL: @reduction_and(
 ; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %vector.ph ], [ [[TMP45:%.*]], %pred.load.continue14 ]
-; CHECK: [[TMP44:%.*]] = and <4 x i32> [[VEC_PHI]], [[TMP42:%.*]]
-; CHECK: [[TMP45]] = and <4 x i32> [[TMP44]], [[TMP43]]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %vector.ph ], [ [[TMP46:%.*]], %pred.load.continue14 ]
+; CHECK: [[TMP44:%.*]] = and <4 x i32> [[VEC_PHI]], [[TMP23:%.*]]
+; CHECK: [[TMP45:%.*]] = and <4 x i32> [[TMP44]], [[TMP43:%.*]]
+; CHECK: [[TMP46]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP45]], <4 x i32> [[VEC_PHI]]
 ; CHECK: middle.block:
-; CHECK: [[TMP47:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP45]], <4 x i32> [[VEC_PHI]]
-; CHECK: [[TMP48:%.*]] = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> [[TMP47]])
+; CHECK: [[TMP48:%.*]] = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> [[TMP46]])
 ;
 entry:
   br label %for.body
@@ -128,11 +127,11 @@ for.end: ; preds = %for.body, %entry
 define i32 @reduction_or(i32* nocapture %A, i32* nocapture %B) {
 ; CHECK-LABEL: @reduction_or(
 ; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP45:%.*]], %pred.load.continue14 ]
-; CHECK: [[TMP45]] = or <4 x i32> [[TMP44:%.*]], [[VEC_PHI]]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP46:%.*]], %pred.load.continue14 ]
+; CHECK: [[TMP45:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP44:%.*]], <4 x i32> zeroinitializer
+; CHECK: [[TMP46]] = or <4 x i32> [[VEC_PHI]], [[TMP45]]
 ; CHECK: middle.block:
-; CHECK: [[TMP47:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP45]], <4 x i32> [[VEC_PHI]]
-; CHECK: [[TMP48:%.*]] = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> [[TMP47]])
+; CHECK: [[TMP48:%.*]] = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> [[TMP46]])
 ;
 entry:
   br label %for.body
@@ -157,11 +156,12 @@ for.end: ; preds = %for.body, %entry
 
 define i32 @reduction_xor(i32* nocapture %A, i32* nocapture %B) {
 ; CHECK-LABEL: @reduction_xor(
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP45:%.*]], %pred.load.continue14 ]
-; CHECK: [[TMP45]] = xor <4 x i32> [[TMP44:%.*]], [[VEC_PHI]]
+; CHECK: vector.body:
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP46:%.*]], %pred.load.continue14 ]
+; CHECK: [[TMP45:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP44:%.*]], <4 x i32> zeroinitializer
+; CHECK: [[TMP46]] = xor <4 x i32> [[VEC_PHI]], [[TMP45]]
 ; CHECK: middle.block:
-; CHECK: [[TMP47:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x i32> [[TMP45]], <4 x i32> [[VEC_PHI]]
-; CHECK: [[TMP48:%.*]] = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> [[TMP47]])
+; CHECK: [[TMP48:%.*]] = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> [[TMP46]])
 ;
 entry:
   br label %for.body
@@ -187,12 +187,12 @@ for.end: ; preds = %for.body, %entry
 define float @reduction_fadd(float* nocapture %A, float* nocapture %B) {
 ; CHECK-LABEL: @reduction_fadd(
 ; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, %vector.ph ], [ [[TMP45:%.*]], %pred.load.continue14 ]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, %vector.ph ], [ [[TMP46:%.*]], %pred.load.continue14 ]
 ; CHECK: [[TMP44:%.*]] = fadd fast <4 x float> [[VEC_PHI]], [[TMP23:%.*]]
-; CHECK: [[TMP45]] = fadd fast <4 x float> [[TMP44]], [[TMP43]]
+; CHECK: [[TMP45:%.*]] = fadd fast <4 x float> [[TMP44]], [[TMP43:%.*]]
+; CHECK: [[TMP46]] = select <4 x i1> [[TMP3:%.*]], <4 x float> [[TMP45]], <4 x float> [[VEC_PHI]]
 ; CHECK: middle.block:
-; CHECK: [[TMP47:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x float> [[TMP45]], <4 x float> [[VEC_PHI]]
-; CHECK: [[TMP48:%.*]] = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.000000e+00, <4 x float> [[TMP47]])
+; CHECK: [[TMP48:%.*]] = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.000000e+00, <4 x float> [[TMP46]])
 ;
 entry:
   br label %for.body
@@ -218,12 +218,12 @@ for.end: ; preds = %for.body, %entry
 define float @reduction_fmul(float* nocapture %A, float* nocapture %B) {
 ; CHECK-LABEL: @reduction_fmul(
 ; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x float> [ , %vector.ph ], [ [[TMP45:%.*]], %pred.load.continue14 ]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x float> [ , %vector.ph ], [ [[TMP46:%.*]], %pred.load.continue14 ]
 ; CHECK: [[TMP44:%.*]] = fmul fast <4 x float> [[VEC_PHI]], [[TMP23:%.*]]
-; CHECK: [[TMP45]] = fmul fast <4 x float> [[TMP44]], [[TMP43]]
+; CHECK: [[TMP45:%.*]] = fmul fast <4 x float> [[TMP44]], [[TMP43:%.*]]
+; CHECK: [[TMP46]] = select <4 x i1> [[TMP3:%.*]], <4 x float> [[TMP45]], <4 x float> [[VEC_PHI]]
 ; CHECK: middle.block:
-; CHECK: [[TMP47:%.*]] = select <4 x i1> [[TMP3:%.*]], <4 x float> [[TMP45]], <4 x float> [[VEC_PHI]]
-; CHECK: [[TMP48:%.*]] = call fast float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float 1.000000e+00, <4 x float> [[TMP47]])
+; CHECK: [[TMP48:%.*]] = call fast float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float 1.000000e+00, <4 x float> [[TMP46]])
 ;
 entry:
   br label %for.body
@@ -249,12 +249,12 @@ for.end: ; preds = %for.body, %entry
 define i32 @reduction_min(i32* nocapture %A, i32* nocapture %B) {
 ; CHECK-LABEL: @reduction_min(
 ; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %vector.ph ], [ [[TMP25:%.*]], %pred.load.continue6 ]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %vector.ph ], [ [[TMP26:%.*]], %pred.load.continue6 ]
 ; CHECK: [[TMP24:%.*]] = icmp slt <4 x i32> [[VEC_PHI]], [[TMP23:%.*]]
-; CHECK: [[TMP25]] = select <4 x i1> [[TMP24]], <4 x i32> [[VEC_PHI]], <4 x i32> [[TMP23]]
+; CHECK: [[TMP25:%.*]] = select <4 x i1> [[TMP24]], <4 x i32> [[VEC_PHI]], <4 x i32> [[TMP23]]
+; CHECK: [[TMP26]] = select <4 x i1> [[TMP0:%.*]], <4 x i32> [[TMP25]], <4 x i32> [[VEC_PHI]]
 ; CHECK: middle.block:
-; CHECK: [[TMP27:%.*]] = select <4 x i1> [[TMP0:%.*]], <4 x i32> [[TMP25]], <4 x i32> [[VEC_PHI]]
-; CHECK: [[TMP28:%.*]] = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> [[TMP27]])
+; CHECK: [[TMP28:%.*]] = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> [[TMP26]])
 ;
 entry:
   br label %for.body
@@ -278,12 +278,12 @@ for.end: ; preds = %for.body, %entry
 define i32 @reduction_max(i32* nocapture %A, i32* nocapture %B) {
 ; CHECK-LABEL: @reduction_max(
 ; CHECK: vector.body:
-; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %vector.ph ], [ [[TMP25:%.*]], %pred.load.continue6 ]
+; CHECK: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %vector.ph ], [ [[TMP26:%.*]], %pred.load.continue6 ]
 ; CHECK: [[TMP24:%.*]] = icmp ugt <4 x i32> [[VEC_PHI]], [[TMP23:%.*]]
-; CHECK: [[TMP25]] = select <4 x i1> [[TMP24]], <4 x i32> [[VEC_PHI]], <4 x i32> [[TMP23]]
+; CHECK: [[TMP25:%.*]] = select <4 x i1> [[TMP24]], <4 x i32> [[VEC_PHI]], <4 x i32> [[TMP23]]
+; CHECK: [[TMP26]] = select <4 x i1> [[TMP0:%.*]], <4 x i32> [[TMP25]], <4 x i32> [[VEC_PHI]]
 ; CHECK: middle.block:
-; CHECK: [[TMP27:%.*]] = select <4 x i1> [[TMP0:%.*]], <4 x i32> [[TMP25]], <4 x i32> [[VEC_PHI]]
-; CHECK: [[TMP28:%.*]] = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> [[TMP27]])
+; CHECK: [[TMP28:%.*]] = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> [[TMP26]])
 ;
 entry:
   br label %for.body
-- 
2.7.4
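
For illustration, here is a minimal hand-written LLVM IR sketch of the loop shape this change aims for. It is not taken from the patch: the function name, the fixed <4 x i32> width, and the pre-computed %mask argument (a real tail-folded loop recomputes the mask each iteration) are assumptions made to keep the example small. The point is that the select stays inside vector.body and feeds the reduction phi on the back edge, i.e. p = phi(0, s), so only the accumulator is live out of the loop and a target with predicated operations (the vaddt in the commit message is an MVE example) can fold the add and select into a single predicated add.

define i32 @masked_sum_sketch(<4 x i32>* %A, <4 x i1> %mask, i32 %n) {
entry:
  br label %vector.body

vector.body:
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  ; The reduction phi takes the select result on the back edge: p = phi(0, s).
  %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %sel, %vector.body ]
  %ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %A, i32 %index
  %wide.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> undef)
  %add = add <4 x i32> %vec.phi, %wide.load
  ; The select remains in the loop; select(mask, add, phi) is the pattern a
  ; predicated target can turn into one predicated add.
  %sel = select <4 x i1> %mask, <4 x i32> %add, <4 x i32> %vec.phi
  %index.next = add i32 %index, 1
  %done = icmp eq i32 %index.next, %n
  br i1 %done, label %middle.block, label %vector.body

middle.block:
  ; No extra select is needed before the horizontal reduction.
  %res = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %sel)
  ret i32 %res
}

declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)

With the previous placement, the equivalent select sat in middle.block (as the old CHECK lines above show), which kept both the add result and the phi live out of the vector loop.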