From 1318ddbc14c28eee63df9cfe3ed92dca82557935 Mon Sep 17 00:00:00 2001
From: Sanjay Patel <spatel@rotateright.com>
Date: Sat, 11 Apr 2020 10:05:49 -0400
Subject: [PATCH] [VectorUtils] rename scaleShuffleMask to narrowShuffleMaskElts; NFC

As proposed in D77881, we'll have the related widening operation, so this
name becomes too vague.

While here, change the function signature to take an 'int' rather than
'size_t' for the scaling factor, add an assert for overflow of 32-bits,
and improve the documentation comments.
---
 llvm/include/llvm/Analysis/VectorUtils.h            | 16 ++++++++--------
 llvm/lib/Analysis/VectorUtils.cpp                   | 16 +++++++++++-----
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp       |  4 ++--
 llvm/lib/Target/X86/X86ISelLowering.cpp             | 20 ++++++++++----------
 llvm/lib/Target/X86/X86InterleavedAccess.cpp        |  8 ++++----
 .../Transforms/InstCombine/InstCombineVectorOps.cpp |  2 +-
 llvm/lib/Transforms/Vectorize/VectorCombine.cpp     |  2 +-
 llvm/unittests/Analysis/VectorUtilsTest.cpp         |  6 +++---
 8 files changed, 40 insertions(+), 34 deletions(-)

diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index 36aea31..d64ad12 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -328,19 +328,19 @@ const Value *getSplatValue(const Value *V);
 /// not limited by finding a scalar source value to a splatted vector.
 bool isSplatValue(const Value *V, int Index = -1, unsigned Depth = 0);
 
-/// Scale a shuffle or target shuffle mask, replacing each mask index with the
-/// scaled sequential indices for an equivalent mask of narrowed elements.
-/// Mask elements that are less than 0 (sentinel values) are repeated in the
-/// output mask.
+/// Replace each shuffle mask index with the scaled sequential indices for an
+/// equivalent mask of narrowed elements. Mask elements that are less than 0
+/// (sentinel values) are repeated in the output mask.
 ///
 /// Example with Scale = 4:
 ///   <4 x i32> <3, 2, 0, -1> -->
 ///   <16 x i8> <12, 13, 14, 15, 8, 9, 10, 11, 0, 1, 2, 3, -1, -1, -1, -1>
 ///
-/// This is the reverse process of "canWidenShuffleElements", but can always
-/// succeed.
-void scaleShuffleMask(size_t Scale, ArrayRef<int> Mask,
-                      SmallVectorImpl<int> &ScaledMask);
+/// This is the reverse process of widening shuffle mask elements, but it always
+/// succeeds because the indexes can always be multiplied (scaled up) to map to
+/// narrower vector elements.
+void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
+                           SmallVectorImpl<int> &ScaledMask);
 
 /// Compute a map of integer instructions to their minimum legal type
 /// size.
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 5468794..a5fa3ec 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -398,8 +398,8 @@ bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
   return false;
 }
 
-void llvm::scaleShuffleMask(size_t Scale, ArrayRef<int> Mask,
-                            SmallVectorImpl<int> &ScaledMask) {
+void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
+                                 SmallVectorImpl<int> &ScaledMask) {
   assert(Scale > 0 && "Unexpected scaling factor");
 
   // Fast-path: if no scaling, then it is just a copy.
@@ -409,9 +409,15 @@ void llvm::scaleShuffleMask(size_t Scale, ArrayRef<int> Mask,
   }
 
   ScaledMask.clear();
-  for (int MaskElt : Mask)
-    for (int ScaleElt = 0; ScaleElt != (int)Scale; ++ScaleElt)
-      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + ScaleElt);
+  for (int MaskElt : Mask) {
+    if (MaskElt >= 0) {
+      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <=
+                 std::numeric_limits<int32_t>::max() &&
+             "Overflowed 32-bits");
+    }
+    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
+      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
+  }
 }
 
 MapVector<Instruction *, uint64_t>
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 10312a5..60701af 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -19841,8 +19841,8 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
       ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0);
       SmallVector<int, 64> InnerMask;
       SmallVector<int, 64> OuterMask;
-      scaleShuffleMask(InnerScale, InnerSVN->getMask(), InnerMask);
-      scaleShuffleMask(OuterScale, SVN->getMask(), OuterMask);
+      narrowShuffleMaskElts(InnerScale, InnerSVN->getMask(), InnerMask);
+      narrowShuffleMaskElts(OuterScale, SVN->getMask(), OuterMask);
 
       // Merge the shuffle masks.
       SmallVector<int, 64> NewMask;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1864ccf..17898eb 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7332,8 +7332,8 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
     size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
     SmallVector<int, 64> Mask0, Mask1;
-    scaleShuffleMask(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
-    scaleShuffleMask(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
+    narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
+    narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
     for (size_t i = 0; i != MaskSize; ++i) {
       if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
         Mask.push_back(SM_SentinelUndef);
@@ -7391,7 +7391,7 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
     if ((NumSubElts % SubMask.size()) == 0) {
       int Scale = NumSubElts / SubMask.size();
       SmallVector<int, 64> ScaledSubMask;
-      scaleShuffleMask(Scale, SubMask, ScaledSubMask);
+      narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
       SubMask = ScaledSubMask;
     } else {
       int Scale = SubMask.size() / NumSubElts;
@@ -16359,7 +16359,7 @@ static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   SmallVector<int, 2> RepeatedMask;
   if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
     SmallVector<int, 4> PSHUFDMask;
-    scaleShuffleMask(2, RepeatedMask, PSHUFDMask);
+    narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
     return DAG.getBitcast(
         MVT::v4i64,
         DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
@@ -17008,7 +17008,7 @@ static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
   SmallVector<int, 2> Widened256Mask;
   if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
     Widened128Mask.clear();
-    llvm::scaleShuffleMask(2, Widened256Mask, Widened128Mask);
+    narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
   }
 
   // Try to lower to vshuf64x2/vshuf32x4.
@@ -17159,7 +17159,7 @@ static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   SmallVector<int, 2> Repeated128Mask;
   if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
     SmallVector<int, 4> PSHUFDMask;
-    scaleShuffleMask(2, Repeated128Mask, PSHUFDMask);
+    narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
     return DAG.getBitcast(
         MVT::v8i64,
         DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
@@ -20247,7 +20247,7 @@ static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
     // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
     SmallVector<int, 64> Mask;
     int Scale = 64 / OutVT.getScalarSizeInBits();
-    scaleShuffleMask(Scale, { 0, 2, 1, 3 }, Mask);
+    narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
     Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
 
     if (DstVT.is256BitVector())
@@ -33693,7 +33693,7 @@ static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
       // Narrow the repeated mask to create 32-bit element permutes.
       SmallVector<int, 4> WordMask = RepeatedMask;
       if (MaskScalarSizeInBits == 64)
-        scaleShuffleMask(2, RepeatedMask, WordMask);
+        narrowShuffleMaskElts(2, RepeatedMask, WordMask);
 
       Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
       ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
@@ -34146,7 +34146,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
   if (BaseMaskEltSizeInBits > 64) {
     assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
     int MaskScale = BaseMaskEltSizeInBits / 64;
-    scaleShuffleMask(MaskScale, BaseMask, Mask);
+    narrowShuffleMaskElts(MaskScale, BaseMask, Mask);
   } else {
     Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
   }
@@ -38294,7 +38294,7 @@ static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
   if ((NumSrcElts % Mask.size()) == 0) {
     SmallVector<int, 16> ScaledMask;
     int Scale = NumSrcElts / Mask.size();
-    scaleShuffleMask(Scale, Mask, ScaledMask);
+    narrowShuffleMaskElts(Scale, Mask, ScaledMask);
     Mask = std::move(ScaledMask);
   } else if ((Mask.size() % NumSrcElts) == 0) {
     // Simplify Mask based on demanded element.
diff --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
index 8c3b185..8463315 100644
--- a/llvm/lib/Target/X86/X86InterleavedAccess.cpp
+++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
@@ -336,8 +336,8 @@ void X86InterleavedAccessGroup::interleave8bitStride4VF8(
   createUnpackShuffleMask(VT, MaskLowTemp1, true, false);
   createUnpackShuffleMask(VT, MaskHighTemp1, false, false);
-  scaleShuffleMask(2, MaskHighTemp1, MaskHighWord);
-  scaleShuffleMask(2, MaskLowTemp1, MaskLowWord);
+  narrowShuffleMaskElts(2, MaskHighTemp1, MaskHighWord);
+  narrowShuffleMaskElts(2, MaskLowTemp1, MaskLowWord);
 
   // IntrVec1Low = c0 m0 c1 m1 c2 m2 c3 m3 c4 m4 c5 m5 c6 m6 c7 m7
   // IntrVec2Low = y0 k0 y1 k1 y2 k2 y3 k3 y4 k4 y5 k5 y6 k6 y7 k7
   Value *IntrVec1Low =
@@ -384,8 +384,8 @@ void X86InterleavedAccessGroup::interleave8bitStride4(
   createUnpackShuffleMask(HalfVT, MaskLowTemp, true, false);
   createUnpackShuffleMask(HalfVT, MaskHighTemp, false, false);
-  scaleShuffleMask(2, MaskLowTemp, LowHighMask[0]);
-  scaleShuffleMask(2, MaskHighTemp, LowHighMask[1]);
+  narrowShuffleMaskElts(2, MaskLowTemp, LowHighMask[0]);
+  narrowShuffleMaskElts(2, MaskHighTemp, LowHighMask[1]);
 
   // IntrVec1Low  = c0 m0 c1 m1 ... c7 m7  | c16 m16 c17 m17 ... c23 m23
   // IntrVec1High = c8 m8 c9 m9 ... c15 m15 | c24 m24 c25 m25 ... c31 m31
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 5d7204a..f052f8e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1967,7 +1967,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
       assert(XNumElts % VWidth == 0 && "Unexpected vector bitcast");
       unsigned ScaleFactor = XNumElts / VWidth;
       SmallVector<int, 16> ScaledMask;
-      scaleShuffleMask(ScaleFactor, Mask, ScaledMask);
+      narrowShuffleMaskElts(ScaleFactor, Mask, ScaledMask);
 
       // If the shuffled source vector simplifies, cast that value to this
       // shuffle's type.
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 444290b..5c73e41 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -298,7 +298,7 @@ static bool foldBitcastShuf(Instruction &I, const TargetTransformInfo &TTI) {
   SmallVector<int, 16> NewMask;
   assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
   unsigned ScaleFactor = DestNumElts / SrcNumElts;
-  scaleShuffleMask(ScaleFactor, Mask, NewMask);
+  narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
   Value *Shuf = Builder.CreateShuffleVector(CastV, UndefValue::get(DestTy),
                                             NewMask);
   I.replaceAllUsesWith(Shuf);
diff --git a/llvm/unittests/Analysis/VectorUtilsTest.cpp b/llvm/unittests/Analysis/VectorUtilsTest.cpp
index 1a06b09..24b1b0f 100644
--- a/llvm/unittests/Analysis/VectorUtilsTest.cpp
+++ b/llvm/unittests/Analysis/VectorUtilsTest.cpp
@@ -98,11 +98,11 @@ TEST_F(BasicTest, isSplat) {
   EXPECT_FALSE(isSplatValue(SplatWithUndefC));
 }
 
-TEST_F(BasicTest, scaleShuffleMask) {
+TEST_F(BasicTest, narrowShuffleMaskElts) {
   SmallVector<int, 16> ScaledMask;
-  scaleShuffleMask(1, {3,2,0,-2}, ScaledMask);
+  narrowShuffleMaskElts(1, {3,2,0,-2}, ScaledMask);
   EXPECT_EQ(makeArrayRef(ScaledMask), makeArrayRef({3,2,0,-2}));
-  scaleShuffleMask(4, {3,2,0,-1}, ScaledMask);
+  narrowShuffleMaskElts(4, {3,2,0,-1}, ScaledMask);
   EXPECT_EQ(makeArrayRef(ScaledMask), makeArrayRef({12,13,14,15,8,9,10,11,0,1,2,3,-1,-1,-1,-1}));
 }
-- 
2.7.4
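
P.S. The rename does not change the narrowing behavior. For readers who want to
try that behavior without a full LLVM build, here is a minimal standalone C++
sketch of the same logic. The helper name (narrowMaskSketch) and the
std::vector types are illustrative stand-ins only; the real
llvm::narrowShuffleMaskElts operates on ArrayRef<int> and SmallVectorImpl<int>
as shown in the patch above.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Sketch of the narrowing logic: fan each wide-element index out to Scale
// consecutive narrow-element indexes; sentinels (< 0) are repeated verbatim.
static void narrowMaskSketch(int Scale, const std::vector<int> &Mask,
                             std::vector<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");
  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0)
      // Mirrors the patch's new assert: bound the largest narrowed index,
      // Scale * MaskElt + (Scale - 1), not just the product.
      assert((uint64_t)Scale * MaskElt + (Scale - 1) <=
                 (uint64_t)std::numeric_limits<int32_t>::max() &&
             "Overflowed 32-bits");
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

int main() {
  // The doc-comment example: <4 x i32> <3, 2, 0, -1> narrowed with Scale = 4.
  std::vector<int> Narrow;
  narrowMaskSketch(4, {3, 2, 0, -1}, Narrow);
  for (int Elt : Narrow)
    std::cout << Elt << ' ';
  std::cout << '\n'; // prints: 12 13 14 15 8 9 10 11 0 1 2 3 -1 -1 -1 -1
  return 0;
}

Note that checking the last index produced for a mask element is sufficient:
if Scale * MaskElt + Scale - 1 fits in 32 bits, every index in that fan-out
does.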