From 3fcafa2cdb0d128507308a05555049d3424da2a5 Mon Sep 17 00:00:00 2001
From: Elena Demikhovsky
Date: Sun, 14 Dec 2014 09:43:50 +0000
Subject: [PATCH] Loop Vectorizer minor changes in the code - some comments,
 function names, indentation.

Reviewed here: http://reviews.llvm.org/D6527

llvm-svn: 224218
---
 llvm/include/llvm/Analysis/TargetTransformInfo.h |  4 ++--
 llvm/lib/Analysis/TargetTransformInfo.cpp        |  8 ++++----
 llvm/lib/Target/X86/X86TargetTransformInfo.cpp   | 10 +++++-----
 llvm/lib/Transforms/Vectorize/LoopVectorize.cpp  |  6 +++---
 4 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 8af8f77..4bd5dd8 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -274,8 +274,8 @@ public:
   /// AVX2 allows masks for consecutive load and store for i32 and i64 elements.
   /// AVX-512 architecture will also allow masks for non-consecutive memory
   /// accesses.
-  virtual bool isLegalPredicatedStore(Type *DataType, int Consecutive) const;
-  virtual bool isLegalPredicatedLoad (Type *DataType, int Consecutive) const;
+  virtual bool isLegalMaskedStore(Type *DataType, int Consecutive) const;
+  virtual bool isLegalMaskedLoad (Type *DataType, int Consecutive) const;
 
   /// \brief Return the cost of the scaling factor used in the addressing
   /// mode represented by AM for this target, for a load/store
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index d10bdd8..8fc1ac6 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -101,13 +101,13 @@ bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
   return PrevTTI->isLegalICmpImmediate(Imm);
 }
 
-bool TargetTransformInfo::isLegalPredicatedLoad(Type *DataType,
-                                                int Consecutive) const {
+bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
+                                            int Consecutive) const {
   return false;
 }
 
-bool TargetTransformInfo::isLegalPredicatedStore(Type *DataType,
-                                                 int Consecutive) const {
+bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
+                                             int Consecutive) const {
   return false;
 }
 
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index bed78ac..c55f311 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -111,8 +111,8 @@ public:
                          Type *Ty) const override;
   unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                          Type *Ty) const override;
-  bool isLegalPredicatedLoad (Type *DataType, int Consecutive) const override;
-  bool isLegalPredicatedStore(Type *DataType, int Consecutive) const override;
+  bool isLegalMaskedLoad (Type *DataType, int Consecutive) const override;
+  bool isLegalMaskedStore(Type *DataType, int Consecutive) const override;
 
   /// @}
 };
@@ -1159,7 +1159,7 @@ unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
   return X86TTI::getIntImmCost(Imm, Ty);
 }
 
-bool X86TTI::isLegalPredicatedLoad(Type *DataType, int Consecutive) const {
+bool X86TTI::isLegalMaskedLoad(Type *DataType, int Consecutive) const {
   int ScalarWidth = DataType->getScalarSizeInBits();
 
   // Todo: AVX512 allows gather/scatter, works with strided and random as well
@@ -1170,7 +1170,7 @@ bool X86TTI::isLegalPredicatedLoad(Type *DataType, int Consecutive) const {
   return false;
 }
 
-bool X86TTI::isLegalPredicatedStore(Type *DataType, int Consecutive) const {
-  return isLegalPredicatedLoad(DataType, Consecutive);
+bool X86TTI::isLegalMaskedStore(Type *DataType, int Consecutive) const {
+  return isLegalMaskedLoad(DataType, Consecutive);
 }
 
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index ef52da8..dca6a0c 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1871,7 +1871,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
 
     if (Reverse) {
       // If the address is consecutive but reversed, then the
-      // wide store needs to start at the last vector element.
+      // wide load needs to start at the last vector element.
      PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
      PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
    }
@@ -5341,7 +5341,7 @@ bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
     case Instruction::SDiv:
     case Instruction::URem:
     case Instruction::SRem:
-             return false;
+      return false;
     }
   }
 
@@ -5385,7 +5385,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
     MaxVectorSize = 1;
   }
 
-  assert(MaxVectorSize <= 32 && "Did not expect to pack so many elements"
+  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
          " into one vector!");
 
   unsigned VF = MaxVectorSize;
-- 
2.7.4
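
Illustrative usage (not part of the patch above): a minimal sketch of how a client, for example the loop vectorizer's legality or cost-model code, might query the renamed hooks. Only the isLegalMaskedLoad/isLegalMaskedStore signatures come from the diff; the helper name canUseMaskedAccess, the includes, and the stride convention (nonzero Consecutive for consecutive accesses, since the X86 implementation in the patch rejects Consecutive == 0) are assumptions for illustration.

    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Hypothetical helper: returns true if the target can emit a legal masked
    // access for the given load or store. 'Consecutive' follows the convention
    // visible in the patch: nonzero for consecutive accesses, 0 otherwise.
    static bool canUseMaskedAccess(const TargetTransformInfo &TTI,
                                   Instruction *I, int Consecutive) {
      if (auto *LI = dyn_cast<LoadInst>(I))
        return TTI.isLegalMaskedLoad(LI->getType(), Consecutive);
      if (auto *SI = dyn_cast<StoreInst>(I))
        return TTI.isLegalMaskedStore(SI->getValueOperand()->getType(),
                                      Consecutive);
      return false;
    }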