From: Hiroshi Inoue
Date: Tue, 5 Feb 2019 08:30:48 +0000 (+0000)
Subject: [NFC] fix trivial typos in comments
X-Git-Tag: llvmorg-10-init~12795
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=02a2bb2f54659e7830946bd20ee121ce7d80ff65;p=platform%2Fupstream%2Fllvm.git

[NFC] fix trivial typos in comments

llvm-svn: 353147
---

diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index bd133a4..e3d4478 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -884,7 +884,7 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
         getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
     if (AR != MustAlias)
       IsMustAlias = false;
-    // Operand doesnt alias 'Object', continue looking for other aliases
+    // Operand doesn't alias 'Object', continue looking for other aliases
     if (AR == NoAlias)
       continue;
     // Operand aliases 'Object', but call doesn't modify it. Strengthen
@@ -1019,7 +1019,7 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
   // heap state at the point the guard is issued needs to be consistent in case
   // the guard invokes the "deopt" continuation.

-  // NB! This function is *not* commutative, so we specical case two
+  // NB! This function is *not* commutative, so we special case two
   // possibilities for guard intrinsics.

   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
diff --git a/llvm/lib/Analysis/GlobalsModRef.cpp b/llvm/lib/Analysis/GlobalsModRef.cpp
index 5ec1a06..4649b30 100644
--- a/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -596,7 +596,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
         }

         // All non-call instructions we use the primary predicates for whether
-        // thay read or write memory.
+        // they read or write memory.
         if (I.mayReadFromMemory())
           FI.addModRefInfo(ModRefInfo::Ref);
         if (I.mayWriteToMemory())
@@ -790,10 +790,10 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
     }

     // FIXME: It would be good to handle other obvious no-alias cases here, but
-    // it isn't clear how to do so reasonbly without building a small version
+    // it isn't clear how to do so reasonably without building a small version
     // of BasicAA into this code. We could recurse into AAResultBase::alias
     // here but that seems likely to go poorly as we're inside the
-    // implementation of such a query. Until then, just conservatievly retun
+    // implementation of such a query. Until then, just conservatively return
     // false.
     return false;
   } while (!Inputs.empty());
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index 23d1c5b..0c1e57f 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -1009,7 +1009,7 @@ bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
   // If we started from an UnknownSCEV, and managed to build an addRecurrence
   // only after enabling Assume with PSCEV, this means we may have encountered
   // cast instructions that required adding a runtime check in order to
-  // guarantee the correctness of the AddRecurence respresentation of the
+  // guarantee the correctness of the AddRecurrence respresentation of the
   // induction.
   if (PhiScev != AR && SymbolicPhi) {
     SmallVector<Instruction *, 2> Casts;
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 57ffb2d..0e20739 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -76,7 +76,7 @@ static cl::opt<int> LocallyHotCallSiteThreshold(

 static cl::opt<int> ColdCallSiteRelFreq(
     "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
-    cl::desc("Maxmimum block frequency, expressed as a percentage of caller's "
+    cl::desc("Maximum block frequency, expressed as a percentage of caller's "
              "entry frequency, for a callsite to be cold in the absence of "
              "profile information."));

@@ -1675,7 +1675,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
 /// blocks to see if all their incoming edges are dead or not.
 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
   auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
-    // A CFG edge is dead if the predecessor is dead or the predessor has a
+    // A CFG edge is dead if the predecessor is dead or the predecessor has a
     // known successor which is not the one under exam.
     return (DeadBlocks.count(Pred) ||
             (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 03df953..4e915be 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -624,7 +624,7 @@ bool LazyValueInfoImpl::solveBlockValueImpl(ValueLatticeElement &Res,
   // and the like to prove non-nullness, but it's not clear that's worth it
   // compile time wise. The context-insensitive value walk done inside
   // isKnownNonZero gets most of the profitable cases at much less expense.
-  // This does mean that we have a sensativity to where the defining
+  // This does mean that we have a sensitivity to where the defining
   // instruction is placed, even if it could legally be hoisted much higher.
   // That is unfortunate.
   PointerType *PT = dyn_cast<PointerType>(BBI->getType());
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index b96f680..5130807 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2251,7 +2251,7 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {

   // Match the types so we can compare the stride and the BETakenCount.
   // The Stride can be positive/negative, so we sign extend Stride;
-  // The backdgeTakenCount is non-negative, so we zero extend BETakenCount.
+  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
   const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
   uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
   uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
diff --git a/llvm/lib/Analysis/MemorySSAUpdater.cpp b/llvm/lib/Analysis/MemorySSAUpdater.cpp
index 54cd265..53950be 100644
--- a/llvm/lib/Analysis/MemorySSAUpdater.cpp
+++ b/llvm/lib/Analysis/MemorySSAUpdater.cpp
@@ -598,7 +598,7 @@ void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,

   if (!RevDeleteUpdates.empty()) {
     // Update for inserted edges: use newDT and snapshot CFG as if deletes had
-    // not occured.
+    // not occurred.
     // FIXME: This creates a new DT, so it's more expensive to do mix
     // delete/inserts vs just inserts. We can do an incremental update on the DT
     // to revert deletes, than re-delete the edges. Teaching DT to do this, is
@@ -696,7 +696,7 @@ void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,

   // Map a BB to its predecessors: added + previously existing. To get a
   // deterministic order, store predecessors as SetVectors. The order in each
-  // will be defined by teh order in Updates (fixed) and the order given by
+  // will be defined by the order in Updates (fixed) and the order given by
   // children<> (also fixed). Since we further iterate over these ordered sets,
   // we lose the information of multiple edges possibly existing between two
   // blocks, so we'll keep and EdgeCount map for that.
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 8e1feeb..f64ee6a 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -991,7 +991,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
     // that all the pointers in the group don't wrap.
     // So we check only group member 0 (which is always guaranteed to exist),
     // and group member Factor - 1; If the latter doesn't exist we rely on
-    // peeling (if it is a non-reveresed accsess -- see Case 3).
+    // peeling (if it is a non-reversed accsess -- see Case 3).
     Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                       /*ShouldCheckWrap=*/true)) {