From c73b6d6bf7facd0490388aea771b899292877d25 Mon Sep 17 00:00:00 2001
From: Hiroshi Inoue
Date: Wed, 20 Jun 2018 05:29:26 +0000
Subject: [PATCH] [NFC] fix trivial typos in comments

llvm-svn: 335096
---
 llvm/lib/CodeGen/CodeGenPrepare.cpp        | 30 +++++++++++++++---------------
 llvm/lib/CodeGen/MachineBlockPlacement.cpp |  2 +-
 llvm/lib/CodeGen/MachinePipeliner.cpp      | 16 ++++++++--------
 llvm/lib/CodeGen/MachineScheduler.cpp      | 10 +++++-----
 llvm/lib/CodeGen/RegAllocGreedy.cpp        | 18 +++++++++---------
 llvm/lib/CodeGen/RegisterCoalescer.cpp     | 10 +++++-----
 6 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index faae4a8..595fcf7 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1415,7 +1415,7 @@ SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
 ///   %x.extract.shift.1 = lshr i64 %arg1, 32
 ///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
 ///
-/// CodeGen will recoginze the pattern in BB2 and generate BitExtract
+/// CodeGen will recognize the pattern in BB2 and generate BitExtract
 /// instruction.
 /// Return true if any changes are made.
 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
@@ -1461,7 +1461,7 @@ static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
   // cmp i16 trunc.result, opnd2
   //
   if (isa<TruncInst>(User) && shiftIsLegal
-      // If the type of the truncate is legal, no trucate will be
+      // If the type of the truncate is legal, no truncate will be
       // introduced in other basic blocks.
       && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
@@ -2087,7 +2087,7 @@ class TypePromotionTransaction {
     /// Position of an instruction.
     /// Either an instruction:
     /// - Is the first in a basic block: BB is used.
-    /// - Has a previous instructon: PrevInst is used.
+    /// - Has a previous instruction: PrevInst is used.
     union {
       Instruction *PrevInst;
       BasicBlock *BB;
@@ -2362,7 +2362,7 @@ class TypePromotionTransaction {
     SetOfInstrs &RemovedInsts;

   public:
-    /// Remove all reference of \p Inst and optinally replace all its
+    /// Remove all reference of \p Inst and optionally replace all its
     /// uses with New.
     /// \p RemovedInsts Keep track of the instructions removed by this Action.
     /// \pre If !Inst->use_empty(), then New != nullptr
@@ -2863,7 +2863,7 @@ private:

   /// We have mapping between value A and basic block where value A
   /// seen to other value B where B was a field in addressing mode represented
-  /// by A. Also we have an original value C representin an address in some
+  /// by A. Also we have an original value C representing an address in some
   /// basic block. Traversing from C through phi and selects we ended up with
   /// A's in a map. This utility function tries to find a value V which is a
   /// field in addressing mode C and traversing through phi nodes and selects
@@ -3348,7 +3348,7 @@ public:
                                 SmallVectorImpl<Instruction *> *Truncs,
                                 const TargetLowering &TLI);

-  /// Given a sign/zero extend instruction \p Ext, return the approriate
+  /// Given a sign/zero extend instruction \p Ext, return the appropriate
   /// action to promote the operand of \p Ext instead of using Ext.
   /// \return NULL if no promotable action is possible with the current
   /// sign extension.
@@ -3621,7 +3621,7 @@ Value *TypePromotionHelper::promoteOperandForOther(
       continue;
     }
-    // Otherwise we have to explicity sign extend the operand.
+    // Otherwise we have to explicitly sign extend the operand.
     // Check if Ext was reused to extend an operand.
     if (!ExtForOpnd) {
       // If yes, create a new one.
@@ -4866,7 +4866,7 @@ bool CodeGenPrepare::mergeSExts(Function &F) {
       }
       if (!DT.dominates(Pt, Inst))
         // Give up if we need to merge in a common dominator as the
-        // expermients show it is not profitable.
+        // experiments show it is not profitable.
        continue;
       Inst->replaceAllUsesWith(Pt);
       RemovedInsts.insert(Inst);
@@ -6178,7 +6178,7 @@ bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {

 /// For the instruction sequence of store below, F and I values
 /// are bundled together as an i64 value before being stored into memory.
-/// Sometimes it is more efficent to generate separate stores for F and I,
+/// Sometimes it is more efficient to generate separate stores for F and I,
 /// which can remove the bitwise instructions or sink them to colder places.
 ///
 /// (store (or (zext (bitcast F to i32) to i64),
@@ -6756,8 +6756,8 @@ bool CodeGenPrepare::splitBranchCondition(Function &F) {
     Br1->setCondition(Cond1);
     LogicOp->eraseFromParent();

-    // Depending on the conditon we have to either replace the true or the false
-    // successor of the original branch instruction.
+    // Depending on the condition we have to either replace the true or the
+    // false successor of the original branch instruction.
     if (Opc == Instruction::And)
       Br1->setSuccessor(0, TmpBB);
     else
@@ -6810,8 +6810,8 @@ bool CodeGenPrepare::splitBranchCondition(Function &F) {
       // We have flexibility in setting Prob for BB1 and Prob for NewBB.
       // The requirement is that
       //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
-      //     = TrueProb for orignal BB.
-      // Assuming the orignal weights are A and B, one choice is to set BB1's
+      //     = TrueProb for original BB.
+      // Assuming the original weights are A and B, one choice is to set BB1's
       // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
       // assumes that
       //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
@@ -6845,8 +6845,8 @@ bool CodeGenPrepare::splitBranchCondition(Function &F) {
       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
       // The requirement is that
       //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
-      //     = FalseProb for orignal BB.
-      // Assuming the orignal weights are A and B, one choice is to set BB1's
+      //     = FalseProb for original BB.
+      // Assuming the original weights are A and B, one choice is to set BB1's
       // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
       // assumes that
       //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index 7ca1994..5e39e53 100644
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -2759,7 +2759,7 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
       TailDupSize = TailDupPlacementAggressiveThreshold;

   TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
-  // For agressive optimization, we can adjust some thresholds to be less
+  // For aggressive optimization, we can adjust some thresholds to be less
   // conservative.
   if (PassConfig->getOptLevel() >= CodeGenOpt::Aggressive) {
     // At O3 we should be more willing to copy blocks for tail duplication. This
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index ef96cec..5e1760e 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -17,7 +17,7 @@
 // interval, register requirements, and stage count. See the papers:
 //
 // "Swing Modulo Scheduling: A Lifetime-Sensitive Approach", by J. Llosa,
-// A. Gonzalez, E. Ayguade, and M. Valero. In PACT '96 Processings of the 1996
+// A. Gonzalez, E. Ayguade, and M. Valero. In PACT '96 Proceedings of the 1996
 // Conference on Parallel Architectures and Compilation Techiniques.
 //
 // "Lifetime-Sensitive Modulo Scheduling in a Production Environment", by J.
@@ -570,7 +570,7 @@ public:
 #endif
 };

-/// This class repesents the scheduled code. The main data structure is a
+/// This class represents the scheduled code. The main data structure is a
 /// map from scheduled cycle to instructions. During scheduling, the
 /// data structure explicitly represents all stages/iterations. When
 /// the algorithm finshes, the schedule is collapsed into a single stage,
@@ -1437,7 +1437,7 @@ unsigned SwingSchedulerDAG::calculateResMII() {
 /// Iterate over each circuit. Compute the delay(c) and distance(c)
 /// for each circuit. The II needs to satisfy the inequality
 /// delay(c) - II*distance(c) <= 0. For each circuit, choose the smallest
-/// II that satistifies the inequality, and the RecMII is the maximum
+/// II that satisfies the inequality, and the RecMII is the maximum
 /// of those values.
 unsigned SwingSchedulerDAG::calculateRecMII(NodeSetType &NodeSets) {
   unsigned RecMII = 0;
@@ -1617,7 +1617,7 @@ void SwingSchedulerDAG::findCircuits(NodeSetType &NodeSets) {
 }

 /// Return true for DAG nodes that we ignore when computing the cost functions.
-/// We ignore the back-edge recurrence in order to avoid unbounded recurison
+/// We ignore the back-edge recurrence in order to avoid unbounded recursion
 /// in the calculation of the ASAP, ALAP, etc functions.
 static bool ignoreDependence(const SDep &D, bool isPred) {
   if (D.isArtificial())
@@ -2330,7 +2330,7 @@ void SwingSchedulerDAG::generatePipelinedLoop(SMSchedule &Schedule) {

   // Remember the registers that are used in different stages. The index is
   // the iteration, or stage, that the instruction is scheduled in. This is
-  // a map between register names in the orignal block and the names created
+  // a map between register names in the original block and the names created
   // in each stage of the pipelined loop.
   ValueMapTy *VRMap = new ValueMapTy[(MaxStageCount + 1) * 2];
   InstrMapTy InstrMap;
@@ -2825,7 +2825,7 @@ void SwingSchedulerDAG::generateExistingPhis(

 /// Generate Phis for the specified block in the generated pipelined code.
 /// These are new Phis needed because the definition is scheduled after the
-/// use in the pipelened sequence.
+/// use in the pipelined sequence.
 void SwingSchedulerDAG::generatePhis(
     MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2,
     MachineBasicBlock *KernelBB, SMSchedule &Schedule, ValueMapTy *VRMap,
@@ -3711,7 +3711,7 @@ void SMSchedule::computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
                               int *MinEnd, int *MaxStart, int II,
                               SwingSchedulerDAG *DAG) {
   // Iterate over each instruction that has been scheduled already. The start
-  // slot computuation depends on whether the previously scheduled instruction
+  // slot computation depends on whether the previously scheduled instruction
   // is a predecessor or successor of the specified instruction.
   for (int cycle = getFirstCycle(); cycle <= LastCycle; ++cycle) {
@@ -3892,7 +3892,7 @@ void SMSchedule::orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
 bool SMSchedule::isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi) {
   if (!Phi.isPHI())
     return false;
-  assert(Phi.isPHI() && "Expecing a Phi.");
+  assert(Phi.isPHI() && "Expecting a Phi.");
   SUnit *DefSU = SSD->getSUnit(&Phi);
   unsigned DefCycle = cycleScheduled(DefSU);
   int DefStage = stageScheduled(DefSU);
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index c2c0d34..502d18f 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -345,7 +345,7 @@ ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
 /// This design avoids exposing scheduling boundaries to the DAG builder,
 /// simplifying the DAG builder's support for "special" target instructions.
 /// At the same time the design allows target schedulers to operate across
-/// scheduling boundaries, for example to bundle the boudary instructions
+/// scheduling boundaries, for example to bundle the boundary instructions
 /// without reordering them. This creates complexity, because the target
 /// scheduler must update the RegionBegin and RegionEnd positions cached by
 /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
@@ -1708,7 +1708,7 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {

   // If GlobalSegment is killed at the LocalLI->start, the call to find()
   // returned the next global segment. But if GlobalSegment overlaps with
-  // LocalLI->start, then advance to the next segement. If a hole in GlobalLI
+  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
   if (GlobalSegment->contains(LocalLI->beginIndex()))
     ++GlobalSegment;
@@ -1925,7 +1925,7 @@ getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
 /// The scheduler supports two modes of hazard recognition. The first is the
 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
 /// supports highly complicated in-order reservation tables
-/// (ScoreboardHazardRecognizer) and arbitraty target-specific logic.
+/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
 ///
 /// The second is a streamlined mechanism that checks for hazards based on
 /// simple counters that the scheduler itself maintains. It explicitly checks
@@ -2868,7 +2868,7 @@ void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
              << Cand.RPDelta.Excess.getUnitInc() << "\n");
 }

-/// Apply a set of heursitics to a new candidate. Heuristics are currently
+/// Apply a set of heuristics to a new candidate. Heuristics are currently
 /// hierarchical. This may be more efficient than a graduated cost model because
 /// we don't need to evaluate all aspects of the model for each node in the
 /// queue. But it's really done to make the heuristics easier to debug and
@@ -3239,7 +3239,7 @@ void PostGenericScheduler::registerRoots() {
   }
 }

-/// Apply a set of heursitics to a new candidate for PostRA scheduling.
+/// Apply a set of heuristics to a new candidate for PostRA scheduling.
 ///
 /// \param Cand provides the policy and current best candidate.
 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp
index d9ff76b..07b201b 100644
--- a/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -312,16 +312,16 @@ class RAGreedy : public MachineFunctionPass,

     /// Track new eviction.
     /// The Evictor vreg has evicted the Evictee vreg from Physreg.
-    /// \praram PhysReg The phisical register Evictee was evicted from.
-    /// \praram Evictor The evictor Vreg that evicted Evictee.
-    /// \praram Evictee The evictee Vreg.
+    /// \param PhysReg The phisical register Evictee was evicted from.
+    /// \param Evictor The evictor Vreg that evicted Evictee.
+    /// \param Evictee The evictee Vreg.
     void addEviction(unsigned PhysReg, unsigned Evictor, unsigned Evictee) {
       Evictees[Evictee].first = Evictor;
       Evictees[Evictee].second = PhysReg;
     }

     /// Return the Evictor Vreg which evicted Evictee Vreg from PhysReg.
-    /// \praram Evictee The evictee vreg.
+    /// \param Evictee The evictee vreg.
     /// \return The Evictor vreg which evicted Evictee vreg from PhysReg. 0 if
     /// nobody has evicted Evictee from PhysReg.
     EvictorInfo getEvictor(unsigned Evictee) {
@@ -989,7 +989,7 @@ bool RAGreedy::canEvictInterferenceInRange(LiveInterval &VirtReg,
   return true;
 }

-/// Return tthe physical register that will be best
+/// Return the physical register that will be best
 /// candidate for eviction by a local split interval that will be created
 /// between Start and End.
 ///
@@ -1406,7 +1406,7 @@ BlockFrequency RAGreedy::calcSpillCost() {
 /// Evictee %0 is intended for region splitting with split candidate
 /// physreg0 (the reg %0 was evicted from).
 /// Region splitting creates a local interval because of interference with the
-/// evictor %1 (normally region spliitting creates 2 interval, the "by reg"
+/// evictor %1 (normally region splitting creates 2 interval, the "by reg"
 /// and "by stack" intervals and local interval created when interference
 /// occurs).
 /// One of the split intervals ends up evicting %2 from physreg1.
@@ -1527,8 +1527,8 @@ bool RAGreedy::splitCanCauseLocalSpill(unsigned VirtRegToSplit,
     return false;
   }

-  // The local interval is not able to find non interferening assignment and not
-  // able to evict a less worthy interval, therfore, it can cause a spill.
+  // The local interval is not able to find non interferencing assignment and
+  // not able to evict a less worthy interval, therfore, it can cause a spill.
   return true;
 }

@@ -2599,7 +2599,7 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
     unsigned ItVirtReg = (*It)->reg;
     enqueue(RecoloringQueue, *It);
     assert(VRM->hasPhys(ItVirtReg) &&
-           "Interferences are supposed to be with allocated vairables");
+           "Interferences are supposed to be with allocated variables");

     // Record the current allocation.
     VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 5093881..c18f024 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -641,7 +641,7 @@ bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
   return false;
 }

-/// Copy segements with value number @p SrcValNo from liverange @p Src to live
+/// Copy segments with value number @p SrcValNo from liverange @p Src to live
 /// range @Dst and use value number @p DstValNo there.
 static void addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo,
                                  const LiveRange &Src, const VNInfo *SrcValNo) {
@@ -1187,7 +1187,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
        I != E; ++I) {
     MachineOperand &MO = CopyMI->getOperand(I);
     if (MO.isReg()) {
-      assert(MO.isImplicit() && "No explicit operands after implict operands.");
+      assert(MO.isImplicit() && "No explicit operands after implicit operands.");
       // Discard VReg implicit defs.
       if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
         ImplicitOps.push_back(MO);
@@ -1318,7 +1318,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
   //   %1 = somedef ; %1 GR8
   //   dead ECX = remat ; implicit-def CL
   //   = somedef %1 ; %1 GR8
-  // %1 will see the inteferences with CL but not with CH since
+  // %1 will see the interferences with CL but not with CH since
   // no live-ranges would have been created for ECX.
   // Fix that!
   SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
@@ -1368,8 +1368,8 @@
 }

 bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
-  // ProcessImpicitDefs may leave some copies of values, it only removes
-  // local variables. When we have a copy like:
+  // ProcessImplicitDefs may leave some copies of values, it only
+  // removes local variables. When we have a copy like:
   //
   //   %1 = COPY undef %2
   //
-- 
2.7.4