From e17e055b75de5d084eee64ecba2111bde629a519 Mon Sep 17 00:00:00 2001
From: Nirav Dave
Date: Wed, 28 Sep 2016 15:50:43 +0000
Subject: [PATCH] In visitSTORE, always use FindBetterChain, rather than only
 when UseAA is enabled.

Simplify Consecutive Merge Store Candidate Search

Now that address aliasing is much less conservative, push through a
simplified store-merging search which only checks for parallel stores
through the chain subgraph. This is cleaner, as it separates the
handling of non-interfering loads/stores from the store-merging logic.

When merging stores, search up the chain through a single load, and
find all possible stores by looking down through a load and a
TokenFactor to all stores visited. This improves the quality of the
output SelectionDAG and generally the output CodeGen (with some
exceptions).

Additional Minor Changes:

1. Finishes removing unused AliasLoad code.
2. Unifies the chain aggregation in the merged stores across
   code paths.
3. Re-adds the Store node to the worklist after calling
   SimplifyDemandedBits.
4. Increases GatherAllAliasesMaxDepth from 6 to 18. That number is
   arbitrary, but seemed sufficient to not cause regressions in tests.

This finishes the change Matt Arsenault started in r246307 and
jyknight's original patch.

Many tests required some changes as memory operations are now
reorderable. Some tests relying on the order were changed to use
volatile memory operations.

Noteworthy tests:

CodeGen/AArch64/argument-blocks.ll -
  It's not entirely clear what the test_varargs_stackalign test is
  supposed to be asserting, but the new code looks right.

CodeGen/AArch64/arm64-memset-inline.ll -
CodeGen/AArch64/arm64-stur.ll -
CodeGen/ARM/memset-inline.ll -
  The backend now generates *worse* code due to store merging
  succeeding, as we do not do a 16-byte constant-zero store
  efficiently.

CodeGen/AArch64/merge-store.ll -
  Improved, but there still seems to be an extraneous vector insert
  from an element to itself?

CodeGen/PowerPC/ppc64-align-long-double.ll -
  Worse code emitted in this case, due to the improved store->load
  forwarding.

CodeGen/X86/dag-merge-fast-accesses.ll -
CodeGen/X86/MergeConsecutiveStores.ll -
CodeGen/X86/stores-merging.ll -
CodeGen/Mips/load-store-left-right.ll -
  Restored correct merging of non-aligned stores.

CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll -
  Improved. Correctly merges buffer_store_dword calls.

CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll -
  Improved. Sidesteps loading a stored value and merges two stores.

CodeGen/X86/pr18023.ll -
  This test has been removed, as it was asserting incorrect
  behavior. Non-volatile stores *CAN* be moved past volatile loads,
  and now are.

CodeGen/X86/vector-idiv.ll -
CodeGen/X86/vector-lzcnt-128.ll -
  It's basically impossible to tell what these tests are actually
  testing. But it looks like the code got better due to the memory
  operations being recognized as non-aliasing.

CodeGen/X86/win32-eh.ll -
  Both loads of the security cookie are now merged.

CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll -
  This test appears to work but no longer exhibits the spill behavior.
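As a rough sketch of the new search (an illustration in plain C++, not
LLVM's SDNode API -- the Node struct and helper names below are invented
for this toy model): getStoreMergeCandidates climbs the chain through at
most one load to a root node, then walks one level back down to collect
every store hanging off the root's chained loads, or off the root itself
when the starting store's chain is not a load. The real code additionally
filters candidates by memory VT, base pointer, volatility, and indexing,
and leaves climbing through TokenFactors as a FIXME; the toy omits all of
that.

  #include <iostream>
  #include <string>
  #include <vector>

  // Toy stand-in for an SDNode: each node knows its chain operand and
  // which nodes use it as their chain operand.
  struct Node {
    enum Kind { Other, Load, Store } kind;
    std::string name;
    Node *chain = nullptr;          // chain operand (toward the root)
    std::vector<Node *> chainUsers; // nodes chained on this node
  };

  static void chainTo(Node &user, Node &pred) {
    user.chain = &pred;
    pred.chainUsers.push_back(&user);
  }

  static std::vector<Node *> storeMergeCandidates(Node &st) {
    // Climb up: the root is the store's chain, looking through one load.
    Node *root = st.chain;
    std::vector<Node *> parents;
    if (root->kind == Node::Load) {
      root = root->chain; // search up through a single load
      for (Node *user : root->chainUsers)
        if (user->kind == Node::Load) // walk down through sibling loads
          parents.push_back(user);
    } else {
      parents.push_back(root);
    }
    // Walk down: every store chained on one of the parents is a candidate.
    std::vector<Node *> candidates;
    for (Node *parent : parents)
      for (Node *user : parent->chainUsers)
        if (user->kind == Node::Store)
          candidates.push_back(user);
    return candidates;
  }

  int main() {
    // The diagram from the patch: Root above Load1/Load2/Store3, with
    // Store1 chained on Load1 and Store2 chained on Load2.
    Node root{Node::Other, "Root"};
    Node load1{Node::Load, "Load1"}, load2{Node::Load, "Load2"};
    Node store1{Node::Store, "Store1"}, store2{Node::Store, "Store2"};
    Node store3{Node::Store, "Store3"};
    chainTo(load1, root);
    chainTo(load2, root);
    chainTo(store3, root);
    chainTo(store1, load1);
    chainTo(store2, load2);

    for (Node *candidate : storeMergeCandidates(store1))
      std::cout << candidate->name << "\n"; // prints Store1, Store2
  }

Starting from Store1, the sketch climbs through Load1 to the root and
reports Store1 and Store2; starting from Store3, whose chain is the root
itself, it finds only the stores chained directly on the root.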
Reviewers: arsenm, hfinkel, tstellarAMD, nhaehnle, jyknight

Subscribers: wdng, nhaehnle, nemanjai, arsenm, weimingz, niravd, RKSimon,
aemerson, qcolombet, resistor, tstellarAMD, t.p.northover, spatel

Differential Revision: https://reviews.llvm.org/D14834

llvm-svn: 282600
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 391 +++------
 llvm/lib/CodeGen/TargetLoweringBase.cpp | 2 +-
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 10 -
 llvm/test/CodeGen/AArch64/argument-blocks.ll | 2 +-
 llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll | 12 +-
 llvm/test/CodeGen/AArch64/arm64-abi.ll | 5 +-
 llvm/test/CodeGen/AArch64/arm64-memset-inline.ll | 6 +-
 llvm/test/CodeGen/AArch64/arm64-stur.ll | 7 +-
 llvm/test/CodeGen/AArch64/merge-store.ll | 5 +-
 .../test/CodeGen/AArch64/vector_merge_dep_check.ll | 3 +-
 llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll | 3 -
 llvm/test/CodeGen/AMDGPU/debugger-insert-nops.ll | 24 +-
 llvm/test/CodeGen/AMDGPU/merge-stores.ll | 30 +-
 llvm/test/CodeGen/AMDGPU/private-element-size.ll | 8 +-
 .../CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll | 17 +-
 .../vgpr-spill-emergency-stack-slot-compute.ll | 7 +
 .../AMDGPU/vgpr-spill-emergency-stack-slot.ll | 6 +
 .../CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll | 3 +-
 llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll | 4 +-
 llvm/test/CodeGen/ARM/ifcvt10.ll | 2 -
 llvm/test/CodeGen/ARM/memset-inline.ll | 10 +-
 llvm/test/CodeGen/ARM/static-addr-hoisting.ll | 6 +-
 llvm/test/CodeGen/BPF/undef.ll | 60 +-
 llvm/test/CodeGen/MSP430/Inst16mm.ll | 2 +-
 llvm/test/CodeGen/Mips/cconv/arguments-float.ll | 24 +-
 llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll | 44 +-
 llvm/test/CodeGen/Mips/fastcc.ll | 77 +-
 llvm/test/CodeGen/Mips/load-store-left-right.ll | 126 +--
 llvm/test/CodeGen/Mips/micromips-li.ll | 2 +-
 llvm/test/CodeGen/Mips/mips64-f128.ll | 2 +-
 llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll | 46 +-
 llvm/test/CodeGen/Mips/msa/i5_ld_st.ll | 32 +-
 llvm/test/CodeGen/Mips/o32_cc_byval.ll | 46 +-
 llvm/test/CodeGen/Mips/o32_cc_vararg.ll | 4 +-
 llvm/test/CodeGen/PowerPC/anon_aggr.ll | 31 +-
 llvm/test/CodeGen/PowerPC/complex-return.ll | 12 +-
 llvm/test/CodeGen/PowerPC/jaggedstructs.ll | 16 +-
 .../CodeGen/PowerPC/ppc64-align-long-double.ll | 46 +-
 llvm/test/CodeGen/PowerPC/structsinmem.ll | 28 +-
 llvm/test/CodeGen/PowerPC/structsinregs.ll | 60 +-
 llvm/test/CodeGen/SystemZ/unaligned-01.ll | 5 +-
 .../test/CodeGen/Thumb/2010-07-15-debugOrdering.ll | 2 +-
 llvm/test/CodeGen/Thumb/stack-access.ll | 8 +-
 .../CodeGen/X86/2010-09-17-SideEffectsInChain.ll | 2 +-
 .../CodeGen/X86/2012-11-28-merge-store-alias.ll | 2 +-
 llvm/test/CodeGen/X86/MergeConsecutiveStores.ll | 14 +-
 llvm/test/CodeGen/X86/avx512-mask-op.ll | 4 -
 llvm/test/CodeGen/X86/chain_order.ll | 4 +-
 .../CodeGen/X86/clear_upper_vector_element_bits.ll | 56 +-
 llvm/test/CodeGen/X86/combiner-aa-0.ll | 20 -
 llvm/test/CodeGen/X86/combiner-aa-1.ll | 23 -
 llvm/test/CodeGen/X86/copy-eflags.ll | 17 +-
 llvm/test/CodeGen/X86/dag-merge-fast-accesses.ll | 12 +-
 .../X86/dont-trunc-store-double-to-float.ll | 6 +-
 .../extractelement-legalization-store-ordering.ll | 8 +-
 llvm/test/CodeGen/X86/i256-add.ll | 8 +-
 llvm/test/CodeGen/X86/i386-shrink-wrapping.ll | 5 +-
 llvm/test/CodeGen/X86/live-range-nosubreg.ll | 5 +-
 .../CodeGen/X86/merge-consecutive-loads-128.ll | 20 +-
 .../CodeGen/X86/merge-consecutive-loads-256.ll | 8 +-
 .../X86/merge-store-partially-alias-loads.ll | 8 +-
 llvm/test/CodeGen/X86/pr18023.ll | 31 -
 llvm/test/CodeGen/X86/split-store.ll | 20 +-
 llvm/test/CodeGen/X86/stores-merging.ll | 11 +-
 llvm/test/CodeGen/X86/vector-compare-results.ll | 730 ++++++++---------
 llvm/test/CodeGen/X86/vector-lzcnt-128.ll | 276 +++----
 .../CodeGen/X86/vector-shuffle-variable-128.ll | 906 +++++++++------------
 .../CodeGen/X86/vector-shuffle-variable-256.ll | 232 +++---
 llvm/test/CodeGen/X86/win32-eh.ll | 157 ++--
 llvm/test/CodeGen/XCore/varargs.ll | 2 +-
 70 files changed, 1735 insertions(+), 2088 deletions(-)
 delete mode 100644 llvm/test/CodeGen/X86/combiner-aa-0.ll
 delete mode 100644 llvm/test/CodeGen/X86/combiner-aa-1.ll
 delete mode 100644 llvm/test/CodeGen/X86/pr18023.ll

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d6f6185..08af326 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -52,10 +52,6 @@ STATISTIC(SlicedLoads, "Number of load sliced");
 namespace {
   static cl::opt<bool>
-    CombinerAA("combiner-alias-analysis", cl::Hidden,
-               cl::desc("Enable DAG combiner alias-analysis heuristics"));
-
-  static cl::opt<bool>
     CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
                      cl::desc("Enable DAG combiner's use of IR alias analysis"));
@@ -408,15 +404,12 @@ namespace {
   /// Holds a pointer to an LSBaseSDNode as well as information on where it
   /// is located in a sequence of memory operations connected by a chain.
   struct MemOpLink {
-    MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq):
-      MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { }
+    MemOpLink(LSBaseSDNode *N, int64_t Offset)
+        : MemNode(N), OffsetFromBase(Offset) {}
     // Ptr to the mem node.
     LSBaseSDNode *MemNode;
     // Offset from the base ptr.
     int64_t OffsetFromBase;
-    // What is the sequence number of this mem node.
-    // Lowest mem operand in the DAG starts at zero.
-    unsigned SequenceNum;
   };

   /// This is a helper function for visitMUL to check the profitability
@@ -431,7 +424,6 @@ namespace {
     /// constant build_vector of the stored constant values in Stores.
     SDValue getMergedConstantVectorStore(SelectionDAG &DAG, const SDLoc &SL,
                                          ArrayRef<MemOpLink> Stores,
-                                         SmallVectorImpl<SDValue> &Chains,
                                          EVT Ty) const;

     /// This is a helper function for visitAND and visitZERO_EXTEND. Returns
@@ -453,10 +445,8 @@ namespace {
     /// This is a helper function for MergeConsecutiveStores.
     /// Stores that may be merged are placed in StoreNodes.
-    /// Loads that may alias with those stores are placed in AliasLoadNodes.
-    void getStoreMergeAndAliasCandidates(
-        StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
-        SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes);
+    void getStoreMergeCandidates(StoreSDNode *St,
+                                 SmallVectorImpl<MemOpLink> &StoreNodes);

     /// Helper function for MergeConsecutiveStores. Checks if
     /// Candidate stores have indirect dependency through their
@@ -1606,11 +1596,9 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
       Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
     }

-    // Add users to worklist if AA is enabled, since it may introduce
-    // a lot of new chained token factors while removing memory deps.
-    bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                    : DAG.getSubtarget().useAA();
-    return CombineTo(N, Result, UseAA /*add to worklist*/);
+    // Add users to worklist, since we may introduce a lot of new
+    // chained token factors while removing memory deps.
+    return CombineTo(N, Result, true /*add to worklist*/);
   }

   return Result;
@@ -10169,11 +10157,22 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
       // TODO: Handle store large -> read small portion.
      // TODO: Handle TRUNCSTORE/LOADEXT
       if (ISD::isNormalLoad(N) && !LD->isVolatile()) {
-        if (ISD::isNON_TRUNCStore(Chain.getNode())) {
+        // Either a direct store, or a store off of a TokenFactor can be
+        // forwarded.
+        if (Chain->getOpcode() == ISD::TokenFactor) {
+          for (const SDValue &ChainOp : Chain->op_values()) {
+            if (ISD::isNON_TRUNCStore(ChainOp.getNode())) {
+              StoreSDNode *PrevST = cast<StoreSDNode>(ChainOp);
+              if (PrevST->getBasePtr() == Ptr &&
+                  PrevST->getValue().getValueType() == N->getValueType(0))
+                return CombineTo(N, PrevST->getOperand(1), Chain);
+            }
+          }
+        } else if (ISD::isNON_TRUNCStore(Chain.getNode())) {
           StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
           if (PrevST->getBasePtr() == Ptr &&
               PrevST->getValue().getValueType() == N->getValueType(0))
-            return CombineTo(N, Chain.getOperand(1), Chain);
+            return CombineTo(N, PrevST->getOperand(1), Chain);
         }
       }
@@ -10191,14 +10190,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
     }
   }

-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-#ifndef NDEBUG
-  if (CombinerAAOnlyFunc.getNumOccurrences() &&
-      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
-    UseAA = false;
-#endif
-  if (UseAA && LD->isUnindexed()) {
+  if (LD->isUnindexed()) {
     // Walk up chain skipping non-aliasing memory nodes.
     SDValue BetterChain = FindBetterChain(N, Chain);
@@ -11277,14 +11269,14 @@ bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode,
   return false;
 }

-SDValue DAGCombiner::getMergedConstantVectorStore(
-    SelectionDAG &DAG, const SDLoc &SL, ArrayRef<MemOpLink> Stores,
-    SmallVectorImpl<SDValue> &Chains, EVT Ty) const {
+SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG,
+                                                  const SDLoc &SL,
+                                                  ArrayRef<MemOpLink> Stores,
+                                                  EVT Ty) const {
   SmallVector<SDValue, 8> BuildVector;

   for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I) {
     StoreSDNode *St = cast<StoreSDNode>(Stores[I].MemNode);
-    Chains.push_back(St->getChain());
     BuildVector.push_back(St->getValue());
   }
@@ -11300,21 +11292,8 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
   int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
   LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;

-  unsigned LatestNodeUsed = 0;
-
-  for (unsigned i=0; i < NumStores; ++i) {
-    // Find a chain for the new wide-store operand. Notice that some
-    // of the store nodes that we found may not be selected for inclusion
-    // in the wide store. The chain we use needs to be the chain of the
-    // latest store node which is *used* and replaced by the wide store.
-    if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
-      LatestNodeUsed = i;
-  }
-
-  SmallVector<SDValue, 8> Chains;
   // The latest Node in the DAG.
-  LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;
   SDLoc DL(StoreNodes[0].MemNode);

   SDValue StoredVal;
@@ -11330,7 +11309,7 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
     assert(TLI.isTypeLegal(Ty) && "Illegal vector store");

     if (IsConstantSrc) {
-      StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Chains, Ty);
+      StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Ty);
     } else {
       SmallVector<SDValue, 8> Ops;
       for (unsigned i = 0; i < NumStores; ++i) {
@@ -11340,7 +11319,6 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
         if (Val.getValueType() != MemVT)
           return false;
         Ops.push_back(Val);
-        Chains.push_back(St->getChain());
       }

       // Build the extracted vector elements back into a vector.
@@ -11360,7 +11338,6 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
       for (unsigned i = 0; i < NumStores; ++i) {
         unsigned Idx = IsLE ?
                            (NumStores - 1 - i) : i;
     StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
-    Chains.push_back(St->getChain());
     SDValue Val = St->getValue();
     StoreInt <<= ElementSizeBytes * 8;
@@ -11378,7 +11355,11 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
     StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
   }

-  assert(!Chains.empty());
+  SmallVector<SDValue, 8> Chains;
+
+  // Gather all Chains we're inheriting
+  for (unsigned i = 0; i < NumStores; ++i)
+    Chains.push_back(StoreNodes[i].MemNode->getChain());

   SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
   SDValue NewStore = DAG.getStore(NewChain, DL, StoredVal,
                                   FirstInChain->getBasePtr(),
                                   FirstInChain->getPointerInfo(),
                                   FirstInChain->getAlignment());

-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-  if (UseAA) {
-    // Replace all merged stores with the new store.
-    for (unsigned i = 0; i < NumStores; ++i)
-      CombineTo(StoreNodes[i].MemNode, NewStore);
-  } else {
-    // Replace the last store with the new store.
-    CombineTo(LatestOp, NewStore);
-    // Erase all other stores.
-    for (unsigned i = 0; i < NumStores; ++i) {
-      if (StoreNodes[i].MemNode == LatestOp)
-        continue;
-      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
-      // ReplaceAllUsesWith will replace all uses that existed when it was
-      // called, but graph optimizations may cause new ones to appear. For
-      // example, the case in pr14333 looks like
-      //
-      // St's chain -> St -> another store -> X
-      //
-      // And the only difference from St to the other store is the chain.
-      // When we change it's chain to be St's chain they become identical,
-      // get CSEed and the net result is that X is now a use of St.
-      // Since we know that St is redundant, just iterate.
-      while (!St->use_empty())
-        DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
-      deleteAndRecombine(St);
-    }
-  }
+  // Replace all merged stores with the new store
+  for (unsigned i = 0; i < NumStores; ++i)
+    CombineTo(StoreNodes[i].MemNode, NewStore);

   return true;
 }

-void DAGCombiner::getStoreMergeAndAliasCandidates(
-    StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
-    SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) {
+void DAGCombiner::getStoreMergeCandidates(
+    StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes) {
   // This holds the base pointer, index, and the offset in bytes from the base
   // pointer.
   BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);
+  EVT MemVT = St->getMemoryVT();

   // We must have a base and an offset.
   if (!BasePtr.Base.getNode())
@@ -11434,104 +11389,49 @@ void DAGCombiner::getStoreMergeAndAliasCandidates(
   if (BasePtr.Base.isUndef())
     return;

-  // Walk up the chain and look for nodes with offsets from the same
-  // base pointer. Stop when reaching an instruction with a different kind
-  // or instruction which has a different base pointer.
-  EVT MemVT = St->getMemoryVT();
-  unsigned Seq = 0;
-  StoreSDNode *Index = St;
-
-
-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-
-  if (UseAA) {
-    // Look at other users of the same chain. Stores on the same chain do not
-    // alias. If combiner-aa is enabled, non-aliasing stores are canonicalized
-    // to be on the same chain, so don't bother looking at adjacent chains.
-
-    SDValue Chain = St->getChain();
-    for (auto I = Chain->use_begin(), E = Chain->use_end(); I != E; ++I) {
-      if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
-        if (I.getOperandNo() != 0)
-          continue;
-
-        if (OtherST->isVolatile() || OtherST->isIndexed())
-          continue;
-
-        if (OtherST->getMemoryVT() != MemVT)
-          continue;
-
-        BaseIndexOffset Ptr = BaseIndexOffset::match(OtherST->getBasePtr(), DAG);
-
-        if (Ptr.equalBaseIndex(BasePtr))
-          StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset, Seq++));
-      }
-    }
-
-    return;
-  }
-
-  while (Index) {
-    // If the chain has more than one use, then we can't reorder the mem ops.
-    if (Index != St && !SDValue(Index, 0)->hasOneUse())
-      break;
-
-    // Find the base pointer and offset for this memory node.
-    BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG);
-
-    // Check that the base pointer is the same as the original one.
-    if (!Ptr.equalBaseIndex(BasePtr))
-      break;
-
-    // The memory operands must not be volatile.
-    if (Index->isVolatile() || Index->isIndexed())
-      break;
-
-    // No truncation.
-    if (Index->isTruncatingStore())
-      break;
+  // We are looking for a root node which is an ancestor to all mergeable
+  // stores. We search up through a load, to our root, and then down
+  // through all children. For instance we will find Store{1,2,3} if
+  // St is Store1, Store2, or Store3 where the root is not a load,
+  // which is always true for non-volatile ops. TODO: Expand
+  // the search to find all valid candidates through multiple layers of loads.
+  //
+  // Root
+  // |-------|-------|
+  // Load    Load    Store3
+  // |       |
+  // Store1  Store2
+  //
+  // FIXME: We should be able to climb and
+  // descend TokenFactors to find candidates as well.

-    // The stored memory type must be the same.
-    if (Index->getMemoryVT() != MemVT)
-      break;
+  SDNode *RootNode = (St->getChain()).getNode();

-    // We do not allow under-aligned stores in order to prevent
-    // overriding stores. NOTE: this is a bad hack. Alignment SHOULD
-    // be irrelevant here; what MATTERS is that we not move memory
-    // operations that potentially overlap past each-other.
-    if (Index->getAlignment() < MemVT.getStoreSize())
-      break;
+  // Set of Parents of Candidates
+  std::set<SDNode *> CandidateParents;

-    // We found a potential memory operand to merge.
-    StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++));
-
-    // Find the next memory operand in the chain. If the next operand in the
-    // chain is a store then move up and continue the scan with the next
-    // memory operand. If the next operand is a load save it and use alias
-    // information to check if it interferes with anything.
-    SDNode *NextInChain = Index->getChain().getNode();
-    while (1) {
-      if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
-        // We found a store node. Use it for the next iteration.
-        Index = STn;
-        break;
-      } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
-        if (Ldn->isVolatile()) {
-          Index = nullptr;
-          break;
+  if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
+    RootNode = Ldn->getChain().getNode();
+    for (auto I = RootNode->use_begin(), E = RootNode->use_end(); I != E; ++I)
+      if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) // walk down chain
+        CandidateParents.insert(*I);
+  } else
+    CandidateParents.insert(RootNode);
+
+  // Check all parents of mergeable children
+  for (auto P = CandidateParents.begin(); P != CandidateParents.end(); ++P)
+    for (auto I = (*P)->use_begin(), E = (*P)->use_end(); I != E; ++I)
+      if (I.getOperandNo() == 0)
+        if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
+          if (OtherST->isVolatile() || OtherST->isIndexed())
+            continue;
+          if (OtherST->getMemoryVT() != MemVT)
+            continue;
+          BaseIndexOffset Ptr =
+              BaseIndexOffset::match(OtherST->getBasePtr(), DAG);
+          if (Ptr.equalBaseIndex(BasePtr))
+            StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset));
         }
-
-        // Save the load node for later. Continue the scan.
-        AliasLoadNodes.push_back(Ldn);
-        NextInChain = Ldn->getChain().getNode();
-        continue;
-      } else {
-        Index = nullptr;
-        break;
-      }
-    }
-  }
 }

 // We need to check that merging these stores does not cause a loop
@@ -11592,67 +11492,36 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
   if (MemVT.isVector() && IsLoadSrc)
     return false;

-  // Only look at ends of store sequences.
-  SDValue Chain = SDValue(St, 0);
-  if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
-    return false;
-
-  // Save the LoadSDNodes that we find in the chain.
-  // We need to make sure that these nodes do not interfere with
-  // any of the store nodes.
-  SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
-
-  // Save the StoreSDNodes that we find in the chain.
+  // Find potential store merge candidates by searching through chain sub-DAG
   SmallVector<MemOpLink, 8> StoreNodes;
-
-  getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes);
+  getStoreMergeCandidates(St, StoreNodes);

   // Check if there is anything to merge.
   if (StoreNodes.size() < 2)
     return false;

-  // only do dependence check in AA case
-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-  if (UseAA && !checkMergeStoreCandidatesForDependencies(StoreNodes))
+  // Check that we can merge these candidates without causing a cycle
+  if (!checkMergeStoreCandidatesForDependencies(StoreNodes))
     return false;

   // Sort the memory operands according to their distance from the
-  // base pointer. As a secondary criteria: make sure stores coming
-  // later in the code come first in the list. This is important for
-  // the non-UseAA case, because we're merging stores into the FINAL
-  // store along a chain which potentially contains aliasing stores.
-  // Thus, if there are multiple stores to the same address, the last
-  // one can be considered for merging but not the others.
+  // base pointer.
   std::sort(StoreNodes.begin(), StoreNodes.end(),
             [](MemOpLink LHS, MemOpLink RHS) {
-    return LHS.OffsetFromBase < RHS.OffsetFromBase ||
-           (LHS.OffsetFromBase == RHS.OffsetFromBase &&
-            LHS.SequenceNum < RHS.SequenceNum);
-  });
+              return LHS.OffsetFromBase < RHS.OffsetFromBase;
+            });

   // Scan the memory operations on the chain and find the first non-consecutive
   // store memory address.
  unsigned LastConsecutiveStore = 0;
   int64_t StartAddress = StoreNodes[0].OffsetFromBase;
-  for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) {
-
-    // Check that the addresses are consecutive starting from the second
-    // element in the list of stores.
-    if (i > 0) {
-      int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
-      if (CurrAddress - StartAddress != (ElementSizeBytes * i))
-        break;
-    }
-    // Check if this store interferes with any of the loads that we found.
-    // If we find a load that alias with this store. Stop the sequence.
-    if (any_of(AliasLoadNodes, [&](LSBaseSDNode *Ldn) {
-          return isAlias(Ldn, StoreNodes[i].MemNode);
-        }))
+  // Check that the addresses are consecutive starting from the second
+  // element in the list of stores.
+  for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) {
+    int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
+    if (CurrAddress - StartAddress != (ElementSizeBytes * i))
       break;
-
-    // Mark this node as useful.
     LastConsecutiveStore = i;
   }
@@ -11806,7 +11675,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
     }

     // We found a potential memory operand to merge.
-    LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0));
+    LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset));
   }

   if (LoadNodes.size() < 2)
@@ -11895,22 +11764,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
   // Collect the chains from all merged stores.
   SmallVector<SDValue, 8> MergeStoreChains;
-  MergeStoreChains.push_back(StoreNodes[0].MemNode->getChain());
-
-  // The latest Node in the DAG.
-  unsigned LatestNodeUsed = 0;
-
-  for (unsigned i=1; i<NumElem; ++i) {
-    // Find a chain for the new wide-store operand. Notice that some
-    // of the store nodes that we found may not be selected for inclusion
-    // in the wide store. The chain we use needs to be the chain of the
-    // latest store node which is *used* and replaced by the wide store.
-    if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
-      LatestNodeUsed = i;
-
+  for (unsigned i = 0; i < NumElem; ++i)
     MergeStoreChains.push_back(StoreNodes[i].MemNode->getChain());
-  }
-
-  LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;

   // Find if it is better to use vectors or integers to load and store
   // to memory.
@@ -11945,23 +11800,9 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
                               SDValue(NewLoad.getNode(), 1));
   }

-  if (UseAA) {
-    // Replace the all stores with the new store.
-    for (unsigned i = 0; i < NumElem; ++i)
-      CombineTo(StoreNodes[i].MemNode, NewStore);
-  } else {
-    // Replace the last store with the new store.
-    CombineTo(LatestOp, NewStore);
-    // Erase all other stores.
-    for (unsigned i = 0; i < NumElem; ++i) {
-      // Remove all Store nodes.
-      if (StoreNodes[i].MemNode == LatestOp)
-        continue;
-      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
-      DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
-      deleteAndRecombine(St);
-    }
-  }
+  // Replace all the stores with the new store.
+  for (unsigned i = 0; i < NumElem; ++i)
+    CombineTo(StoreNodes[i].MemNode, NewStore);

   return true;
 }
@@ -12119,19 +11960,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
   if (SDValue NewST = TransformFPLoadStorePair(N))
     return NewST;

-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-#ifndef NDEBUG
-  if (CombinerAAOnlyFunc.getNumOccurrences() &&
-      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
-    UseAA = false;
-#endif
-  if (UseAA && ST->isUnindexed()) {
-    // FIXME: We should do this even without AA enabled. AA will just allow
-    // FindBetterChain to work in more situations. The problem with this is that
-    // any combine that expects memory operations to be on consecutive chains
-    // first needs to be updated to look for users of the same chain.
-
+  if (ST->isUnindexed()) {
     // Walk up chain skipping non-aliasing memory nodes, on this store and any
     // adjacent stores.
    if (findBetterNeighborChains(ST)) {
@@ -12165,8 +11994,13 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
     if (SimplifyDemandedBits(
             Value,
             APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
-                                 ST->getMemoryVT().getScalarSizeInBits())))
+                                 ST->getMemoryVT().getScalarSizeInBits()))) {
+      // Re-visit the store if anything changed; SimplifyDemandedBits
+      // will add Value's node back to the worklist if necessary, but
+      // we also need to re-visit the Store node itself.
+      AddToWorklist(N);
       return SDValue(N, 0);
+    }
   }

   // If this is a load followed by a store to the same location, then the store
@@ -15159,6 +14993,18 @@ SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
   return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
 }

+// This function tries to collect a bunch of potentially interesting
+// nodes to improve the chains of, all at once. This might seem
+// redundant, as this function gets called when visiting every store
+// node, so why not let the work be done on each store as it's visited?
+//
+// I believe this is mainly important because MergeConsecutiveStores
+// is unable to deal with merging stores of different sizes, so unless
+// we improve the chains of all the potential candidates up-front
+// before running MergeConsecutiveStores, it might only see some of
+// the nodes that will eventually be candidates, and then not be able
+// to go from a partially-merged state to the desired final
+// fully-merged state.
 bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
   // This holds the base pointer, index, and the offset in bytes from the base
   // pointer.
@@ -15194,10 +15040,8 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
     if (!Ptr.equalBaseIndex(BasePtr))
       break;

-    // Find the next memory operand in the chain. If the next operand in the
-    // chain is a store then move up and continue the scan with the next
-    // memory operand. If the next operand is a load save it and use alias
-    // information to check if it interferes with anything.
+    // Walk up the chain to find the next store node, ignoring any
+    // intermediate loads. Any other kind of node will halt the loop.
     SDNode *NextInChain = Index->getChain().getNode();
     while (true) {
       if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
@@ -15216,9 +15060,14 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
         Index = nullptr;
         break;
       }
-    }
+    } // end while
   }

+  // At this point, ChainedStores lists all of the Store nodes
+  // reachable by iterating up through chain nodes matching the above
+  // conditions. For each such store identified, try to find an
+  // earlier chain to attach the store to that won't violate the
+  // required ordering.
  bool MadeChangeToSt = false;
   SmallVector<std::pair<StoreSDNode *, SDValue>, 8> BetterChains;
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index be66f7e..0016964 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -824,7 +824,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
   MinFunctionAlignment = 0;
   PrefFunctionAlignment = 0;
   PrefLoopAlignment = 0;
-  GatherAllAliasesMaxDepth = 6;
+  GatherAllAliasesMaxDepth = 18;
   MinStackArgumentAlignment = 1;
   MinimumJumpTableEntries = 4;
   // TODO: the default will be switched to 0 in the next commit, along
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index e214164..26a136e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -446,16 +446,6 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   setSelectIsExpensive(false);
   PredictableSelectIsExpensive = false;

-  // We want to find all load dependencies for long chains of stores to enable
-  // merging into very wide vectors. The problem is with vectors with > 4
-  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
-  // vectors are a legal type, even though we have to split the loads
-  // usually. When we can more precisely specify load legality per address
-  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
-  // smarter so that they can figure out what to do in 2 iterations without all
-  // N > 4 stores on the same chain.
-  GatherAllAliasesMaxDepth = 16;
-
   // FIXME: Need to really handle these.
   MaxStoresPerMemcpy = 4096;
   MaxStoresPerMemmove = 4096;
diff --git a/llvm/test/CodeGen/AArch64/argument-blocks.ll b/llvm/test/CodeGen/AArch64/argument-blocks.ll
index 3169abc..0eaf15b 100644
--- a/llvm/test/CodeGen/AArch64/argument-blocks.ll
+++ b/llvm/test/CodeGen/AArch64/argument-blocks.ll
@@ -62,7 +62,7 @@ define i64 @test_hfa_ignores_gprs([7 x float], [2 x float] %in, i64, i64 %res) {
 ; but should go in an 8-byte aligned slot.
 define void @test_varargs_stackalign() {
 ; CHECK-LABEL: test_varargs_stackalign:
-; CHECK-DARWINPCS: stp {{w[0-9]+}}, {{w[0-9]+}}, [sp, #16]
+; CHECK-DARWINPCS: str {{x[0-9]+}}, [sp, #16]

   call void(...) @callee([3 x float] undef, [2 x float] [float 1.0, float 2.0])
   ret void
diff --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
index a29f8c4..12ae335 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -6,17 +6,13 @@ define void @fn9(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...)
nounwind noinline ssp { ; CHECK-LABEL: fn9: ; 9th fixed argument -; CHECK: ldr {{w[0-9]+}}, [sp, #64] -; CHECK: add [[ARGS:x[0-9]+]], sp, #72 -; CHECK: add {{x[0-9]+}}, [[ARGS]], #8 +; CHECK: add x[[ADDR:[0-9]+]], sp, #72 ; First vararg -; CHECK: ldr {{w[0-9]+}}, [sp, #72] -; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8 +; CHECK-DAG: ldr {{w[0-9]+}}, [sp, #72] ; Second vararg -; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}] -; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8 +; CHECK-DAG: ldr {{w[0-9]+}}, [x[[ADDR]]] ; Third vararg -; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}] +; CHECK-DAG: ldr {{w[0-9]+}}, [x[[ADDR]]], #8 %1 = alloca i32, align 4 %2 = alloca i32, align 4 %3 = alloca i32, align 4 diff --git a/llvm/test/CodeGen/AArch64/arm64-abi.ll b/llvm/test/CodeGen/AArch64/arm64-abi.ll index fb52b1d..6cf0ab3 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi.ll @@ -205,10 +205,7 @@ declare i32 @args_i32(i32, i32, i32, i32, i32, i32, i32, i32, i16 signext, i32, define i32 @test8(i32 %argc, i8** nocapture %argv) nounwind { entry: ; CHECK-LABEL: test8 -; CHECK: strb {{w[0-9]+}}, [sp, #3] -; CHECK: strb wzr, [sp, #2] -; CHECK: strb {{w[0-9]+}}, [sp, #1] -; CHECK: strb wzr, [sp] +; CHECK: str w8, [sp] ; CHECK: bl ; FAST-LABEL: test8 ; FAST: strb {{w[0-9]+}}, [sp] diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll index 8f22f97..c501818 100644 --- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll +++ b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll @@ -9,11 +9,15 @@ entry: ret void } +; FIXME: This shouldn't need to load in a zero value to store +; (e.g. stp xzr,xzr [sp, #16]) + define void @t2() nounwind ssp { entry: ; CHECK-LABEL: t2: +; CHECK: movi v0.2d, #0000000000000000 +; CHECK: stur q0, [sp, #16] ; CHECK: strh wzr, [sp, #32] -; CHECK: stp xzr, xzr, [sp, #16] ; CHECK: str xzr, [sp, #8] %buf = alloca [26 x i8], align 1 %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0 diff --git a/llvm/test/CodeGen/AArch64/arm64-stur.ll b/llvm/test/CodeGen/AArch64/arm64-stur.ll index 4a3229a..a0d335b 100644 --- a/llvm/test/CodeGen/AArch64/arm64-stur.ll +++ b/llvm/test/CodeGen/AArch64/arm64-stur.ll @@ -47,11 +47,14 @@ define void @foo5(i8* %p, i32 %val) nounwind { ret void } +;; FIXME: Again, with the writing of a quadword zero... + define void @foo(%struct.X* nocapture %p) nounwind optsize ssp { ; CHECK-LABEL: foo: ; CHECK-NOT: str -; CHECK: stur xzr, [x0, #12] -; CHECK-NEXT: stur xzr, [x0, #4] +; CHECK: stur q0, [x0, #4] +; CHECK-FIXME: stur xzr, [x0, #12] +; CHECK-FIXME-NEXT: stur xzr, [x0, #4] ; CHECK-NEXT: ret %B = getelementptr inbounds %struct.X, %struct.X* %p, i64 0, i32 1 %val = bitcast i64* %B to i8* diff --git a/llvm/test/CodeGen/AArch64/merge-store.ll b/llvm/test/CodeGen/AArch64/merge-store.ll index 1d0196a..7c4e835 100644 --- a/llvm/test/CodeGen/AArch64/merge-store.ll +++ b/llvm/test/CodeGen/AArch64/merge-store.ll @@ -4,8 +4,9 @@ @g0 = external global <3 x float>, align 16 @g1 = external global <3 x float>, align 4 -; CHECK: ldr s[[R0:[0-9]+]], {{\[}}[[R1:x[0-9]+]]{{\]}}, #4 -; CHECK: ld1{{\.?s?}} { v[[R0]]{{\.?s?}} }[1], {{\[}}[[R1]]{{\]}} +; CHECK: ldr q[[R0:[0-9]+]], {{\[}}[[R1:x[0-9]+]], :lo12:g0 +;; TODO: this next line seems like a redundant no-op move? 
+; CHECK: ins v0.s[1], v0.s[1] ; CHECK: str d[[R0]] define void @blam() { diff --git a/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll b/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll index 9220947..e4e64ef 100644 --- a/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll +++ b/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll @@ -1,5 +1,4 @@ -; RUN: llc --combiner-alias-analysis=false < %s | FileCheck %s -; RUN: llc --combiner-alias-analysis=true < %s | FileCheck %s +; RUN: llc < %s | FileCheck %s ; This test checks that we do not merge stores together which have ; dependencies through their non-chain operands (e.g. one store is the diff --git a/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll b/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll index dcd48c9..50f081b 100644 --- a/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll +++ b/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll @@ -88,12 +88,9 @@ define void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out ; SI-DAG: v_cvt_f32_ubyte2_e32 ; SI-DAG: v_cvt_f32_ubyte3_e32 -; SI-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 24 -; SI-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 16 ; SI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16 ; SI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 8 ; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xffff, -; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xff00, ; SI-DAG: v_add_i32 ; SI: buffer_store_dwordx4 diff --git a/llvm/test/CodeGen/AMDGPU/debugger-insert-nops.ll b/llvm/test/CodeGen/AMDGPU/debugger-insert-nops.ll index 6638f4e..7be7d94 100644 --- a/llvm/test/CodeGen/AMDGPU/debugger-insert-nops.ll +++ b/llvm/test/CodeGen/AMDGPU/debugger-insert-nops.ll @@ -1,13 +1,21 @@ -; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK +; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECKNOP -; CHECK: test01.cl:2:{{[0-9]+}} -; CHECK-NEXT: s_nop 0 +; This test expects that we have one instance for each line in some order with "s_nop 0" instances after each. 
-; CHECK: test01.cl:3:{{[0-9]+}}
-; CHECK-NEXT: s_nop 0
+; Check that each line appears at least once
+; CHECK-DAG: test01.cl:2:3
+; CHECK-DAG: test01.cl:3:3
+; CHECK-DAG: test01.cl:4:3

-; CHECK: test01.cl:4:{{[0-9]+}}
-; CHECK-NEXT: s_nop 0
+
+
+; Check that each of the lines consists of the line output, followed by "s_nop 0"
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0

 ; CHECK: test01.cl:5:{{[0-9]+}}
 ; CHECK-NEXT: s_nop 0
@@ -21,7 +29,7 @@ entry:
   call void @llvm.dbg.declare(metadata i32 addrspace(1)** %A.addr, metadata !17, metadata !18), !dbg !19
   %0 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !20
   %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i32 0, !dbg !20
-  store i32 1, i32 addrspace(1)* %arrayidx, align 4, !dbg !21
+  store i32 1, i32 addrspace(1)* %arrayidx, align 4, !dbg !20
   %1 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !22
   %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %1, i32 1, !dbg !22
   store i32 2, i32 addrspace(1)* %arrayidx1, align 4, !dbg !23
diff --git a/llvm/test/CodeGen/AMDGPU/merge-stores.ll b/llvm/test/CodeGen/AMDGPU/merge-stores.ll
index 96359f8..c870b76 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-stores.ll
+++ b/llvm/test/CodeGen/AMDGPU/merge-stores.ll
@@ -1,8 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-NOAA %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-NOAA %s
-
-; RUN: llc -march=amdgcn -verify-machineinstrs -combiner-alias-analysis -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -combiner-alias-analysis -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -march=amdgcn -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s

 ; This test is mostly to test DAG store merging, so disable the vectorizer.
 ; Run with devices with different unaligned load restrictions.
@@ -149,17 +146,10 @@ define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 { ret void } -; FIXME: Should be able to merge this ; GCN-LABEL: {{^}}merge_global_store_4_constants_mixed_i32_f32: -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v - -; GCN-AA: buffer_store_dwordx2 -; GCN-AA: buffer_store_dword v -; GCN-AA: buffer_store_dword v - +; GCN: buffer_store_dwordx2 +; GCN: buffer_store_dword v +; GCN: buffer_store_dword v ; GCN: s_endpgm define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 { %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1 @@ -478,17 +468,9 @@ define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1 ret void } -; This works once AA is enabled on the subtarget ; GCN-LABEL: {{^}}merge_global_store_4_vector_elts_loads_v4i32: ; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]] - -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v - -; GCN-AA: buffer_store_dwordx4 [[LOAD]] - +; GCN: buffer_store_dwordx4 [[LOAD]] ; GCN: s_endpgm define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 { %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1 diff --git a/llvm/test/CodeGen/AMDGPU/private-element-size.ll b/llvm/test/CodeGen/AMDGPU/private-element-size.ll index cd8fb22..d51083f 100644 --- a/llvm/test/CodeGen/AMDGPU/private-element-size.ll +++ b/llvm/test/CodeGen/AMDGPU/private-element-size.ll @@ -32,10 +32,10 @@ ; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:24{{$}} ; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:28{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}} define void @private_elt_size_v4i32(<4 x i32> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() diff --git a/llvm/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll b/llvm/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll index 8523f1c..af2dcf2 100644 --- a/llvm/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll +++ b/llvm/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll @@ -156,9 +156,8 @@ define void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out, ; FUNC-LABEL: @reorder_local_offsets ; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:100 offset1:102 -; CI: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset0:3 offset1:100 -; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12 -; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:408 +; CI-DAG: ds_write2_b32 {{v[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:3 offset1:100 +; CI-DAG: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} 
offset:408 ; CI: buffer_store_dword ; CI: s_endpgm define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 { @@ -180,12 +179,12 @@ define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspa } ; FUNC-LABEL: @reorder_global_offsets -; CI: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400 -; CI: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408 -; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12 -; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400 -; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408 -; CI: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12 +; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400 +; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408 +; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12 +; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400 +; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408 +; CI: buffer_store_dword ; CI: s_endpgm define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 { %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3 diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll index 6c33bc9..da12d6b 100644 --- a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll +++ b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll @@ -3,6 +3,13 @@ ; RUN: llc -march=amdgcn -mcpu=hawaii -mtriple=amdgcn-unknown-amdhsa -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CIHSA -check-prefix=HSA %s ; RUN: llc -march=amdgcn -mcpu=fiji -mtriple=amdgcn-unknown-amdhsa -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VIHSA -check-prefix=HSA %s +; FIXME: this fails because the load generated from extractelement is +;; now properly recognized as forwardable to the value stored in +;; insertelement, and thus the loads/stores drop away entirely. This +;; makes the intended test, of running out of registers, not occur. + +;; XFAIL: * + ; This ends up using all 256 registers and requires register ; scavenging which will fail to find an unsued register. diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll index 52fa0be..886a465 100644 --- a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll +++ b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll @@ -1,6 +1,12 @@ ; RUN: llc -march=amdgcn -mcpu=tahiti -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s ; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s +;; FIXME: this fails because the load generated from extractelement is +;; now properly recognized as forwardable to the value stored in +;; insertelement, and thus the loads/stores drop away entirely. This +;; makes the intended test, of running out of registers, not occur. 
+;; XFAIL: * + ; This ends up using all 255 registers and requires register ; scavenging which will fail to find an unsued register. diff --git a/llvm/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll b/llvm/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll index 4a1341c..d2ff09a 100644 --- a/llvm/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll +++ b/llvm/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll @@ -12,7 +12,8 @@ define void @test_byval_8_bytes_alignment(i32 %i, ...) { entry: ; CHECK: sub sp, sp, #12 ; CHECK: sub sp, sp, #4 -; CHECK: stmib sp, {r1, r2, r3} +; CHECK: add r0, sp, #4 +; CHECK: stm sp, {r0, r1, r2, r3} %g = alloca i8* %g1 = bitcast i8** %g to i8* call void @llvm.va_start(i8* %g1) diff --git a/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll b/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll index 600fb6a..0a20dff 100644 --- a/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll +++ b/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll @@ -51,12 +51,12 @@ entry: ; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] +; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] ; REALIGN: orr r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] ; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #32 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #16 -; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] +; REALIGN: orr r[[R1:[0-9]+]], r[[R1]], #16 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] ; REALIGN: add r[[R1:[0-9]+]], r[[R0:0]], #48 diff --git a/llvm/test/CodeGen/ARM/ifcvt10.ll b/llvm/test/CodeGen/ARM/ifcvt10.ll index 5725a40..c7e18d3 100644 --- a/llvm/test/CodeGen/ARM/ifcvt10.ll +++ b/llvm/test/CodeGen/ARM/ifcvt10.ll @@ -10,8 +10,6 @@ entry: ; CHECK: vpop {d8} ; CHECK-NOT: vpopne ; CHECK: pop {r7, pc} -; CHECK: vpop {d8} -; CHECK: pop {r7, pc} br i1 undef, label %if.else, label %if.then if.then: ; preds = %entry diff --git a/llvm/test/CodeGen/ARM/memset-inline.ll b/llvm/test/CodeGen/ARM/memset-inline.ll index f6f8d56..cc2b1a8 100644 --- a/llvm/test/CodeGen/ARM/memset-inline.ll +++ b/llvm/test/CodeGen/ARM/memset-inline.ll @@ -3,9 +3,15 @@ define void @t1(i8* nocapture %c) nounwind optsize { entry: ; CHECK-LABEL: t1: + +;; FIXME: like with arm64-memset-inline.ll, learning how to merge +;; stores made this code worse, since it now uses a vector move, +;; instead of just using an strd instruction taking two registers. + +; CHECK: vmov.i32 d16, #0x0 +; CHECK: vst1.32 {d16}, [r0:64]! 
; CHECK: movs r1, #0 -; CHECK: strd r1, r1, [r0] -; CHECK: str r1, [r0, #8] +; CHECK: str r1, [r0] call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false) ret void } diff --git a/llvm/test/CodeGen/ARM/static-addr-hoisting.ll b/llvm/test/CodeGen/ARM/static-addr-hoisting.ll index 3d47e02..683d607 100644 --- a/llvm/test/CodeGen/ARM/static-addr-hoisting.ll +++ b/llvm/test/CodeGen/ARM/static-addr-hoisting.ll @@ -6,9 +6,9 @@ define void @multiple_store() { ; CHECK: movs [[VAL:r[0-9]+]], #42 ; CHECK: movt r[[BASE1]], #15 -; CHECK: str [[VAL]], [r[[BASE1]]] -; CHECK: str [[VAL]], [r[[BASE1]], #24] -; CHECK: str.w [[VAL]], [r[[BASE1]], #42] +; CHECK-DAG: str [[VAL]], [r[[BASE1]]] +; CHECK-DAG: str [[VAL]], [r[[BASE1]], #24] +; CHECK-DAG: str.w [[VAL]], [r[[BASE1]], #42] ; CHECK: movw r[[BASE2:[0-9]+]], #20394 ; CHECK: movt r[[BASE2]], #18 diff --git a/llvm/test/CodeGen/BPF/undef.ll b/llvm/test/CodeGen/BPF/undef.ll index ef712c4..9e4223d 100644 --- a/llvm/test/CodeGen/BPF/undef.ll +++ b/llvm/test/CodeGen/BPF/undef.ll @@ -12,51 +12,51 @@ @llvm.used = appending global [6 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @_license, i32 0, i32 0), i8* bitcast (i32 (%struct.__sk_buff*)* @ebpf_filter to i8*), i8* bitcast (%struct.bpf_map_def* @routing to i8*), i8* bitcast (%struct.bpf_map_def* @routing_miss_0 to i8*), i8* bitcast (%struct.bpf_map_def* @test1 to i8*), i8* bitcast (%struct.bpf_map_def* @test1_miss_4 to i8*)], section "llvm.metadata" ; Function Attrs: nounwind uwtable +; CHECK: mov r2, r10 +; CHECK: addi r2, -2 +; CHECK: mov r1, 0 +; CHECK: sth 6(r2), r1 +; CHECK: sth 4(r2), r1 +; CHECK: sth 2(r2), r1 +; CHECK: mov r2, 6 +; CHECK: stb -7(r10), r2 +; CHECK: mov r2, 5 +; CHECK: stb -8(r10), r2 +; CHECK: mov r2, 7 +; CHECK: stb -6(r10), r2 +; CHECK: mov r2, 8 +; CHECK: stb -5(r10), r2 +; CHECK: mov r2, 9 +; CHECK: stb -4(r10), r2 +; CHECK: mov r2, 10 +; CHECK: stb -3(r10), r2 +; CHECK: sth 24(r10), r1 +; CHECK: sth 22(r10), r1 +; CHECK: sth 20(r10), r1 +; CHECK: sth 18(r10), r1 +; CHECK: sth 16(r10), r1 +; CHECK: sth 14(r10), r1 +; CHECK: sth 12(r10), r1 +; CHECK: sth 10(r10), r1 +; CHECK: sth 8(r10), r1 +; CHECK: sth 6(r10), r1 +; CHECK: sth -2(r10), r1 +; CHECK: sth 26(r10), r1 define i32 @ebpf_filter(%struct.__sk_buff* nocapture readnone %ebpf_packet) #0 section "socket1" { %key = alloca %struct.routing_key_2, align 1 %1 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 0 -; CHECK: mov r1, 5 -; CHECK: stb -8(r10), r1 store i8 5, i8* %1, align 1 %2 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 1 -; CHECK: mov r1, 6 -; CHECK: stb -7(r10), r1 store i8 6, i8* %2, align 1 %3 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 2 -; CHECK: mov r1, 7 -; CHECK: stb -6(r10), r1 store i8 7, i8* %3, align 1 %4 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 3 -; CHECK: mov r1, 8 -; CHECK: stb -5(r10), r1 store i8 8, i8* %4, align 1 %5 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 4 -; CHECK: mov r1, 9 -; CHECK: stb -4(r10), r1 store i8 9, i8* %5, align 1 %6 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 5 -; CHECK: mov r1, 10 -; CHECK: stb -3(r10), r1 store i8 10, i8* %6, align 1 %7 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 1, i32 0, i64 0 -; CHECK: mov r1, r10 -; 
CHECK: addi r1, -2 -; CHECK: mov r2, 0 -; CHECK: sth 6(r1), r2 -; CHECK: sth 4(r1), r2 -; CHECK: sth 2(r1), r2 -; CHECK: sth 24(r10), r2 -; CHECK: sth 22(r10), r2 -; CHECK: sth 20(r10), r2 -; CHECK: sth 18(r10), r2 -; CHECK: sth 16(r10), r2 -; CHECK: sth 14(r10), r2 -; CHECK: sth 12(r10), r2 -; CHECK: sth 10(r10), r2 -; CHECK: sth 8(r10), r2 -; CHECK: sth 6(r10), r2 -; CHECK: sth -2(r10), r2 -; CHECK: sth 26(r10), r2 call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 30, i32 1, i1 false) %8 = call i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...) bitcast (i32 (...)* @bpf_map_lookup_elem to i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...)*)(%struct.bpf_map_def* nonnull @routing, %struct.routing_key_2* nonnull %key) #3 ret i32 undef diff --git a/llvm/test/CodeGen/MSP430/Inst16mm.ll b/llvm/test/CodeGen/MSP430/Inst16mm.ll index c75e1be..a48d859 100644 --- a/llvm/test/CodeGen/MSP430/Inst16mm.ll +++ b/llvm/test/CodeGen/MSP430/Inst16mm.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=msp430 -combiner-alias-analysis < %s | FileCheck %s +; RUN: llc -march=msp430 < %s | FileCheck %s target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8" target triple = "msp430-generic-generic" @foo = common global i16 0, align 2 diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-float.ll b/llvm/test/CodeGen/Mips/cconv/arguments-float.ll index a76cf62..b321d7e 100644 --- a/llvm/test/CodeGen/Mips/cconv/arguments-float.ll +++ b/llvm/test/CodeGen/Mips/cconv/arguments-float.ll @@ -63,39 +63,39 @@ entry: ; NEW-DAG: sd $5, 16([[R2]]) ; O32 has run out of argument registers and starts using the stack -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 16($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 20($sp) ; O32-DAG: sw [[R3]], 24([[R2]]) ; O32-DAG: sw [[R4]], 28([[R2]]) ; NEW-DAG: sd $6, 24([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp) ; O32-DAG: sw [[R3]], 32([[R2]]) ; O32-DAG: sw [[R4]], 36([[R2]]) ; NEW-DAG: sd $7, 32([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp) ; O32-DAG: sw [[R3]], 40([[R2]]) ; O32-DAG: sw [[R4]], 44([[R2]]) ; NEW-DAG: sd $8, 40([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp) ; O32-DAG: sw [[R3]], 48([[R2]]) ; O32-DAG: sw [[R4]], 52([[R2]]) ; NEW-DAG: sd $9, 48([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 56($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 60($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp) ; O32-DAG: sw [[R3]], 56([[R2]]) ; O32-DAG: sw [[R4]], 60([[R2]]) ; NEW-DAG: sd $10, 56([[R2]]) ; N32/N64 have run out of registers and starts using the stack too -; O32-DAG: lw [[R3:\$[0-9]+]], 64($sp) -; O32-DAG: lw [[R4:\$[0-9]+]], 68($sp) +; O32-DAG: lw [[R3:\$[0-9]+]], 56($sp) +; O32-DAG: lw [[R4:\$[0-9]+]], 60($sp) ; O32-DAG: sw [[R3]], 64([[R2]]) ; O32-DAG: sw [[R4]], 68([[R2]]) ; NEW-DAG: ld [[R3:\$[0-9]+]], 0($sp) diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll index 9c20b88..226895e 100644 --- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll +++ 
b/llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll @@ -315,12 +315,11 @@ entry: ; Big-endian mode for N32/N64 must add an additional 4 to the offset due to byte ; order. ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -349,10 +348,9 @@ entry: ; Load the second argument from the variable portion and copy it to the global. ; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) @@ -678,12 +676,11 @@ entry: ; Big-endian mode for N32/N64 must add an additional 4 to the offset due to byte ; order. ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -712,10 +709,9 @@ entry: ; Load the second argument from the variable portion and copy it to the global. ; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 ; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) @@ -1040,10 +1036,9 @@ entry: ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) ; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -1072,10 +1067,9 @@ entry: ; Load the second argument from the variable portion and copy it to the global. 
; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) diff --git a/llvm/test/CodeGen/Mips/fastcc.ll b/llvm/test/CodeGen/Mips/fastcc.ll index dee4258..4b151d0 100644 --- a/llvm/test/CodeGen/Mips/fastcc.ll +++ b/llvm/test/CodeGen/Mips/fastcc.ll @@ -132,20 +132,19 @@ entry: define internal fastcc void @callee0(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15, i32 %a16) nounwind noinline { entry: ; CHECK: callee0 -; CHECK: sw $4 -; CHECK: sw $5 -; CHECK: sw $6 -; CHECK: sw $7 -; CHECK: sw $8 -; CHECK: sw $9 -; CHECK: sw $10 -; CHECK: sw $11 -; CHECK: sw $12 -; CHECK: sw $13 -; CHECK: sw $14 -; CHECK: sw $15 -; CHECK: sw $24 -; CHECK: sw $3 +; CHECK-DAG: sw $4 +; CHECK-DAG: sw $5 +; CHECK-DAG: sw $7 +; CHECK-DAG: sw $8 +; CHECK-DAG: sw $9 +; CHECK-DAG: sw $10 +; CHECK-DAG: sw $11 +; CHECK-DAG: sw $12 +; CHECK-DAG: sw $13 +; CHECK-DAG: sw $14 +; CHECK-DAG: sw $15 +; CHECK-DAG: sw $24 +; CHECK-DAG: sw $3 ; t6, t7 and t8 are reserved in NaCl and cannot be used for fastcc. ; CHECK-NACL-NOT: sw $14 @@ -223,27 +222,27 @@ entry: define internal fastcc void @callee1(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15, float %a16, float %a17, float %a18, float %a19, float %a20) nounwind noinline { entry: -; CHECK: callee1 -; CHECK: swc1 $f0 -; CHECK: swc1 $f1 -; CHECK: swc1 $f2 -; CHECK: swc1 $f3 -; CHECK: swc1 $f4 -; CHECK: swc1 $f5 -; CHECK: swc1 $f6 -; CHECK: swc1 $f7 -; CHECK: swc1 $f8 -; CHECK: swc1 $f9 -; CHECK: swc1 $f10 -; CHECK: swc1 $f11 -; CHECK: swc1 $f12 -; CHECK: swc1 $f13 -; CHECK: swc1 $f14 -; CHECK: swc1 $f15 -; CHECK: swc1 $f16 -; CHECK: swc1 $f17 -; CHECK: swc1 $f18 -; CHECK: swc1 $f19 +; CHECK-LABEL: callee1: +; CHECK-DAG: swc1 $f0 +; CHECK-DAG: swc1 $f1 +; CHECK-DAG: swc1 $f2 +; CHECK-DAG: swc1 $f3 +; CHECK-DAG: swc1 $f4 +; CHECK-DAG: swc1 $f5 +; CHECK-DAG: swc1 $f6 +; CHECK-DAG: swc1 $f7 +; CHECK-DAG: swc1 $f8 +; CHECK-DAG: swc1 $f9 +; CHECK-DAG: swc1 $f10 +; CHECK-DAG: swc1 $f11 +; CHECK-DAG: swc1 $f12 +; CHECK-DAG: swc1 $f13 +; CHECK-DAG: swc1 $f14 +; CHECK-DAG: swc1 $f15 +; CHECK-DAG: swc1 $f16 +; CHECK-DAG: swc1 $f17 +; CHECK-DAG: swc1 $f18 +; CHECK-DAG: swc1 $f19 store float %a0, float* @gf0, align 4 store float %a1, float* @gf1, align 4 @@ -290,7 +289,6 @@ entry: ; NOODDSPREG-DAG: lwc1 $f18, 36($[[R0]]) ; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 40($[[R0]]) -; NOODDSPREG-DAG: swc1 $[[F0]], 8($sp) %0 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4 %1 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 1), align 4 @@ -316,8 +314,6 @@ entry: ; NOODDSPREG-LABEL: callee2: -; NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]] - ; Check that first 10 arguments are received in even float registers ; f0, f2, ... , f18. Check that 11th argument is received on stack. 
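; To make the shape of these checks concrete, a minimal hypothetical
; sketch of such a callee (@gf_sketch and @callee2_sketch are invented
; names; the real test stores all eleven arguments into @fa):
;
;   @gf_sketch = global float 0.000000e+00
;
;   define internal fastcc void @callee2_sketch(float %a0, float %a1,
;       float %a2, float %a3, float %a4, float %a5, float %a6,
;       float %a7, float %a8, float %a9, float %a10) nounwind {
;   entry:
;     ; %a0..%a9 arrive in the even float registers $f0, $f2, ..., $f18;
;     ; %a10 arrives on the stack and, with the temporary spill slot and
;     ; its addiu $sp adjustment gone, is read directly from 0($sp).
;     store float %a10, float* @gf_sketch, align 4
;     ret void
;   }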
@@ -333,7 +329,7 @@ entry: ; NOODDSPREG-DAG: swc1 $f16, 32($[[R0]]) ; NOODDSPREG-DAG: swc1 $f18, 36($[[R0]]) -; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp) +; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 0($sp) ; NOODDSPREG-DAG: swc1 $[[F0]], 40($[[R0]]) store float %a0, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4 @@ -397,7 +393,6 @@ entry: ; FP64-NOODDSPREG-LABEL: callee3: -; FP64-NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]] ; Check that first 10 arguments are received in even float registers ; f0, f2, ... , f18. Check that 11th argument is received on stack. @@ -414,7 +409,7 @@ entry: ; FP64-NOODDSPREG-DAG: sdc1 $f16, 64($[[R0]]) ; FP64-NOODDSPREG-DAG: sdc1 $f18, 72($[[R0]]) -; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp) +; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], 0($sp) ; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 80($[[R0]]) store double %a0, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8 diff --git a/llvm/test/CodeGen/Mips/load-store-left-right.ll b/llvm/test/CodeGen/Mips/load-store-left-right.ll index 3bd924a..6def55c 100644 --- a/llvm/test/CodeGen/Mips/load-store-left-right.ll +++ b/llvm/test/CodeGen/Mips/load-store-left-right.ll @@ -250,12 +250,18 @@ entry: ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)( ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)( -; FIXME: We should be able to do better than this on MIPS32r6/MIPS64r6 since -; we have unaligned halfword load/store available -; ALL-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; ALL-DAG: sb $[[R1]], 2($[[PTR]]) -; ALL-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; ALL-DAG: sb $[[R1]], 3($[[PTR]]) +; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-DAG: sb $[[R1]], 2($[[PTR]]) +; MIPS32-DAG: lbu $[[R2:[0-9]+]], 1($[[PTR]]) +; MIPS32-DAG: sb $[[R2]], 3($[[PTR]]) + +; MIPS32R6: lhu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32R6: sh $[[R1]], 2($[[PTR]]) + +; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-DAG: sb $[[R1]], 2($[[PTR]]) +; MIPS64-DAG: lbu $[[R2:[0-9]+]], 1($[[PTR]]) +; MIPS64-DAG: sb $[[R2]], 3($[[PTR]]) %0 = load %struct.S0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 0), align 1 store %struct.S0 %0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 1), align 1 @@ -268,37 +274,54 @@ entry: ; MIPS32-EL: lw $[[PTR:[0-9]+]], %got(struct_s1)( ; MIPS32-EB: lw $[[PTR:[0-9]+]], %got(struct_s1)( -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 4($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 5($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 6($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 7($[[PTR]]) +; MIPS32-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS32-EL-DAG: lwr $[[R1]], 0($[[PTR]]) +; MIPS32-EL-DAG: swl $[[R1]], 7($[[PTR]]) +; MIPS32-EL-DAG: swr $[[R1]], 4($[[PTR]]) +; MIPS32-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-EB-DAG: lwr $[[R1]], 3($[[PTR]]) +; MIPS32-EB-DAG: swl $[[R1]], 4($[[PTR]]) +; MIPS32-EB-DAG: swr $[[R1]], 7($[[PTR]]) + +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 4($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 5($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 6($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 
3($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 7($[[PTR]]) ; MIPS32R6: lw $[[PTR:[0-9]+]], %got(struct_s1)( -; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS32R6-DAG: sh $[[R1]], 4($[[PTR]]) -; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS32R6-DAG: sh $[[R1]], 6($[[PTR]]) +; MIPS32R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32R6-DAG: sw $[[R1]], 4($[[PTR]]) ; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 4($[[PTR]]) -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 5($[[PTR]]) -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 6($[[PTR]]) -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 7($[[PTR]]) + +; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) +; MIPS64-EL-DAG: swl $[[R1]], 7($[[PTR]]) +; MIPS64-EL-DAG: swr $[[R1]], 4($[[PTR]]) + +; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) +; MIPS64-EB-DAG: swl $[[R1]], 4($[[PTR]]) +; MIPS64-EB-DAG: swr $[[R1]], 7($[[PTR]]) + + +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 4($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 5($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 6($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 7($[[PTR]]) ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( -; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64R6-DAG: sh $[[R1]], 4($[[PTR]]) -; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS64R6-DAG: sh $[[R1]], 6($[[PTR]]) +; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64R6-DAG: sw $[[R1]], 4($[[PTR]]) %0 = load %struct.S1, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 0), align 1 store %struct.S1 %0, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 1), align 1 @@ -336,30 +359,21 @@ entry: ; MIPS32R6-DAG: sw $[[R1]], 12($[[PTR]]) ; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) -; MIPS64-EL-DAG: swl $[[R1]], 11($[[PTR]]) -; MIPS64-EL-DAG: swr $[[R1]], 8($[[PTR]]) -; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 7($[[PTR]]) -; MIPS64-EL-DAG: lwr $[[R1]], 4($[[PTR]]) -; MIPS64-EL-DAG: swl $[[R1]], 15($[[PTR]]) -; MIPS64-EL-DAG: swr $[[R1]], 12($[[PTR]]) + +; MIPS64-EL-DAG: ldl $[[R1:[0-9]+]], 7($[[PTR]]) +; MIPS64-EL-DAG: ldr $[[R1]], 0($[[PTR]]) +; MIPS64-EL-DAG: sdl $[[R1]], 15($[[PTR]]) +; MIPS64-EL-DAG: sdr $[[R1]], 8($[[PTR]]) ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) -; MIPS64-EB-DAG: swl $[[R1]], 8($[[PTR]]) -; MIPS64-EB-DAG: swr $[[R1]], 11($[[PTR]]) -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 4($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 7($[[PTR]]) -; MIPS64-EB-DAG: swl $[[R1]], 12($[[PTR]]) -; MIPS64-EB-DAG: swr $[[R1]], 15($[[PTR]]) +; MIPS64-EB-DAG: ldl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB-DAG: ldr $[[R1]], 7($[[PTR]]) +; MIPS64-EB-DAG: sdl $[[R1]], 8($[[PTR]]) +; MIPS64-EB-DAG: sdr $[[R1]], 15($[[PTR]]) ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) -; 
MIPS64R6-DAG: sw $[[R1]], 8($[[PTR]]) -; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 4($[[PTR]]) -; MIPS64R6-DAG: sw $[[R1]], 12($[[PTR]]) +; MIPS64R6-DAG: ld $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64R6-DAG: sd $[[R1]], 8($[[PTR]]) %0 = load %struct.S2, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 0), align 1 store %struct.S2 %0, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 1), align 1 @@ -416,17 +430,17 @@ entry: ; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) ; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) -; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)( -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) -; MIPS64-EB-DAG: dsll $[[R1]], $[[R1]], 32 +; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)( ; MIPS64-EB-DAG: lbu $[[R2:[0-9]+]], 5($[[PTR]]) ; MIPS64-EB-DAG: lbu $[[R3:[0-9]+]], 4($[[PTR]]) ; MIPS64-EB-DAG: dsll $[[T0:[0-9]+]], $[[R3]], 8 ; MIPS64-EB-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[R2]] -; MIPS64-EB-DAG: dsll $[[T1]], $[[T1]], 16 -; MIPS64-EB-DAG: or $[[T3:[0-9]+]], $[[R1]], $[[T1]] ; MIPS64-EB-DAG: lbu $[[R4:[0-9]+]], 6($[[PTR]]) +; MIPS64-EB-DAG: dsll $[[T1]], $[[T1]], 16 +; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) +; MIPS64-EB-DAG: dsll $[[R5:[0-9]+]], $[[R1]], 32 +; MIPS64-EB-DAG: or $[[T3:[0-9]+]], $[[R5]], $[[T1]] ; MIPS64-EB-DAG: dsll $[[T4:[0-9]+]], $[[R4]], 8 ; MIPS64-EB-DAG: or $4, $[[T3]], $[[T4]] diff --git a/llvm/test/CodeGen/Mips/micromips-li.ll b/llvm/test/CodeGen/Mips/micromips-li.ll index ac315f9..997f4e9 100644 --- a/llvm/test/CodeGen/Mips/micromips-li.ll +++ b/llvm/test/CodeGen/Mips/micromips-li.ll @@ -13,6 +13,6 @@ entry: ret i32 0 } -; CHECK: li16 ${{[2-7]|16|17}}, 1 ; CHECK: addiu ${{[0-9]+}}, $zero, 2148 +; CHECK: li16 ${{[2-7]|16|17}}, 1 ; CHECK: ori ${{[0-9]+}}, $zero, 33332 diff --git a/llvm/test/CodeGen/Mips/mips64-f128.ll b/llvm/test/CodeGen/Mips/mips64-f128.ll index 2b1c154..7de3918 100644 --- a/llvm/test/CodeGen/Mips/mips64-f128.ll +++ b/llvm/test/CodeGen/Mips/mips64-f128.ll @@ -573,10 +573,10 @@ entry: ; ALL-LABEL: store_LD_LD: ; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1) -; ALL: ld $[[R1:[0-9]+]], 0($[[R0]]) ; ALL: ld $[[R2:[0-9]+]], 8($[[R0]]) ; ALL: ld $[[R3:[0-9]+]], %got_disp(gld0) ; ALL: sd $[[R2]], 8($[[R3]]) +; ALL: ld $[[R1:[0-9]+]], 0($[[R0]]) ; ALL: sd $[[R1]], 0($[[R3]]) define void @store_LD_LD() { diff --git a/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll index 9663138..0260afa 100644 --- a/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll +++ b/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll @@ -130,12 +130,12 @@ ; MM-MNO-PIC: addiu $[[R1:[0-9]+]], $[[R0]], %lo(_gp_disp) ; MM-MNO-PIC: addu $[[R2:[0-9]+]], $[[R1]], $25 ; MM-MNO-PIC: lw $[[R3:[0-9]+]], %got(g0)($[[R2]]) -; MM-MNO-PIC: lw16 $[[R4:[0-9]+]], 0($[[R3]]) -; MM-MNO-PIC: lw16 $[[R5:[0-9]+]], 4($[[R3]]) -; MM-MNO-LE-PIC: mtc1 $[[R4]], $f0 -; MM-MNO-LE-PIC: mthc1 $[[R5]], $f0 -; MM-MNO-BE-PIC: mtc1 $[[R5]], $f0 -; MM-MNO-BE-PIC: mthc1 $[[R4]], $f0 +; MM-MNO-PIC-DAG: lw16 $[[R4:[0-9]+]], 0($[[R3]]) +; MM-MNO-PIC-DAG: lw16 $[[R5:[0-9]+]], 4($[[R3]]) +; MM-MNO-LE-PIC-DAG: mtc1 $[[R4]], $f0 +; MM-MNO-LE-PIC-DAG: mthc1 $[[R5]], $f0 +; MM-MNO-BE-PIC-DAG: mtc1 $[[R5]], $f0 +; MM-MNO-BE-PIC-DAG: mthc1 $[[R4]], $f0 ; MM-STATIC-PIC: lui $[[R0:[0-9]+]], %hi(g0) ; MM-STATIC-PIC: ldc1 $f0, %lo(g0)($[[R0]]) @@ -214,13 +214,13 @@ entry: ; MM-MNO-PIC: lui $[[R0:[0-9]+]], %hi(_gp_disp) ; MM-MNO-PIC: addiu $[[R1:[0-9]+]], $[[R0]], %lo(_gp_disp) ; 
MM-MNO-PIC: addu $[[R2:[0-9]+]], $[[R1]], $25 -; MM-MNO-LE-PIC: mfc1 $[[R3:[0-9]+]], $f12 -; MM-MNO-BE-PIC: mfhc1 $[[R3:[0-9]+]], $f12 -; MM-MNO-PIC: lw $[[R4:[0-9]+]], %got(g0)($[[R2]]) -; MM-MNO-PIC: sw16 $[[R3]], 0($[[R4]]) -; MM-MNO-LE-PIC: mfhc1 $[[R5:[0-9]+]], $f12 -; MM-MNO-BE-PIC: mfc1 $[[R5:[0-9]+]], $f12 -; MM-MNO-PIC: sw16 $[[R5]], 4($[[R4]]) +; MM-MNO-LE-PIC-DAG: mfc1 $[[R3:[0-9]+]], $f12 +; MM-MNO-BE-PIC-DAG: mfhc1 $[[R3:[0-9]+]], $f12 +; MM-MNO-PIC-DAG: lw $[[R4:[0-9]+]], %got(g0)($[[R2]]) +; MM-MNO-PIC-DAG: sw16 $[[R3]], 0($[[R4]]) +; MM-MNO-LE-PIC-DAG: mfhc1 $[[R5:[0-9]+]], $f12 +; MM-MNO-BE-PIC-DAG: mfc1 $[[R5:[0-9]+]], $f12 +; MM-MNO-PIC-DAG: sw16 $[[R5]], 4($[[R4]]) ; MM-STATIC-PIC: lui $[[R0:[0-9]+]], %hi(g0) ; MM-STATIC-PIC: sdc1 $f12, %lo(g0)($[[R0]]) @@ -267,8 +267,8 @@ entry: ; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $5, 3 ; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $4, $[[R0]] -; MM-MNO-PIC: lw16 $[[R2:[0-9]+]], 0($[[R1]]) -; MM-MNO-PIC: lw16 $[[R3:[0-9]+]], 4($[[R1]]) +; MM-MNO-PIC-DAG: lw16 $[[R2:[0-9]+]], 0($[[R1]]) +; MM-MNO-PIC-DAG: lw16 $[[R3:[0-9]+]], 4($[[R1]]) ; MM-MNO-LE-PIC: mtc1 $[[R2]], $f0 ; MM-MNO-LE-PIC: mthc1 $[[R3]], $f0 ; MM-MNO-BE-PIC: mtc1 $[[R3]], $f0 @@ -313,14 +313,14 @@ entry: ; MM: addu16 $[[R1:[0-9]+]], $6, $[[R0]] ; MM: sdc1 $f12, 0($[[R1]]) -; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $7, 3 -; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]] -; MM-MNO-LE-PIC: mfc1 $[[R2:[0-9]+]], $f12 -; MM-MNO-BE-PIC: mfhc1 $[[R2:[0-9]+]], $f12 -; MM-MNO-PIC: sw16 $[[R2]], 0($[[R1]]) -; MM-MNO-LE-PIC: mfhc1 $[[R3:[0-9]+]], $f12 -; MM-MNO-BE-PIC: mfc1 $[[R3:[0-9]+]], $f12 -; MM-MNO-PIC: sw16 $[[R3]], 4($[[R1]]) +; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $7, 3 +; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]] +; MM-MNO-LE-PIC-DAG: mfc1 $[[R2:[0-9]+]], $f12 +; MM-MNO-BE-PIC-DAG: mfhc1 $[[R2:[0-9]+]], $f12 +; MM-MNO-PIC-DAG: sw16 $[[R2]], 0($[[R1]]) +; MM-MNO-LE-PIC-DAG: mfhc1 $[[R3:[0-9]+]], $f12 +; MM-MNO-BE-PIC-DAG: mfc1 $[[R3:[0-9]+]], $f12 +; MM-MNO-PIC-DAG: sw16 $[[R3]], 4($[[R1]]) ; MM-STATIC-PIC: sll16 $[[R0:[0-9]+]], $7, 3 ; MM-STATIC-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]] diff --git a/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll b/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll index c644d24..812c400 100644 --- a/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll +++ b/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll @@ -336,8 +336,8 @@ entry: ; CHECK: llvm_mips_st_b_valid_range_tests: ; CHECK: ld.b -; CHECK: st.b [[R1:\$w[0-9]+]], -512( -; CHECK: st.b [[R1:\$w[0-9]+]], 511( +; CHECK-DAG: st.b [[R1:\$w[0-9]+]], -512( +; CHECK-DAG: st.b [[R1:\$w[0-9]+]], 511( ; CHECK: .size llvm_mips_st_b_valid_range_tests ; @@ -351,10 +351,10 @@ entry: } ; CHECK: llvm_mips_st_b_invalid_range_tests: -; CHECK: addiu $2, $1, -513 +; CHECK: addiu $2, $1, 512 ; CHECK: ld.b ; CHECK: st.b [[R1:\$w[0-9]+]], 0( -; CHECK: addiu $1, $1, 512 +; CHECK: addiu $1, $1, -513 ; CHECK: st.b [[R1:\$w[0-9]+]], 0( ; CHECK: .size llvm_mips_st_b_invalid_range_tests ; @@ -404,8 +404,8 @@ entry: ; CHECK: llvm_mips_st_h_valid_range_tests: ; CHECK: ld.h -; CHECK: st.h [[R1:\$w[0-9]+]], -1024( -; CHECK: st.h [[R1:\$w[0-9]+]], 1022( +; CHECK-DAG: st.h [[R1:\$w[0-9]+]], -1024( +; CHECK-DAG: st.h [[R1:\$w[0-9]+]], 1022( ; CHECK: .size llvm_mips_st_h_valid_range_tests ; @@ -419,10 +419,10 @@ entry: } ; CHECK: llvm_mips_st_h_invalid_range_tests: -; CHECK: addiu $2, $1, -1026 +; CHECK: addiu $2, $1, 1024 ; CHECK: ld.h ; CHECK: st.h [[R1:\$w[0-9]+]], 0( -; CHECK: addiu $1, $1, 1024 +; CHECK: addiu $1, $1, -1026 ; CHECK: st.h [[R1:\$w[0-9]+]], 0( ; CHECK: .size 
llvm_mips_st_h_invalid_range_tests ; @@ -472,8 +472,8 @@ entry: ; CHECK: llvm_mips_st_w_valid_range_tests: ; CHECK: ld.w -; CHECK: st.w [[R1:\$w[0-9]+]], -2048( -; CHECK: st.w [[R1:\$w[0-9]+]], 2044( +; CHECK-DAG: st.w [[R1:\$w[0-9]+]], -2048( +; CHECK-DAG: st.w [[R1:\$w[0-9]+]], 2044( ; CHECK: .size llvm_mips_st_w_valid_range_tests ; @@ -487,10 +487,10 @@ entry: } ; CHECK: llvm_mips_st_w_invalid_range_tests: -; CHECK: addiu $2, $1, -2052 +; CHECK: addiu $2, $1, 2048 ; CHECK: ld.w ; CHECK: st.w [[R1:\$w[0-9]+]], 0( -; CHECK: addiu $1, $1, 2048 +; CHECK: addiu $1, $1, -2052 ; CHECK: st.w [[R1:\$w[0-9]+]], 0( ; CHECK: .size llvm_mips_st_w_invalid_range_tests ; @@ -540,8 +540,8 @@ entry: ; CHECK: llvm_mips_st_d_valid_range_tests: ; CHECK: ld.d -; CHECK: st.d [[R1:\$w[0-9]+]], -4096( -; CHECK: st.d [[R1:\$w[0-9]+]], 4088( +; CHECK-DAG: st.d [[R1:\$w[0-9]+]], -4096( +; CHECK-DAG: st.d [[R1:\$w[0-9]+]], 4088( ; CHECK: .size llvm_mips_st_d_valid_range_tests ; @@ -555,10 +555,10 @@ entry: } ; CHECK: llvm_mips_st_d_invalid_range_tests: -; CHECK: addiu $2, $1, -4104 +; CHECK: addiu $2, $1, 4096 ; CHECK: ld.d ; CHECK: st.d [[R1:\$w[0-9]+]], 0( -; CHECK: addiu $1, $1, 4096 +; CHECK: addiu $1, $1, -4104 ; CHECK: st.d [[R1:\$w[0-9]+]], 0( ; CHECK: .size llvm_mips_st_d_invalid_range_tests ; diff --git a/llvm/test/CodeGen/Mips/o32_cc_byval.ll b/llvm/test/CodeGen/Mips/o32_cc_byval.ll index 33431db..92c5ffe 100644 --- a/llvm/test/CodeGen/Mips/o32_cc_byval.ll +++ b/llvm/test/CodeGen/Mips/o32_cc_byval.ll @@ -45,20 +45,18 @@ declare void @callee3(float, %struct.S3* byval, %struct.S1* byval) define void @f2(float %f, %struct.S1* nocapture byval %s1) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 -; CHECK: sw $7, 60($sp) -; CHECK: sw $6, 56($sp) -; CHECK: lw $4, 80($sp) -; CHECK: ldc1 $f[[F0:[0-9]+]], 72($sp) -; CHECK: lw $[[R3:[0-9]+]], 64($sp) -; CHECK: lw $[[R4:[0-9]+]], 68($sp) -; CHECK: lw $[[R2:[0-9]+]], 60($sp) -; CHECK: lh $[[R1:[0-9]+]], 58($sp) -; CHECK: lb $[[R0:[0-9]+]], 56($sp) -; CHECK: sw $[[R0]], 32($sp) -; CHECK: sw $[[R1]], 28($sp) -; CHECK: sw $[[R2]], 24($sp) -; CHECK: sw $[[R4]], 20($sp) -; CHECK: sw $[[R3]], 16($sp) +; CHECK-DAG: sw $7, 60($sp) +; CHECK-DAG: sw $6, 56($sp) +; CHECK-DAG: ldc1 $f[[F0:[0-9]+]], 72($sp) +; CHECK-DAG: lw $[[R3:[0-9]+]], 64($sp) +; CHECK-DAG: lw $[[R4:[0-9]+]], 68($sp) +; CHECK-DAG: lh $[[R1:[0-9]+]], 58($sp) +; CHECK-DAG: lb $[[R0:[0-9]+]], 56($sp) +; CHECK-DAG: sw $[[R0]], 32($sp) +; CHECK-DAG: sw $[[R1]], 28($sp) +; CHECK-DAG: sw $[[R4]], 20($sp) +; CHECK-DAG: sw $[[R3]], 16($sp) +; CHECK-DAG: sw $7, 24($sp) ; CHECK: mfc1 $6, $f[[F0]] %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5 @@ -86,9 +84,7 @@ entry: ; CHECK: sw $6, 56($sp) ; CHECK: sw $5, 52($sp) ; CHECK: sw $4, 48($sp) -; CHECK: lw $4, 48($sp) -; CHECK: lw $[[R0:[0-9]+]], 60($sp) -; CHECK: sw $[[R0]], 24($sp) +; CHECK: sw $7, 24($sp) %arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0 %tmp = load i32, i32* %arrayidx, align 4 @@ -101,14 +97,14 @@ entry: define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture byval %s1) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 -; CHECK: sw $7, 60($sp) -; CHECK: sw $6, 56($sp) -; CHECK: sw $5, 52($sp) -; CHECK: lw $4, 60($sp) -; CHECK: lw $[[R1:[0-9]+]], 80($sp) -; CHECK: lb $[[R0:[0-9]+]], 52($sp) -; CHECK: sw $[[R0]], 32($sp) -; CHECK: sw $[[R1]], 24($sp) +; CHECK-DAG: sw $7, 60($sp) +; CHECK-DAG: sw $6, 56($sp) +; CHECK-DAG: sw $5, 52($sp) +; CHECK-DAG: lw $[[R1:[0-9]+]], 80($sp) +; 
CHECK-DAG: lb $[[R0:[0-9]+]], 52($sp) +; CHECK-DAG: sw $[[R0]], 32($sp) +; CHECK-DAG: sw $[[R1]], 24($sp) +; CHECK: move $4, $7 %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2 %tmp = load i32, i32* %i, align 4 diff --git a/llvm/test/CodeGen/Mips/o32_cc_vararg.ll b/llvm/test/CodeGen/Mips/o32_cc_vararg.ll index b4597a3..80a1c64 100644 --- a/llvm/test/CodeGen/Mips/o32_cc_vararg.ll +++ b/llvm/test/CodeGen/Mips/o32_cc_vararg.ll @@ -29,9 +29,9 @@ entry: ; CHECK-LABEL: va1: ; CHECK: addiu $sp, $sp, -16 +; CHECK: sw $5, 20($sp) ; CHECK: sw $7, 28($sp) ; CHECK: sw $6, 24($sp) -; CHECK: sw $5, 20($sp) ; CHECK: lw $2, 20($sp) } @@ -83,8 +83,8 @@ entry: ; CHECK-LABEL: va3: ; CHECK: addiu $sp, $sp, -16 -; CHECK: sw $7, 28($sp) ; CHECK: sw $6, 24($sp) +; CHECK: sw $7, 28($sp) ; CHECK: lw $2, 24($sp) } diff --git a/llvm/test/CodeGen/PowerPC/anon_aggr.ll b/llvm/test/CodeGen/PowerPC/anon_aggr.ll index f4e7888..6e7c196 100644 --- a/llvm/test/CodeGen/PowerPC/anon_aggr.ll +++ b/llvm/test/CodeGen/PowerPC/anon_aggr.ll @@ -60,10 +60,9 @@ equal: unequal: ret i8* %array2_ptr } - ; CHECK-LABEL: func2: -; CHECK: ld [[REG2:[0-9]+]], 72(1) -; CHECK: cmpld {{([0-9]+,)?}}4, [[REG2]] +; CHECK: cmpld {{([0-9]+,)?}}4, 6 +; CHECK: mr [[REG2:[0-9]+]], 6 ; CHECK-DAG: std [[REG2]], -[[OFFSET1:[0-9]+]] ; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]] ; CHECK: ld 3, -[[OFFSET2]](1) @@ -85,8 +84,8 @@ unequal: ; DARWIN64: mr ; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]] ; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]] -; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]] ; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]] +; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]] ; DARWIN64: ld r3, -[[OFFSET1]] ; DARWIN64: ld r3, -[[OFFSET2]] @@ -106,24 +105,24 @@ unequal: } ; CHECK-LABEL: func3: -; CHECK: ld [[REG3:[0-9]+]], 72(1) -; CHECK: ld [[REG4:[0-9]+]], 56(1) -; CHECK: cmpld {{([0-9]+,)?}}[[REG4]], [[REG3]] -; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1) +; CHECK: cmpld {{([0-9]+,)?}}4, 6 +; CHECK: mr [[REG3:[0-9]+]], 6 +; CHECK: mr [[REG4:[0-9]+]], 4 ; CHECK: std [[REG4]], -[[OFFSET2:[0-9]+]](1) +; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1) ; CHECK: ld 3, -[[OFFSET2]](1) ; CHECK: ld 3, -[[OFFSET1]](1) ; DARWIN32: _func3: -; DARWIN32: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36 -; DARWIN32: addi r[[REG2:[0-9]+]], r[[REGSP]], 24 -; DARWIN32: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]]) -; DARWIN32: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]]) +; DARWIN32-DAG: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36 +; DARWIN32-DAG: addi r[[REG2:[0-9]+]], r[[REGSP]], 24 +; DARWIN32-DAG: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]]) +; DARWIN32-DAG: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]]) ; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]] -; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]] -; DARWIN32: stw r[[REG4]], -[[OFFSET2:[0-9]+]] -; DARWIN32: lwz r3, -[[OFFSET2]] -; DARWIN32: lwz r3, -[[OFFSET1]] +; DARWIN32-DAG: stw r[[REG3]], -[[OFFSET1:[0-9]+]] +; DARWIN32-DAG: stw r[[REG4]], -[[OFFSET2:[0-9]+]] +; DARWIN32-DAG: lwz r3, -[[OFFSET1:[0-9]+]] +; DARWIN32-DAG: lwz r3, -[[OFFSET2:[0-9]+]] ; DARWIN64: _func3: ; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1) diff --git a/llvm/test/CodeGen/PowerPC/complex-return.ll b/llvm/test/CodeGen/PowerPC/complex-return.ll index f6097e6..81cae48 100644 --- a/llvm/test/CodeGen/PowerPC/complex-return.ll +++ b/llvm/test/CodeGen/PowerPC/complex-return.ll @@ -24,10 +24,10 @@ entry: } ; CHECK-LABEL: foo: -; CHECK: lfd 1 -; CHECK: lfd 2 -; CHECK: lfd 3 -; CHECK: lfd 4 +; CHECK-DAG: lfd 1 +; CHECK-DAG: fmr 2 +; CHECK-DAG: lfd 3 +; 
CHECK-DAG: lfd 4 define { float, float } @oof() nounwind { entry: @@ -50,6 +50,6 @@ entry: } ; CHECK-LABEL: oof: -; CHECK: lfs 2 -; CHECK: lfs 1 +; CHECK-DAG: lfs 2 +; CHECK-DAG: lfs 1 diff --git a/llvm/test/CodeGen/PowerPC/jaggedstructs.ll b/llvm/test/CodeGen/PowerPC/jaggedstructs.ll index b28b34d..3907560 100644 --- a/llvm/test/CodeGen/PowerPC/jaggedstructs.ll +++ b/llvm/test/CodeGen/PowerPC/jaggedstructs.ll @@ -18,14 +18,14 @@ entry: ret void } -; CHECK: std 6, 184(1) -; CHECK: std 5, 176(1) -; CHECK: std 4, 168(1) -; CHECK: std 3, 160(1) -; CHECK: lbz {{[0-9]+}}, 167(1) -; CHECK: lhz {{[0-9]+}}, 165(1) -; CHECK: stb {{[0-9]+}}, 55(1) -; CHECK: sth {{[0-9]+}}, 53(1) +; CHECK-DAG: std 3, 160(1) +; CHECK-DAG: std 6, 184(1) +; CHECK-DAG: std 5, 176(1) +; CHECK-DAG: std 4, 168(1) +; CHECK-DAG: lbz {{[0-9]+}}, 167(1) +; CHECK-DAG: lhz {{[0-9]+}}, 165(1) +; CHECK-DAG: stb {{[0-9]+}}, 55(1) +; CHECK-DAG: sth {{[0-9]+}}, 53(1) ; CHECK: lbz {{[0-9]+}}, 175(1) ; CHECK: lwz {{[0-9]+}}, 171(1) ; CHECK: stb {{[0-9]+}}, 63(1) diff --git a/llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll b/llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll index c6c7e2f..1639fa7 100644 --- a/llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll +++ b/llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll @@ -18,19 +18,35 @@ entry: ret ppc_fp128 %0 } -; CHECK-DAG: std 6, 72(1) -; CHECK-DAG: std 5, 64(1) -; CHECK-DAG: std 4, 56(1) -; CHECK-DAG: std 3, 48(1) -; CHECK: lfd 1, 64(1) -; CHECK: lfd 2, 72(1) +;; FIXME: Sadly, we now have an extra store to a temp variable here, +;; which comes from (roughly): +;; store i64 to i64* +;; bitcast (load i64* ) to f64 +;; The code now can elide the load, making: +;; store i64 -> +;; bitcast i64 to f64 +;; Finally, the bitcast itself turns into a store/load pair. +;; +;; This behavior is new, because previously, llvm was accidentally +;; unable to detect that the load came directly from the store, and +;; elide it. 
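;; To make that concrete, a minimal IR sketch of the pattern (the
;; function @bitcast_slot is invented for illustration; it is not part
;; of this test):
;;
;;   define double @bitcast_slot(i64 %x) {
;;   entry:
;;     %slot = alloca i64
;;     store i64 %x, i64* %slot
;;     %v = load i64, i64* %slot
;;     %d = bitcast i64 %v to double
;;     ret double %d
;;   }
;;
;; The improved chain analysis forwards the stored i64 straight to the
;; load, leaving (bitcast i64 %x to double); legalizing that bitcast
;; then reintroduces a store/load pair through a fresh stack slot,
;; which is where the std/lfd pairs at -16(1) and -8(1) below come from.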
-; CHECK-VSX-DAG: std 6, 72(1) -; CHECK-VSX-DAG: std 5, 64(1) -; CHECK-VSX-DAG: std 4, 56(1) -; CHECK-VSX-DAG: std 3, 48(1) -; CHECK-VSX: li 3, 16 -; CHECK-VSX: addi 4, 1, 48 -; CHECK-VSX: lxsdx 1, 4, 3 -; CHECK-VSX: li 3, 24 -; CHECK-VSX: lxsdx 2, 4, 3 +; CHECK: std 6, 72(1) +; CHECK: std 5, 64(1) +; CHECK: std 4, 56(1) +; CHECK: std 3, 48(1) +; CHECK: std 5, -16(1) +; CHECK: std 6, -8(1) +; CHECK: lfd 1, -16(1) +; CHECK: lfd 2, -8(1) + +; CHECK-VSX: std 6, 72(1) +; CHECK-VSX: std 5, 64(1) +; CHECK-VSX: std 4, 56(1) +; CHECK-VSX: std 3, 48(1) +; CHECK-VSX: std 5, -16(1) +; CHECK-VSX: std 6, -8(1) +; CHECK-VSX: addi 3, 1, -16 +; CHECK-VSX: lxsdx 1, 0, 3 +; CHECK-VSX: addi 3, 1, -8 +; CHECK-VSX: lxsdx 2, 0, 3 diff --git a/llvm/test/CodeGen/PowerPC/structsinmem.ll b/llvm/test/CodeGen/PowerPC/structsinmem.ll index 3777f3e..01b0848 100644 --- a/llvm/test/CodeGen/PowerPC/structsinmem.ll +++ b/llvm/test/CodeGen/PowerPC/structsinmem.ll @@ -113,13 +113,13 @@ entry: %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: lha {{[0-9]+}}, 126(1) -; CHECK: lha {{[0-9]+}}, 132(1) -; CHECK: lbz {{[0-9]+}}, 119(1) -; CHECK: lwz {{[0-9]+}}, 140(1) -; CHECK: lwz {{[0-9]+}}, 144(1) -; CHECK: lwz {{[0-9]+}}, 152(1) -; CHECK: lwz {{[0-9]+}}, 160(1) +; CHECK-DAG: lha {{[0-9]+}}, 126(1) +; CHECK-DAG: lha {{[0-9]+}}, 132(1) +; CHECK-DAG: lbz {{[0-9]+}}, 119(1) +; CHECK-DAG: lwz {{[0-9]+}}, 140(1) +; CHECK-DAG: lwz {{[0-9]+}}, 144(1) +; CHECK-DAG: lwz {{[0-9]+}}, 152(1) +; CHECK-DAG: lwz {{[0-9]+}}, 160(1) } define i32 @caller2() nounwind { @@ -205,11 +205,11 @@ entry: %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: lha {{[0-9]+}}, 126(1) -; CHECK: lha {{[0-9]+}}, 133(1) -; CHECK: lbz {{[0-9]+}}, 119(1) -; CHECK: lwz {{[0-9]+}}, 140(1) -; CHECK: lwz {{[0-9]+}}, 147(1) -; CHECK: lwz {{[0-9]+}}, 154(1) -; CHECK: lwz {{[0-9]+}}, 161(1) +; CHECK-DAG: lha {{[0-9]+}}, 126(1) +; CHECK-DAG: lha {{[0-9]+}}, 133(1) +; CHECK-DAG: lbz {{[0-9]+}}, 119(1) +; CHECK-DAG: lwz {{[0-9]+}}, 140(1) +; CHECK-DAG: lwz {{[0-9]+}}, 147(1) +; CHECK-DAG: lwz {{[0-9]+}}, 154(1) +; CHECK-DAG: lwz {{[0-9]+}}, 161(1) } diff --git a/llvm/test/CodeGen/PowerPC/structsinregs.ll b/llvm/test/CodeGen/PowerPC/structsinregs.ll index e27041d..54679f2 100644 --- a/llvm/test/CodeGen/PowerPC/structsinregs.ll +++ b/llvm/test/CodeGen/PowerPC/structsinregs.ll @@ -59,6 +59,7 @@ entry: %call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7) ret i32 %call +; CHECK-LABEL: caller1 ; CHECK: ld 9, 112(31) ; CHECK: ld 8, 120(31) ; CHECK: ld 7, 128(31) @@ -97,20 +98,21 @@ entry: %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: std 9, 96(1) -; CHECK: std 8, 88(1) -; CHECK: std 7, 80(1) -; CHECK: stw 6, 76(1) -; CHECK: stw 5, 68(1) -; CHECK: sth 4, 62(1) -; CHECK: stb 3, 55(1) -; CHECK: lha {{[0-9]+}}, 62(1) -; CHECK: lha {{[0-9]+}}, 68(1) -; CHECK: lbz {{[0-9]+}}, 55(1) -; CHECK: lwz {{[0-9]+}}, 76(1) -; CHECK: lwz {{[0-9]+}}, 80(1) -; CHECK: lwz {{[0-9]+}}, 88(1) -; CHECK: lwz {{[0-9]+}}, 96(1) +; CHECK-LABEL: callee1 +; CHECK-DAG: std 9, 96(1) +; CHECK-DAG: std 8, 88(1) +; CHECK-DAG: std 7, 80(1) +; CHECK-DAG: stw 6, 76(1) +; CHECK-DAG: stw 5, 68(1) +; CHECK-DAG: sth 4, 62(1) +; CHECK-DAG: stb 3, 55(1) +; CHECK-DAG: lha {{[0-9]+}}, 62(1) +; CHECK-DAG: lha {{[0-9]+}}, 68(1) +; CHECK-DAG: lbz {{[0-9]+}}, 55(1) +; CHECK-DAG: lwz {{[0-9]+}}, 76(1) +; CHECK-DAG: lwz {{[0-9]+}}, 80(1) +; CHECK-DAG: lwz {{[0-9]+}}, 88(1) +; 
CHECK-DAG: lwz {{[0-9]+}}, 96(1) } define i32 @caller2() nounwind { @@ -139,6 +141,7 @@ entry: %call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) ret i32 %call +; CHECK-LABEL: caller2 ; CHECK: stb {{[0-9]+}}, 71(1) ; CHECK: sth {{[0-9]+}}, 69(1) ; CHECK: stb {{[0-9]+}}, 87(1) @@ -184,18 +187,19 @@ entry: %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: std 9, 96(1) -; CHECK: std 8, 88(1) -; CHECK: std 7, 80(1) -; CHECK: stw 6, 76(1) -; CHECK: std 5, 64(1) -; CHECK: sth 4, 62(1) -; CHECK: stb 3, 55(1) -; CHECK: lha {{[0-9]+}}, 62(1) -; CHECK: lha {{[0-9]+}}, 69(1) -; CHECK: lbz {{[0-9]+}}, 55(1) -; CHECK: lwz {{[0-9]+}}, 76(1) -; CHECK: lwz {{[0-9]+}}, 83(1) -; CHECK: lwz {{[0-9]+}}, 90(1) -; CHECK: lwz {{[0-9]+}}, 97(1) +; CHECK-LABEL: callee2 +; CHECK-DAG: std 9, 96(1) +; CHECK-DAG: std 8, 88(1) +; CHECK-DAG: std 7, 80(1) +; CHECK-DAG: stw 6, 76(1) +; CHECK-DAG: std 5, 64(1) +; CHECK-DAG: sth 4, 62(1) +; CHECK-DAG: stb 3, 55(1) +; CHECK-DAG: lha {{[0-9]+}}, 62(1) +; CHECK-DAG: lha {{[0-9]+}}, 69(1) +; CHECK-DAG: lbz {{[0-9]+}}, 55(1) +; CHECK-DAG: lwz {{[0-9]+}}, 76(1) +; CHECK-DAG: lwz {{[0-9]+}}, 83(1) +; CHECK-DAG: lwz {{[0-9]+}}, 90(1) +; CHECK-DAG: lwz {{[0-9]+}}, 97(1) } diff --git a/llvm/test/CodeGen/SystemZ/unaligned-01.ll b/llvm/test/CodeGen/SystemZ/unaligned-01.ll index 94cad0e..2af1aa7 100644 --- a/llvm/test/CodeGen/SystemZ/unaligned-01.ll +++ b/llvm/test/CodeGen/SystemZ/unaligned-01.ll @@ -1,10 +1,7 @@ ; Check that unaligned accesses are allowed in general. We check the ; few exceptions (like CRL) in their respective test files. ; -; FIXME: -combiner-alias-analysis (the default for SystemZ) stops -; f1 from being optimized. -; RUN: llc < %s -mtriple=s390x-linux-gnu -combiner-alias-analysis=false \ -; RUN: | FileCheck %s +; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s ; Check that these four byte stores become a single word store. define void @f1(i8 *%ptr) { diff --git a/llvm/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll b/llvm/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll index 2f8e36b..08349a3 100644 --- a/llvm/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll +++ b/llvm/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll @@ -9,9 +9,9 @@ define void @_Z19getClosestDiagonal3ii(%0* noalias sret, i32, i32) nounwind { ; CHECK: bl ___muldf3 -; CHECK: bl ___muldf3 ; CHECK: beq LBB0 ; CHECK: bl ___muldf3 +; CHECK: bl ___muldf3 ;