From: Amara Emerson
Date: Fri, 6 Aug 2021 19:42:03 +0000 (-0700)
Subject: Change TargetLowering::canMergeStoresTo() to take a MF instead of DAG.
X-Git-Tag: upstream/15.0.7~34439
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2b067e333572a26431d1f3f4e8c226f0f7cccc0a;p=platform%2Fupstream%2Fllvm.git

Change TargetLowering::canMergeStoresTo() to take a MF instead of DAG.

DAG is unnecessary and we need this hook to implement store merging on
GlobalISel too.
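For illustration only, here is a minimal sketch (not part of this commit) of
how a GlobalISel-based store-merging combine could query the hook now that it
only needs a MachineFunction. The helper name shouldMergeWideStore and its
surrounding context are hypothetical; only canMergeStoresTo() and the
MachineFunction/TargetLowering accessors are existing LLVM APIs.

  // Illustrative sketch: a GlobalISel-side caller of the new hook signature.
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  #include "llvm/CodeGen/TargetLowering.h"
  #include "llvm/CodeGen/TargetSubtargetInfo.h"
  #include "llvm/CodeGen/ValueTypes.h"

  using namespace llvm;

  // Returns true if the target allows merging stores in MI's address space
  // into a single store of type WideVT. No SelectionDAG is required; the hook
  // is reachable from the MachineInstr's parent MachineFunction alone.
  static bool shouldMergeWideStore(const MachineInstr &MI, EVT WideVT) {
    const MachineFunction &MF = *MI.getMF();
    const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
    if (!MI.hasOneMemOperand())
      return false;
    const MachineMemOperand &MMO = **MI.memoperands_begin();
    return TLI.canMergeStoresTo(MMO.getAddrSpace(), WideVT, MF);
  }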
---
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 5506d33..d0328b1 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -591,7 +591,7 @@ public:
 
   /// Returns if it's reasonable to merge stores to MemVT size.
   virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
-                                const SelectionDAG &DAG) const {
+                                const MachineFunction &MF) const {
     return true;
   }
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 065ae7e..c07a75c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -17394,7 +17394,8 @@ bool DAGCombiner::tryStoreMergeOfConstants(
         break;
 
       if (TLI.isTypeLegal(StoreTy) &&
-          TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
+          TLI.canMergeStoresTo(FirstStoreAS, StoreTy,
+                               DAG.getMachineFunction()) &&
           TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                  *FirstInChain->getMemOperand(), &IsFast) &&
           IsFast) {
@@ -17406,7 +17407,8 @@ bool DAGCombiner::tryStoreMergeOfConstants(
         EVT LegalizedStoredValTy =
             TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
         if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
-            TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
+            TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy,
+                                 DAG.getMachineFunction()) &&
             TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                    *FirstInChain->getMemOperand(), &IsFast) &&
             IsFast) {
@@ -17425,7 +17427,7 @@ bool DAGCombiner::tryStoreMergeOfConstants(
         unsigned Elts = (i + 1) * NumMemElts;
         EVT Ty = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
         if (TLI.isTypeLegal(Ty) && TLI.isTypeLegal(MemVT) &&
-            TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
+            TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG.getMachineFunction()) &&
             TLI.allowsMemoryAccess(Context, DL, Ty,
                                    *FirstInChain->getMemOperand(), &IsFast) &&
             IsFast)
@@ -17501,7 +17503,8 @@ bool DAGCombiner::tryStoreMergeOfExtracts(
       if (Ty.getSizeInBits() > MaximumLegalStoreInBits)
         break;
 
-      if (TLI.isTypeLegal(Ty) && TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
+      if (TLI.isTypeLegal(Ty) &&
+          TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG.getMachineFunction()) &&
           TLI.allowsMemoryAccess(Context, DL, Ty,
                                  *FirstInChain->getMemOperand(), &IsFast) &&
           IsFast)
@@ -17650,7 +17653,8 @@ bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
       bool IsFastSt = false;
      bool IsFastLd = false;
      if (TLI.isTypeLegal(StoreTy) &&
-          TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
+          TLI.canMergeStoresTo(FirstStoreAS, StoreTy,
+                               DAG.getMachineFunction()) &&
           TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                  *FirstInChain->getMemOperand(), &IsFastSt) &&
           IsFastSt &&
@@ -17664,7 +17668,8 @@ bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
       unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
       StoreTy = EVT::getIntegerVT(Context, SizeInBits);
       if (TLI.isTypeLegal(StoreTy) &&
-          TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
+          TLI.canMergeStoresTo(FirstStoreAS, StoreTy,
+                               DAG.getMachineFunction()) &&
           TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                  *FirstInChain->getMemOperand(), &IsFastSt) &&
           IsFastSt &&
@@ -17678,7 +17683,8 @@ bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
                  TargetLowering::TypePromoteInteger) {
         EVT LegalizedStoredValTy = TLI.getTypeToTransformTo(Context, StoreTy);
         if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
-            TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
+            TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy,
+                                 DAG.getMachineFunction()) &&
             TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValTy, StoreTy) &&
             TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValTy, StoreTy) &&
             TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValTy, StoreTy) &&
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index f2663e2..0a4c48b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -701,12 +701,11 @@ public:
   bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
 
   bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
-                        const SelectionDAG &DAG) const override {
+                        const MachineFunction &MF) const override {
     // Do not merge to float value size (128 bytes) if no implicit
     // float attribute is set.
-    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
-        Attribute::NoImplicitFloat);
+    bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);
 
     if (NoFloat)
       return (MemVT.getSizeInBits() <= 64);
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 002ef18..a7f634b 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -18,6 +18,7 @@
 #include "R600InstrInfo.h"
 #include "R600MachineFunctionInfo.h"
 #include "R600Subtarget.h"
+#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/IntrinsicsR600.h"
 
@@ -1564,7 +1565,7 @@ EVT R600TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
 }
 
 bool R600TargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
-                                          const SelectionDAG &DAG) const {
+                                          const MachineFunction &MF) const {
   // Local and Private addresses do not handle vectors. Limit to i32
   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS)) {
     return (MemVT.getSizeInBits() <= 32);
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.h b/llvm/lib/Target/AMDGPU/R600ISelLowering.h
index 920cf3cd..f9a9a61 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.h
@@ -15,6 +15,7 @@
 #define LLVM_LIB_TARGET_AMDGPU_R600ISELLOWERING_H
 
 #include "AMDGPUISelLowering.h"
+#include "llvm/CodeGen/MachineFunction.h"
 
 namespace llvm {
 
@@ -47,7 +48,7 @@ public:
                          EVT VT) const override;
 
   bool canMergeStoresTo(unsigned AS, EVT MemVT,
-                        const SelectionDAG &DAG) const override;
+                        const MachineFunction &MF) const override;
 
   bool allowsMisalignedMemoryAccesses(
       EVT VT, unsigned AS, Align Alignment,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index eba1910..41dcffd 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -23,6 +23,7 @@
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/IntrinsicInst.h"
@@ -1427,7 +1428,7 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
 }
 
 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
-                                        const SelectionDAG &DAG) const {
+                                        const MachineFunction &MF) const {
   if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
     return (MemVT.getSizeInBits() <= 4 * 32);
   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index f3d3426..1e48c96 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -16,6 +16,7 @@
 
 #include "AMDGPUISelLowering.h"
 #include "AMDGPUArgumentUsageInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
 
 namespace llvm {
 
@@ -267,7 +268,7 @@ public:
                              Instruction *I = nullptr) const override;
 
   bool canMergeStoresTo(unsigned AS, EVT MemVT,
-                        const SelectionDAG &DAG) const override;
+                        const MachineFunction &MF) const override;
 
   bool allowsMisalignedMemoryAccessesImpl(
       unsigned Size, unsigned AddrSpace, Align Alignment,
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 844b7d4..360fe69 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -680,7 +680,7 @@ class VectorType;
                                    unsigned &Cost) const override;
 
     bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
-                          const SelectionDAG &DAG) const override {
+                          const MachineFunction &MF) const override {
       // Do not merge to larger than i32.
      return (MemVT.getSizeInBits() <= 32);
     }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index caa5f0d..a147993 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5387,11 +5387,10 @@ bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
 }
 
 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
-                                         const SelectionDAG &DAG) const {
+                                         const MachineFunction &MF) const {
   // Do not merge to float value size (128 bytes) if no implicit
   // float attribute is set.
-  bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
-      Attribute::NoImplicitFloat);
+  bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);
 
   if (NoFloat) {
     unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 869857b..e9a27e5 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
 #define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
 
+#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/TargetLowering.h"
 
 namespace llvm {
@@ -989,7 +990,7 @@ namespace llvm {
     }
 
     bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
-                          const SelectionDAG &DAG) const override;
+                          const MachineFunction &MF) const override;
 
     bool isCheapToSpeculateCttz() const override;