From d68458bd56d9d55b05fca5447891aa8752d70509 Mon Sep 17 00:00:00 2001
From: Christudasan Devadasan
Date: Wed, 23 Dec 2020 12:22:36 +0530
Subject: [PATCH] [GlobalISel] Base implementation for sret demotion.

If the return values can't be lowered to registers, SelectionDAG performs
the sret demotion. This patch adds the corresponding base implementation
to the GlobalISel pipeline. Targets should then update their
lowerFormalArguments, lowerReturn and lowerCall implementations to make
use of this feature.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D92953
---
 .../include/llvm/CodeGen/GlobalISel/CallLowering.h | 99 ++++++++++--
 llvm/lib/CodeGen/GlobalISel/CallLowering.cpp | 173 ++++++++++++++++++++-
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 6 +-
 .../Target/AArch64/GISel/AArch64CallLowering.cpp | 3 +-
 .../lib/Target/AArch64/GISel/AArch64CallLowering.h | 5 +-
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 10 +-
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h | 6 +-
 llvm/lib/Target/ARM/ARMCallLowering.cpp | 11 +-
 llvm/lib/Target/ARM/ARMCallLowering.h | 6 +-
 llvm/lib/Target/Mips/MipsCallLowering.cpp | 11 +-
 llvm/lib/Target/Mips/MipsCallLowering.h | 6 +-
 llvm/lib/Target/PowerPC/GISel/PPCCallLowering.cpp | 8 +-
 llvm/lib/Target/PowerPC/GISel/PPCCallLowering.h | 5 +-
 llvm/lib/Target/RISCV/RISCVCallLowering.cpp | 11 +-
 llvm/lib/Target/RISCV/RISCVCallLowering.h | 6 +-
 llvm/lib/Target/X86/X86CallLowering.cpp | 13 +-
 llvm/lib/Target/X86/X86CallLowering.h | 6 +-
 llvm/tools/llvm-exegesis/lib/Assembler.cpp | 7 +-
 18 files changed, 330 insertions(+), 62 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h index dbd7e00..dff73d1 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h @@ -19,6 +19,7 @@ #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/TargetCallingConv.h" +#include "llvm/IR/Attributes.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/Type.h" #include "llvm/Support/ErrorHandling.h" @@ -31,6 +32,7 @@ namespace llvm { class CallBase; class DataLayout; class Function; +class FunctionLoweringInfo; class MachineIRBuilder; struct MachinePointerInfo; class MachineRegisterInfo; @@ -42,21 +44,30 @@ class CallLowering { virtual void anchor(); public: - struct ArgInfo { + struct BaseArgInfo { + Type *Ty; + SmallVector Flags; + bool IsFixed; + + BaseArgInfo(Type *Ty, + ArrayRef Flags = ArrayRef(), + bool IsFixed = true) + : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {} + + BaseArgInfo() : Ty(nullptr), IsFixed(false) {} + }; + + struct ArgInfo : public BaseArgInfo { SmallVector Regs; // If the argument had to be split into multiple parts according to the // target calling convention, then this contains the original vregs // if the argument was an incoming arg. SmallVector OrigRegs; - Type *Ty; - SmallVector Flags; - bool IsFixed; ArgInfo(ArrayRef Regs, Type *Ty, ArrayRef Flags = ArrayRef(), bool IsFixed = true) - : Regs(Regs.begin(), Regs.end()), Ty(Ty), - Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) { + : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()) { if (!Regs.empty() && Flags.empty()) this->Flags.push_back(ISD::ArgFlagsTy()); // FIXME: We should have just one way of saying "no register".
@@ -65,7 +76,7 @@ public: "only void types should have no register"); } - ArgInfo() : Ty(nullptr), IsFixed(false) {} + ArgInfo() : BaseArgInfo() {} }; struct CallLoweringInfo { @@ -101,6 +112,15 @@ public: /// True if the call is to a vararg function. bool IsVarArg = false; + + /// True if the function's return value can be lowered to registers. + bool CanLowerReturn = true; + + /// VReg to hold the hidden sret parameter. + Register DemoteRegister; + + /// The stack index for sret demotion. + int DemoteStackIndex; }; /// Argument handling is mostly uniform between the four places that @@ -292,20 +312,73 @@ public: return false; } + /// Load the returned value from the stack into virtual registers in \p VRegs. + /// It uses the frame index \p FI and the start offset from \p DemoteReg. + /// The loaded data size will be determined from \p RetTy. + void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, + ArrayRef VRegs, Register DemoteReg, + int FI) const; + + /// Store the return value given by \p VRegs into stack starting at the offset + /// specified in \p DemoteReg. + void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, + ArrayRef VRegs, Register DemoteReg) const; + + /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs. + /// This function should be called from the target specific + /// lowerFormalArguments when \p F requires the sret demotion. + void insertSRetIncomingArgument(const Function &F, + SmallVectorImpl &SplitArgs, + Register &DemoteReg, MachineRegisterInfo &MRI, + const DataLayout &DL) const; + + /// For the call-base described by \p CB, insert the hidden sret ArgInfo to + /// the OrigArgs field of \p Info. + void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, + const CallBase &CB, + CallLoweringInfo &Info) const; + + /// \return True if the return type described by \p Outs can be returned + /// without performing sret demotion. + bool checkReturn(CCState &CCInfo, SmallVectorImpl &Outs, + CCAssignFn *Fn) const; + + /// Get the type and the ArgFlags for the split components of \p RetTy as + /// returned by \c ComputeValueVTs. + void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs, + SmallVectorImpl &Outs, + const DataLayout &DL) const; + + /// Toplevel function to check the return type based on the target calling + /// convention. \return True if the return value of \p MF can be returned + /// without performing sret demotion. + bool checkReturnTypeForCallConv(MachineFunction &MF) const; + + /// This hook must be implemented to check whether the return values + /// described by \p Outs can fit into the return registers. If false + /// is returned, an sret-demotion is performed. + virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, + SmallVectorImpl &Outs, bool IsVarArg, + LLVMContext &Context) const { + return true; + } + /// This hook must be implemented to lower outgoing return values, described /// by \p Val, into the specified virtual registers \p VRegs. /// This hook is used by GlobalISel. /// + /// \p FLI is required for sret demotion. + /// /// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter /// that needs to be implicitly returned. /// /// \return True if the lowering succeeds, false otherwise. 
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs, + ArrayRef VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const { if (!supportSwiftError()) { assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror"); - return lowerReturn(MIRBuilder, Val, VRegs); + return lowerReturn(MIRBuilder, Val, VRegs, FLI); } return false; } @@ -313,7 +386,8 @@ public: /// This hook behaves as the extended lowerReturn function, but for targets /// that do not support swifterror value promotion. virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const { + ArrayRef VRegs, + FunctionLoweringInfo &FLI) const { return false; } @@ -326,12 +400,13 @@ public: /// the second in \c VRegs[1], and so on. For each argument, there will be one /// register for each non-aggregate type, as returned by \c computeValueLLTs. /// \p MIRBuilder is set to the proper insertion for the argument - /// lowering. + /// lowering. \p FLI is required for sret demotion. /// /// \return True if the lowering succeeded, false otherwise. virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const { + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const { return false; } diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp index c32811d..e1591a4 100644 --- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp @@ -89,6 +89,24 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, .getFnAttribute("disable-tail-calls") .getValueAsString() != "true"); + CallingConv::ID CallConv = CB.getCallingConv(); + Type *RetTy = CB.getType(); + bool IsVarArg = CB.getFunctionType()->isVarArg(); + + SmallVector SplitArgs; + getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL); + Info.CanLowerReturn = + canLowerReturn(MF, CallConv, SplitArgs, IsVarArg, RetTy->getContext()); + + if (!Info.CanLowerReturn) { + // Callee requires sret demotion. + insertSRetOutgoingArgument(MIRBuilder, CB, Info); + + // The sret demotion isn't compatible with tail-calls, since the sret + // argument points into the caller's stack frame. + CanBeTailCalled = false; + } + // First step is to marshall all the function's parameters into the correct // physregs and memory locations. Gather the sequence of argument types that // we'll pass to the assigner function. 
@@ -116,16 +134,16 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, else Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false); - Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}}; + Info.OrigRet = ArgInfo{ResRegs, RetTy, ISD::ArgFlagsTy{}}; if (!Info.OrigRet.Ty->isVoidTy()) setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB); Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees); - Info.CallConv = CB.getCallingConv(); + Info.CallConv = CallConv; Info.SwiftErrorVReg = SwiftErrorVReg; Info.IsMustTailCall = CB.isMustTailCall(); Info.IsTailCall = CanBeTailCalled; - Info.IsVarArg = CB.getFunctionType()->isVarArg(); + Info.IsVarArg = IsVarArg; return lowerCall(MIRBuilder, Info); } @@ -429,6 +447,155 @@ bool CallLowering::handleAssignments(CCState &CCInfo, return true; } +void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, + ArrayRef VRegs, Register DemoteReg, + int FI) const { + MachineFunction &MF = MIRBuilder.getMF(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + const DataLayout &DL = MF.getDataLayout(); + + SmallVector SplitVTs; + SmallVector Offsets; + ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0); + + assert(VRegs.size() == SplitVTs.size()); + + unsigned NumValues = SplitVTs.size(); + Align BaseAlign = DL.getPrefTypeAlign(RetTy); + Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace()); + LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL); + + MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI); + + for (unsigned I = 0; I < NumValues; ++I) { + Register Addr; + MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]); + auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad, + MRI.getType(VRegs[I]).getSizeInBytes(), + commonAlignment(BaseAlign, Offsets[I])); + MIRBuilder.buildLoad(VRegs[I], Addr, *MMO); + } +} + +void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, + ArrayRef VRegs, + Register DemoteReg) const { + MachineFunction &MF = MIRBuilder.getMF(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + const DataLayout &DL = MF.getDataLayout(); + + SmallVector SplitVTs; + SmallVector Offsets; + ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0); + + assert(VRegs.size() == SplitVTs.size()); + + unsigned NumValues = SplitVTs.size(); + Align BaseAlign = DL.getPrefTypeAlign(RetTy); + unsigned AS = DL.getAllocaAddrSpace(); + LLT OffsetLLTy = + getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL); + + MachinePointerInfo PtrInfo(AS); + + for (unsigned I = 0; I < NumValues; ++I) { + Register Addr; + MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]); + auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, + MRI.getType(VRegs[I]).getSizeInBytes(), + commonAlignment(BaseAlign, Offsets[I])); + MIRBuilder.buildStore(VRegs[I], Addr, *MMO); + } +} + +void CallLowering::insertSRetIncomingArgument( + const Function &F, SmallVectorImpl &SplitArgs, Register &DemoteReg, + MachineRegisterInfo &MRI, const DataLayout &DL) const { + unsigned AS = DL.getAllocaAddrSpace(); + DemoteReg = MRI.createGenericVirtualRegister( + LLT::pointer(AS, DL.getPointerSizeInBits(AS))); + + Type *PtrTy = PointerType::get(F.getReturnType(), AS); + + SmallVector ValueVTs; + ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs); + + // NOTE: Assume that a pointer won't get split into more than one VT. 
+ assert(ValueVTs.size() == 1); + + ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext())); + setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F); + DemoteArg.Flags[0].setSRet(); + SplitArgs.insert(SplitArgs.begin(), DemoteArg); +} + +void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, + const CallBase &CB, + CallLoweringInfo &Info) const { + const DataLayout &DL = MIRBuilder.getDataLayout(); + Type *RetTy = CB.getType(); + unsigned AS = DL.getAllocaAddrSpace(); + LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS)); + + int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject( + DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false); + + Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0); + ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS)); + setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB); + DemoteArg.Flags[0].setSRet(); + + Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg); + Info.DemoteStackIndex = FI; + Info.DemoteRegister = DemoteReg; +} + +bool CallLowering::checkReturn(CCState &CCInfo, + SmallVectorImpl &Outs, + CCAssignFn *Fn) const { + for (unsigned I = 0, E = Outs.size(); I < E; ++I) { + MVT VT = MVT::getVT(Outs[I].Ty); + if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo)) + return false; + } + return true; +} + +void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy, + AttributeList Attrs, + SmallVectorImpl &Outs, + const DataLayout &DL) const { + LLVMContext &Context = RetTy->getContext(); + ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); + + SmallVector SplitVTs; + ComputeValueVTs(*TLI, DL, RetTy, SplitVTs); + addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex); + + for (EVT VT : SplitVTs) { + unsigned NumParts = + TLI->getNumRegistersForCallingConv(Context, CallConv, VT); + MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT); + Type *PartTy = EVT(RegVT).getTypeForEVT(Context); + + for (unsigned I = 0; I < NumParts; ++I) { + Outs.emplace_back(PartTy, Flags); + } + } +} + +bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const { + const auto &F = MF.getFunction(); + Type *ReturnType = F.getReturnType(); + CallingConv::ID CallConv = F.getCallingConv(); + + SmallVector SplitArgs; + getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs, + MF.getDataLayout()); + return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg(), + ReturnType->getContext()); +} + bool CallLowering::analyzeArgInfo(CCState &CCState, SmallVectorImpl &Args, CCAssignFn &AssignFnFixed, diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index c808217..67e8027 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -368,7 +368,7 @@ bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { // The target may mess up with the insertion point, but // this is not important as a return is the last instruction // of the block anyway. 
- return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg); + return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg); } void IRTranslator::emitBranchForMergedCondition( @@ -3067,6 +3067,8 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { else FuncInfo.BPI = nullptr; + FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF); + const auto &TLI = *MF->getSubtarget().getTargetLowering(); SL = std::make_unique(this, FuncInfo); @@ -3140,7 +3142,7 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { } } - if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) { + if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) { OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", F.getSubprogram(), &F.getEntryBlock()); R << "unable to lower arguments: " << ore::NV("Prototype", F.getType()); diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp index f272204..c6be200 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp @@ -274,6 +274,7 @@ void AArch64CallLowering::splitToValueTypes( bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef VRegs, + FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const { auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR); assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) && @@ -440,7 +441,7 @@ bool AArch64CallLowering::fallBackToDAGISel(const Function &F) const { bool AArch64CallLowering::lowerFormalArguments( MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const { + ArrayRef> VRegs, FunctionLoweringInfo &FLI) const { MachineFunction &MF = MIRBuilder.getMF(); MachineBasicBlock &MBB = MIRBuilder.getMBB(); MachineRegisterInfo &MRI = MF.getRegInfo(); diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h index 640a862..1f45c9e 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h +++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h @@ -34,13 +34,14 @@ public: AArch64CallLowering(const AArch64TargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs, + ArrayRef VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const override; bool fallBackToDAGISel(const Function &F) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const override; + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp index cdea537..bf8e57d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp @@ -447,9 +447,9 @@ bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B, return handleAssignments(B, SplitRetInfos, RetHandler); } -bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, - const Value *Val, - ArrayRef VRegs) const { +bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val, + ArrayRef VRegs, + FunctionLoweringInfo &FLI) const { MachineFunction &MF = B.getMF(); MachineRegisterInfo &MRI = MF.getRegInfo(); @@ -775,8 +775,8 @@ static void packSplitRegsToOrigType(MachineIRBuilder &B, } bool 
AMDGPUCallLowering::lowerFormalArguments( - MachineIRBuilder &B, const Function &F, - ArrayRef> VRegs) const { + MachineIRBuilder &B, const Function &F, ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const { CallingConv::ID CC = F.getCallingConv(); // The infrastructure for normal calling convention lowering is essentially diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h index 4d78a4f..45d5488 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h @@ -47,13 +47,15 @@ public: AMDGPUCallLowering(const AMDGPUTargetLowering &TLI); bool lowerReturn(MachineIRBuilder &B, const Value *Val, - ArrayRef VRegs) const override; + ArrayRef VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerFormalArgumentsKernel(MachineIRBuilder &B, const Function &F, ArrayRef> VRegs) const; bool lowerFormalArguments(MachineIRBuilder &B, const Function &F, - ArrayRef> VRegs) const override; + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const override; bool passSpecialInputs(MachineIRBuilder &MIRBuilder, CCState &CCInfo, diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp index 0a38f737..10d66fb 100644 --- a/llvm/lib/Target/ARM/ARMCallLowering.cpp +++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp @@ -263,8 +263,8 @@ bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder, } bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, - const Value *Val, - ArrayRef VRegs) const { + const Value *Val, ArrayRef VRegs, + FunctionLoweringInfo &FLI) const { assert(!Val == VRegs.empty() && "Return value without a vreg"); auto const &ST = MIRBuilder.getMF().getSubtarget(); @@ -410,9 +410,10 @@ struct FormalArgHandler : public ARMIncomingValueHandler { } // end anonymous namespace -bool ARMCallLowering::lowerFormalArguments( - MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const { +bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, + const Function &F, + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const { auto &TLI = *getTLI(); auto Subtarget = TLI.getSubtarget(); diff --git a/llvm/lib/Target/ARM/ARMCallLowering.h b/llvm/lib/Target/ARM/ARMCallLowering.h index ddbc9fe..3be73d4 100644 --- a/llvm/lib/Target/ARM/ARMCallLowering.h +++ b/llvm/lib/Target/ARM/ARMCallLowering.h @@ -33,10 +33,12 @@ public: ARMCallLowering(const ARMTargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const override; + ArrayRef VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const override; + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override; diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp index ade8953..377aa48 100644 --- a/llvm/lib/Target/Mips/MipsCallLowering.cpp +++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp @@ -374,8 +374,8 @@ static void setLocInfo(SmallVectorImpl &ArgLocs, } bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, - const Value *Val, - ArrayRef VRegs) const { + const Value *Val, ArrayRef VRegs, + FunctionLoweringInfo &FLI) const { MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA); @@ -413,9 +413,10 @@ bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, return true; } -bool 
MipsCallLowering::lowerFormalArguments( - MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const { +bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, + const Function &F, + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const { // Quick exit if there aren't any args. if (F.arg_empty()) diff --git a/llvm/lib/Target/Mips/MipsCallLowering.h b/llvm/lib/Target/Mips/MipsCallLowering.h index 6e43e55..1c1c208 100644 --- a/llvm/lib/Target/Mips/MipsCallLowering.h +++ b/llvm/lib/Target/Mips/MipsCallLowering.h @@ -64,10 +64,12 @@ public: MipsCallLowering(const MipsTargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const override; + ArrayRef VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const override; + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override; diff --git a/llvm/lib/Target/PowerPC/GISel/PPCCallLowering.cpp b/llvm/lib/Target/PowerPC/GISel/PPCCallLowering.cpp index dea28e9..e8f8cbf 100644 --- a/llvm/lib/Target/PowerPC/GISel/PPCCallLowering.cpp +++ b/llvm/lib/Target/PowerPC/GISel/PPCCallLowering.cpp @@ -25,6 +25,7 @@ PPCCallLowering::PPCCallLowering(const PPCTargetLowering &TLI) bool PPCCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef VRegs, + FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const { assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) && "Return value without a vreg"); @@ -35,9 +36,10 @@ bool PPCCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, return true; } -bool PPCCallLowering::lowerFormalArguments( - MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const { +bool PPCCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, + const Function &F, + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const { // If VRegs is empty, then there are no formal arguments to lower and thus can // always return true. 
If there are formal arguments, we currently do not diff --git a/llvm/lib/Target/PowerPC/GISel/PPCCallLowering.h b/llvm/lib/Target/PowerPC/GISel/PPCCallLowering.h index ef078aa..5a449f4 100644 --- a/llvm/lib/Target/PowerPC/GISel/PPCCallLowering.h +++ b/llvm/lib/Target/PowerPC/GISel/PPCCallLowering.h @@ -27,10 +27,11 @@ public: PPCCallLowering(const PPCTargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs, + ArrayRef VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const override; + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override; }; diff --git a/llvm/lib/Target/RISCV/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/RISCVCallLowering.cpp index c63a847..d265f3a 100644 --- a/llvm/lib/Target/RISCV/RISCVCallLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVCallLowering.cpp @@ -22,8 +22,8 @@ RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI) : CallLowering(&TLI) {} bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, - const Value *Val, - ArrayRef VRegs) const { + const Value *Val, ArrayRef VRegs, + FunctionLoweringInfo &FLI) const { MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET); @@ -34,9 +34,10 @@ bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, return true; } -bool RISCVCallLowering::lowerFormalArguments( - MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const { +bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, + const Function &F, + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const { if (F.arg_empty()) return true; diff --git a/llvm/lib/Target/RISCV/RISCVCallLowering.h b/llvm/lib/Target/RISCV/RISCVCallLowering.h index 7ce074a..cd7fc4c 100644 --- a/llvm/lib/Target/RISCV/RISCVCallLowering.h +++ b/llvm/lib/Target/RISCV/RISCVCallLowering.h @@ -28,10 +28,12 @@ public: RISCVCallLowering(const RISCVTargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuiler, const Value *Val, - ArrayRef VRegs) const override; + ArrayRef VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const override; + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override; diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp index dee629a..53f5756 100644 --- a/llvm/lib/Target/X86/X86CallLowering.cpp +++ b/llvm/lib/Target/X86/X86CallLowering.cpp @@ -184,9 +184,9 @@ protected: } // end anonymous namespace -bool X86CallLowering::lowerReturn( - MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const { +bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, + const Value *Val, ArrayRef VRegs, + FunctionLoweringInfo &FLI) const { assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) && "Return value without a vreg"); auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0); @@ -322,9 +322,10 @@ protected: } // end anonymous namespace -bool X86CallLowering::lowerFormalArguments( - MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const { +bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, + const Function &F, + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) 
const { if (F.arg_empty()) return true; diff --git a/llvm/lib/Target/X86/X86CallLowering.h b/llvm/lib/Target/X86/X86CallLowering.h index b5ea778..9390122 100644 --- a/llvm/lib/Target/X86/X86CallLowering.h +++ b/llvm/lib/Target/X86/X86CallLowering.h @@ -29,10 +29,12 @@ public: X86CallLowering(const X86TargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const override; + ArrayRef VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef> VRegs) const override; + ArrayRef> VRegs, + FunctionLoweringInfo &FLI) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override; diff --git a/llvm/tools/llvm-exegesis/lib/Assembler.cpp b/llvm/tools/llvm-exegesis/lib/Assembler.cpp index 523cb91..c5be3bf 100644 --- a/llvm/tools/llvm-exegesis/lib/Assembler.cpp +++ b/llvm/tools/llvm-exegesis/lib/Assembler.cpp @@ -11,6 +11,7 @@ #include "SnippetRepetitor.h" #include "Target.h" #include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/CodeGen/FunctionLoweringInfo.h" #include "llvm/CodeGen/GlobalISel/CallLowering.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -128,7 +129,11 @@ void BasicBlockFiller::addReturn(const DebugLoc &DL) { } else { MachineIRBuilder MIB(MF); MIB.setMBB(*MBB); - MF.getSubtarget().getCallLowering()->lowerReturn(MIB, nullptr, {}); + + FunctionLoweringInfo FuncInfo; + FuncInfo.CanLowerReturn = true; + MF.getSubtarget().getCallLowering()->lowerReturn(MIB, nullptr, {}, + FuncInfo); } } -- 2.7.4
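
The notes below sketch how a target is expected to wire the new hooks together; MyTargetCallLowering, RetCC_MyTarget and MyTarget::RET are placeholder names used for illustration only and are not part of the patch.

The canLowerReturn() hook drives the whole mechanism: a target reports whether the split return values described by Outs all fit into return registers, and IRTranslator records the answer in FuncInfo.CanLowerReturn via checkReturnTypeForCallConv(). A minimal override, assuming the target already has a CCAssignFn for return values:

    bool MyTargetCallLowering::canLowerReturn(MachineFunction &MF,
                                              CallingConv::ID CallConv,
                                              SmallVectorImpl<BaseArgInfo> &Outs,
                                              bool IsVarArg,
                                              LLVMContext &Context) const {
      SmallVector<CCValAssign, 16> ArgLocs;
      CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, Context);
      // RetCC_MyTarget stands in for the target's return-value CCAssignFn.
      return checkReturn(CCInfo, Outs, RetCC_MyTarget);
    }

checkReturn() feeds each BaseArgInfo through the assignment function and returns false as soon as one value cannot be assigned to a register, which is what triggers the demotion.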
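
On the incoming side, a target's lowerFormalArguments() checks FLI.CanLowerReturn and, when it is false, prepends the hidden sret pointer with insertSRetIncomingArgument() so that it is assigned like any other incoming argument; the vreg is remembered in FLI.DemoteRegister for use in the epilogue. A hedged sketch, with the target-specific splitting and assignment elided:

    bool MyTargetCallLowering::lowerFormalArguments(
        MachineIRBuilder &MIRBuilder, const Function &F,
        ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
      MachineFunction &MF = MIRBuilder.getMF();
      MachineRegisterInfo &MRI = MF.getRegInfo();
      const DataLayout &DL = MF.getDataLayout();

      SmallVector<ArgInfo, 8> SplitArgs;
      if (!FLI.CanLowerReturn)
        // Prepend the hidden sret pointer argument and record its vreg.
        insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

      // ... split F's real arguments into SplitArgs and run the target's
      // incoming-value handler over them, as before ...
      return true;
    }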
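
In lowerReturn(), a demoted return value is not assigned to registers at all; instead it is stored through the recorded demote vreg with insertSRetStores(). Sketch for the same hypothetical target, with MyTarget::RET standing in for the target's return opcode:

    bool MyTargetCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                           const Value *Val,
                                           ArrayRef<Register> VRegs,
                                           FunctionLoweringInfo &FLI) const {
      auto MIB = MIRBuilder.buildInstrNoInsert(MyTarget::RET);
      if (!FLI.CanLowerReturn) {
        // Demoted return: write the value through the hidden sret pointer
        // recorded by lowerFormalArguments in FLI.DemoteRegister.
        insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
      } else if (Val) {
        // ... the usual split-and-handleAssignments() path ...
      }
      MIRBuilder.insertInstr(MIB);
      return true;
    }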
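
On the caller side, the common CallLowering::lowerCall() already creates the stack slot, prepends the sret argument via insertSRetOutgoingArgument(), and disables tail calls; what remains for the target's lowerCall() is to copy the result back out of that slot once the call sequence has been emitted, roughly:

    // At the end of the target's lowerCall(), after the call has been built
    // (Info.OrigRet.Regs holds the vregs the IR expects the result in):
    if (!Info.CanLowerReturn)
      insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                      Info.DemoteRegister, Info.DemoteStackIndex);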