From: Matthias Braun
Date: Wed, 30 Mar 2016 22:46:04 +0000 (+0000)
Subject: CodeGen: Factor out code for tail call result compatibility check; NFC
X-Git-Tag: llvmorg-3.9.0-rc1~10437
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8d41436004635a796caf2bbe2606a9a083675bb5;p=platform%2Fupstream%2Fllvm.git

CodeGen: Factor out code for tail call result compatibility check; NFC

llvm-svn: 264959
---

diff --git a/llvm/include/llvm/CodeGen/CallingConvLower.h b/llvm/include/llvm/CodeGen/CallingConvLower.h
index faf4896..92e5856 100644
--- a/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -513,6 +513,14 @@ public:
       SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
       CCAssignFn Fn);
 
+  /// Returns true if the results of the two calling conventions are compatible.
+  /// This is usually part of the check for tailcall eligibility.
+  static bool resultsCompatible(CallingConv::ID CalleeCC,
+                                CallingConv::ID CallerCC, MachineFunction &MF,
+                                LLVMContext &C,
+                                const SmallVectorImpl<ISD::InputArg> &Ins,
+                                CCAssignFn CalleeFn, CCAssignFn CallerFn);
+
 private:
   /// MarkAllocated - Mark a register and all of its aliases as allocated.
   void MarkAllocated(unsigned Reg);
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
index 2a8a9e7..7d67bcf 100644
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -249,3 +249,39 @@ void CCState::analyzeMustTailForwardedRegisters(
     }
   }
 }
+
+bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
+                                CallingConv::ID CallerCC, MachineFunction &MF,
+                                LLVMContext &C,
+                                const SmallVectorImpl<ISD::InputArg> &Ins,
+                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
+  if (CalleeCC == CallerCC)
+    return true;
+  SmallVector<CCValAssign, 4> RVLocs1;
+  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
+  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);
+
+  SmallVector<CCValAssign, 4> RVLocs2;
+  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
+  CCInfo2.AnalyzeCallResult(Ins, CallerFn);
+
+  if (RVLocs1.size() != RVLocs2.size())
+    return false;
+  for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
+    const CCValAssign &Loc1 = RVLocs1[I];
+    const CCValAssign &Loc2 = RVLocs2[I];
+    if (Loc1.getLocInfo() != Loc2.getLocInfo())
+      return false;
+    bool RegLoc1 = Loc1.isRegLoc();
+    if (RegLoc1 != Loc2.isRegLoc())
+      return false;
+    if (RegLoc1) {
+      if (Loc1.getLocReg() != Loc2.getLocReg())
+        return false;
+    } else {
+      if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
+        return false;
+    }
+  }
+  return true;
+}
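
For context, a minimal sketch (not part of this commit) of how a target's
tail-call eligibility check can call the new helper. The wrapper name
resultsPassedIdentically and its parameter list are illustrative only; the
in-tree callers pass their TableGen-generated RetCC_*/CCAssignFnFor*
functions directly, as the target hunks below show.

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Returns true if a call from CallerCC code to a CalleeCC function may be
// tail-called as far as result passing is concerned: both conventions must
// return the values described by Ins in the same registers or stack slots.
static bool resultsPassedIdentically(CallingConv::ID CalleeCC,
                                     CallingConv::ID CallerCC,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     SelectionDAG &DAG, CCAssignFn *RetCC) {
  MachineFunction &MF = DAG.getMachineFunction();
  LLVMContext &C = *DAG.getContext();
  // Identical conventions are trivially compatible; otherwise both result
  // assignments are computed and compared location by location.
  return CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
                                    RetCC, RetCC);
}

The AArch64, ARM and X86 hunks below drop their hand-rolled copies of this
comparison in favour of the shared CCState::resultsCompatible call.
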
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a262b1d..5351f28 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2812,7 +2812,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
   if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
     return false;
 
-  const MachineFunction &MF = DAG.getMachineFunction();
+  MachineFunction &MF = DAG.getMachineFunction();
   const Function *CallerF = MF.getFunction();
   CallingConv::ID CallerCC = CallerF->getCallingConv();
   bool CCMatch = CallerCC == CalleeCC;
@@ -2861,6 +2861,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
   assert((!isVarArg || CalleeCC == CallingConv::C) &&
          "Unexpected variadic calling convention");
 
+  LLVMContext &C = *DAG.getContext();
   if (isVarArg && !Outs.empty()) {
     // At least two cases here: if caller is fastcc then we can't have any
     // memory arguments (we'd be expected to clean up the stack afterwards). If
@@ -2869,8 +2870,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
     // FIXME: for now we take the most conservative of these in both cases:
     // disallow all variadic memory operands.
     SmallVector<CCValAssign, 16> ArgLocs;
-    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
-                   *DAG.getContext());
+    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
 
     CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true));
     for (const CCValAssign &ArgLoc : ArgLocs)
@@ -2878,43 +2878,18 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
       return false;
   }
 
-  // If the calling conventions do not match, then we'd better make sure the
-  // results are returned in the same way as what the caller expects.
-  if (!CCMatch) {
-    SmallVector<CCValAssign, 16> RVLocs1;
-    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
-                    *DAG.getContext());
-    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForCall(CalleeCC, isVarArg));
-
-    SmallVector<CCValAssign, 16> RVLocs2;
-    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
-                    *DAG.getContext());
-    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForCall(CallerCC, isVarArg));
-
-    if (RVLocs1.size() != RVLocs2.size())
-      return false;
-    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
-      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
-        return false;
-      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
-        return false;
-      if (RVLocs1[i].isRegLoc()) {
-        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
-          return false;
-      } else {
-        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
-          return false;
-      }
-    }
-  }
+  // Check that the call results are passed in the same way.
+  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
+                                  CCAssignFnForCall(CalleeCC, isVarArg),
+                                  CCAssignFnForCall(CallerCC, isVarArg)))
+    return false;
 
   // Nothing more to check if the callee is taking no arguments
   if (Outs.empty())
     return true;
 
   SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
-                 *DAG.getContext());
+  CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
 
   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
 
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index da72c25..8bdb32a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2100,7 +2100,8 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     SelectionDAG& DAG) const {
-  const Function *CallerF = DAG.getMachineFunction().getFunction();
+  MachineFunction &MF = DAG.getMachineFunction();
+  const Function *CallerF = MF.getFunction();
   CallingConv::ID CallerCC = CallerF->getCallingConv();
   bool CCMatch = CallerCC == CalleeCC;
 
@@ -2147,41 +2148,17 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
       return false;
   }
 
-  // If the calling conventions do not match, then we'd better make sure the
-  // results are returned in the same way as what the caller expects.
-  if (!CCMatch) {
-    SmallVector<CCValAssign, 16> RVLocs1;
-    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
-                       *DAG.getContext(), Call);
-    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
-
-    SmallVector<CCValAssign, 16> RVLocs2;
-    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
-                       *DAG.getContext(), Call);
-    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
-
-    if (RVLocs1.size() != RVLocs2.size())
-      return false;
-    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
-      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
-        return false;
-      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
-        return false;
-      if (RVLocs1[i].isRegLoc()) {
-        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
-          return false;
-      } else {
-        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
-          return false;
-      }
-    }
-  }
+  // Check that the call results are passed in the same way.
+  LLVMContext &C = *DAG.getContext();
+  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
+                                  CCAssignFnForNode(CalleeCC, true, isVarArg),
+                                  CCAssignFnForNode(CallerCC, true, isVarArg)))
+    return false;
 
   // If Caller's vararg or byval argument has been split between registers and
   // stack, do not perform tail call, since part of the argument is in caller's
   // local frame.
-  const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
-                                      getInfo<ARMFunctionInfo>();
+  const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
   if (AFI_Caller->getArgRegsSaveSize())
     return false;
 
@@ -2191,13 +2168,10 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
   // Check if stack adjustment is needed. For now, do not do this if any
   // argument is passed on the stack.
   SmallVector<CCValAssign, 16> ArgLocs;
-  ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
-                    *DAG.getContext(), Call);
+  ARMCCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C, Call);
   CCInfo.AnalyzeCallOperands(Outs,
                              CCAssignFnForNode(CalleeCC, false, isVarArg));
   if (CCInfo.getNextStackOffset()) {
-    MachineFunction &MF = DAG.getMachineFunction();
-
     // Check if the arguments are already laid out in the right way as
     // the caller's fixed stack objects.
     MachineFrameInfo *MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d7c3c65..4141932 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3829,6 +3829,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
 
   // Do not sibcall optimize vararg calls unless all arguments are passed via
   // registers.
+  LLVMContext &C = *DAG.getContext();
   if (isVarArg && !Outs.empty()) {
     // Optimizing for varargs on Win64 is unlikely to be safe without
     // additional testing.
@@ -3836,8 +3837,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
       return false;
 
     SmallVector<CCValAssign, 16> ArgLocs;
-    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
-                   *DAG.getContext());
+    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
 
     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
@@ -3857,8 +3857,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
     }
     if (Unused) {
       SmallVector<CCValAssign, 16> RVLocs;
-      CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
-                     *DAG.getContext());
+      CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
       CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
       for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
         CCValAssign &VA = RVLocs[i];
@@ -3867,35 +3866,10 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
     }
   }
 
-  // If the calling conventions do not match, then we'd better make sure the
-  // results are returned in the same way as what the caller expects.
-  if (!CCMatch) {
-    SmallVector<CCValAssign, 16> RVLocs1;
-    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
-                    *DAG.getContext());
-    CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
-
-    SmallVector<CCValAssign, 16> RVLocs2;
-    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
-                    *DAG.getContext());
-    CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
-
-    if (RVLocs1.size() != RVLocs2.size())
-      return false;
-    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
-      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
-        return false;
-      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
-        return false;
-      if (RVLocs1[i].isRegLoc()) {
-        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
-          return false;
-      } else {
-        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
-          return false;
-      }
-    }
-  }
+  // Check that the call results are passed in the same way.
+  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
+                                  RetCC_X86, RetCC_X86))
+    return false;
 
   unsigned StackArgsSize = 0;
 
@@ -3905,8 +3879,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
     // Check if stack adjustment is needed. For now, do not do this if any
     // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
-    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
-                   *DAG.getContext());
+    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
 
     // Allocate shadow area for Win64
     if (IsCalleeWin64)