ArgListTy Args;
SelectionDAG &DAG;
SDLoc DL;
- ImmutableCallSite CS;
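+ // The IR call site being lowered, if any. Null for calls synthesized during
+ // legalization (e.g. libcalls), which have no IR instruction.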
+ const CallBase *CB = nullptr;
SmallVector<ISD::OutputArg, 32> Outs;
SmallVector<SDValue, 32> OutVals;
SmallVector<ISD::InputArg, 32> Ins;
CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
SDValue Target, ArgListTy &&ArgsList,
- ImmutableCallSite Call) {
+ const CallBase &Call) {
RetTy = ResultType;
IsInReg = Call.hasRetAttr(Attribute::InReg);
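+ // A call immediately followed by 'unreachable' can be treated as no-return.
+ // Invokes terminate their block, so they have no next instruction to check.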
DoesNotReturn =
Call.doesNotReturn() ||
- (!Call.isInvoke() &&
- isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
+ (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
IsVarArg = FTy->isVarArg();
- IsReturnValueUsed = !Call.getInstruction()->use_empty();
+ IsReturnValueUsed = !Call.use_empty();
RetSExt = Call.hasRetAttr(Attribute::SExt);
RetZExt = Call.hasRetAttr(Attribute::ZExt);
NumFixedArgs = FTy->getNumParams();
Args = std::move(ArgsList);
- CS = Call;
+ CB = &Call;
return *this;
}
// with deopt state.
LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
} else {
- LowerCallTo(&I, getValue(Callee), false, EHPadBB);
+ LowerCallTo(I, getValue(Callee), false, EHPadBB);
}
// If the value of the invoke is used outside of its defining block, make it
SDValue Callee = DAG.getExternalSymbol(
FunctionName,
DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
- LowerCallTo(&I, Callee, I.isTailCall());
+ LowerCallTo(I, Callee, I.isTailCall());
}
/// Lower the call to the specified intrinsic function.
// There is a platform (e.g. wasm) that uses funclet style IR but does not
// actually use outlined funclets and their LSDA info style.
if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
- assert(CLI.CS);
+ assert(CLI.CB);
WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
- EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()),
- BeginLabel, EndLabel);
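+ // Funclet-style EH reaches this path only through invokes, so the cast to
+ // InvokeInst is safe here.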
+ EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CB), BeginLabel, EndLabel);
} else if (!isScopedEHPersonality(Pers)) {
MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
}
return Result;
}
-void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
+void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
bool isTailCall,
const BasicBlock *EHPadBB) {
auto &DL = DAG.getDataLayout();
- FunctionType *FTy = CS.getFunctionType();
- Type *RetTy = CS.getType();
+ FunctionType *FTy = CB.getFunctionType();
+ Type *RetTy = CB.getType();
TargetLowering::ArgListTy Args;
- Args.reserve(CS.arg_size());
+ Args.reserve(CB.arg_size());
const Value *SwiftErrorVal = nullptr;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (isTailCall) {
// Avoid emitting tail calls in functions with the disable-tail-calls
// attribute.
- auto *Caller = CS.getInstruction()->getParent()->getParent();
+ auto *Caller = CB.getParent()->getParent();
if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
"true")
isTailCall = false;
// We can't tail call inside a function with a swifterror argument. Lowering
// does not support this yet. It would have to move into the swifterror
// register before the call.
if (TLI.supportSwiftError() &&
Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
isTailCall = false;
}
- for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
- i != e; ++i) {
+ for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
TargetLowering::ArgListEntry Entry;
- const Value *V = *i;
+ const Value *V = *I;
// Skip empty types
if (V->getType()->isEmptyTy())
continue;
SDValue ArgNode = getValue(V);
Entry.Node = ArgNode; Entry.Ty = V->getType();
- Entry.setAttributes(&CS, i - CS.arg_begin());
+ Entry.setAttributes(&CB, I - CB.arg_begin());
// Use swifterror virtual register as input to the call.
if (Entry.IsSwiftError && TLI.supportSwiftError()) {
SwiftErrorVal = V;
// We find the virtual register for the actual swifterror argument.
// Instead of using the Value, we use the virtual register instead.
- Entry.Node = DAG.getRegister(
- SwiftError.getOrCreateVRegUseAt(CS.getInstruction(), FuncInfo.MBB, V),
- EVT(TLI.getPointerTy(DL)));
+ Entry.Node =
+ DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
+ EVT(TLI.getPointerTy(DL)));
}
Args.push_back(Entry);
// If the call site has a cfguardtarget operand bundle, create and add an
// additional ArgListEntry.
- if (auto Bundle = CS.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
+ if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
TargetLowering::ArgListEntry Entry;
Value *V = Bundle->Inputs[0];
SDValue ArgNode = getValue(V);
// Check if target-independent constraints permit a tail call here.
// Target-dependent constraints are checked within TLI->LowerCallTo.
- if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
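+ // isInTailCallPosition still expects an ImmutableCallSite, so wrap the
+ // CallBase until that interface is migrated as well.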
+ if (isTailCall &&
+ !isInTailCallPosition(ImmutableCallSite(&CB), DAG.getTarget()))
isTailCall = false;
// Disable tail calls if there is a swifterror argument. Targets have not
// been updated to support tail calls.
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(getCurSDLoc())
.setChain(getRoot())
- .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
+ .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
.setTailCall(isTailCall)
- .setConvergent(CS.isConvergent());
+ .setConvergent(CB.isConvergent());
std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
if (Result.first.getNode()) {
- const Instruction *Inst = CS.getInstruction();
- Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
- setValue(Inst, Result.first);
+ Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
+ setValue(&CB, Result.first);
}
// The last element of CLI.InVals has the SDValue for swifterror return.
if (SwiftErrorVal && TLI.supportSwiftError()) {
// Get the last element of InVals.
SDValue Src = CLI.InVals.back();
- Register VReg = SwiftError.getOrCreateVRegDefAt(
- CS.getInstruction(), FuncInfo.MBB, SwiftErrorVal);
+ Register VReg =
+ SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
DAG.setRoot(CopyNode);
}
// Check if we can potentially perform a tail call. More detailed checking
// is done within LowerCallTo, after more information about the call is
// known.
- LowerCallTo(&I, Callee, I.isTailCall());
+ LowerCallTo(I, Callee, I.isTailCall());
}
namespace {
Flags.setReturned();
}
- getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
- CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);
+ getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
+ CLI.CallConv, ExtendKind);
for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't the first piece, alignment must be 1
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
void CopyToExportRegsIfNeeded(const Value *V);
void ExportFromCurrentBlock(const Value *V);
- void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
+ void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall,
const BasicBlock *EHPadBB = nullptr);
// Lower range metadata from 0 to N to assert zext to an integer of nearest
// Check if it's really possible to do a tail call.
IsTailCall = isEligibleForTailCallOptimization(
Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
- if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall())
+ if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
SmallVector<SDValue, 8> MemOpChains;
auto PtrVT = getPointerTy(DAG.getDataLayout());
- if (IsVarArg && CLI.CS && CLI.CS.isMustTailCall()) {
+ if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) {
const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
for (const auto &F : Forwards) {
SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
SDValue Chain) const {
// If we don't have a call site, this was a call inserted by
// legalization. These can never use special inputs.
- if (!CLI.CS)
+ if (!CLI.CB)
return;
SelectionDAG &DAG = CLI.DAG;
const AMDGPUFunctionArgInfo *CalleeArgInfo
= &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;
- if (const Function *CalleeFunc = CLI.CS.getCalledFunction()) {
+ if (const Function *CalleeFunc = CLI.CB->getCalledFunction()) {
auto &ArgUsageInfo =
DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
"unsupported call to variadic function ");
}
- if (!CLI.CS.getInstruction())
+ if (!CLI.CB)
report_fatal_error("unsupported libcall legalization");
- if (!AMDGPUTargetMachine::EnableFixedFunctionABI && !CLI.CS.getCalledFunction()) {
+ if (!AMDGPUTargetMachine::EnableFixedFunctionABI &&
+ !CLI.CB->getCalledFunction()) {
return lowerUnhandledCall(CLI, InVals,
"unsupported indirect call to function ");
}
if (IsTailCall) {
IsTailCall = isEligibleForTailCallOptimization(
Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
- if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
+ if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) {
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
}
// more times in this block, we can improve codesize by calling indirectly
// as BLXr has a 16-bit encoding.
auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
- if (CLI.CS) {
- auto *BB = CLI.CS.getParent();
+ if (CLI.CB) {
+ auto *BB = CLI.CB->getParent();
PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
count_if(GV->users(), [&BB](const User *U) {
return isa<Instruction>(U) &&
Callee, CallConv, isVarArg, isStructRet,
MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
PreferIndirect);
- if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall())
+ if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
// We don't support GuaranteedTailCallOpt for ARM, only automatically
MachineFrameInfo &MFI = MF.getFrameInfo();
auto PtrVT = getPointerTy(MF.getDataLayout());
- unsigned NumParams = CLI.CS.getInstruction()
- ? CLI.CS.getFunctionType()->getNumParams()
- : 0;
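+ // Calls synthesized during legalization (e.g. libcalls) have no IR call
+ // site; treat them as having zero fixed parameters.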
+ unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0;
if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);
G->getGlobal()->hasProtectedVisibility());
}
}
- if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall())
+ if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
std::string NVPTXTargetLowering::getPrototype(
const DataLayout &DL, Type *retTy, const ArgListTy &Args,
const SmallVectorImpl<ISD::OutputArg> &Outs, MaybeAlign retAlignment,
- ImmutableCallSite CS) const {
+ const CallBase &CB) const {
auto PtrVT = getPointerTy(DL);
bool isABI = (STI.getSmVersion() >= 20);
if (!Outs[OIdx].Flags.isByVal()) {
if (Ty->isAggregateType() || Ty->isVectorTy() || Ty->isIntegerTy(128)) {
unsigned align = 0;
- const CallInst *CallI = cast<CallInst>(CS.getInstruction());
+ const CallInst *CallI = cast<CallInst>(&CB);
// +1 because index 0 is reserved for return type alignment
if (!getAlign(*CallI, i + 1, align))
align = DL.getABITypeAlignment(Ty);
}
Align NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
- ImmutableCallSite CS, Type *Ty,
+ const CallBase *CB, Type *Ty,
unsigned Idx,
const DataLayout &DL) const {
- if (!CS) {
+ if (!CB) {
// There is no call site; fall back to the ABI type alignment.
return DL.getABITypeAlign(Ty);
}
unsigned Alignment = 0;
- const Value *DirectCallee = CS.getCalledFunction();
+ const Value *DirectCallee = CB->getCalledFunction();
if (!DirectCallee) {
// We don't have a direct function symbol, but that may be because of
// constant cast instructions in the call.
- const Instruction *CalleeI = CS.getInstruction();
- assert(CalleeI && "Call target is not a function or derived value?");
// With bitcast'd call targets, the instruction will be the call
- if (isa<CallInst>(CalleeI)) {
+ if (isa<CallInst>(CB)) {
// Check if we have call alignment metadata
- if (getAlign(*cast<CallInst>(CalleeI), Idx, Alignment))
+ if (getAlign(*cast<CallInst>(CB), Idx, Alignment))
return Align(Alignment);
- const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
+ const Value *CalleeV = cast<CallInst>(CB)->getCalledValue();
// Ignore any bitcast instructions
while (isa<ConstantExpr>(CalleeV)) {
const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
bool &isTailCall = CLI.IsTailCall;
ArgListTy &Args = CLI.getArgs();
Type *RetTy = CLI.RetTy;
- ImmutableCallSite CS = CLI.CS;
+ const CallBase *CB = CLI.CB;
const DataLayout &DL = DAG.getDataLayout();
bool isABI = (STI.getSmVersion() >= 20);
SmallVector<EVT, 16> VTs;
SmallVector<uint64_t, 16> Offsets;
ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
- Align ArgAlign = getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
+ Align ArgAlign = getArgumentAlignment(Callee, CB, Ty, paramCount + 1, DL);
unsigned AllocSize = DL.getTypeAllocSize(Ty);
SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
bool NeedAlign; // Does argument declaration specify alignment?
DeclareRetOps);
InFlag = Chain.getValue(1);
} else {
- retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+ retAlignment = getArgumentAlignment(Callee, CB, RetTy, 0, DL);
assert(retAlignment && "retAlignment is guaranteed to be set");
SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue DeclareRetOps[] = {
// Both indirect calls and libcalls have nullptr Func. In order to distinguish
// between them we must rely on the call site value which is valid for
// indirect calls but is always null for libcalls.
- bool isIndirectCall = !Func && CS;
+ bool isIndirectCall = !Func && CB;
if (isa<ExternalSymbolSDNode>(Callee)) {
Function* CalleeFunc = nullptr;
// The prototype is embedded in a string and put as the operand for a
// CallPrototype SDNode which will print out to the value of the string.
SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);
+ std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, *CB);
const char *ProtoStr =
nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
SDValue ProtoOps[] = {
ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
assert(VTs.size() == Ins.size() && "Bad value decomposition");
- Align RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+ Align RetAlign = getArgumentAlignment(Callee, CB, RetTy, 0, DL);
auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
SmallVector<EVT, 6> LoadVTs;
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &,
const SmallVectorImpl<ISD::OutputArg> &,
- MaybeAlign retAlignment, ImmutableCallSite CS) const;
+ MaybeAlign retAlignment, const CallBase &CB) const;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
SelectionDAG &DAG) const override;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- Align getArgumentAlignment(SDValue Callee, ImmutableCallSite CS, Type *Ty,
+ Align getArgumentAlignment(SDValue Callee, const CallBase *CB, Type *Ty,
unsigned Idx, const DataLayout &DL) const;
};
} // namespace llvm
return false;
}
-static bool
-hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
- if (CS.arg_size() != CallerFn->arg_size())
+static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
+ if (CB.arg_size() != CallerFn->arg_size())
return false;
- ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
- ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
+ auto CalleeArgIter = CB.arg_begin();
+ auto CalleeArgEnd = CB.arg_end();
Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}
-bool
-PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
- SDValue Callee,
- CallingConv::ID CalleeCC,
- ImmutableCallSite CS,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SelectionDAG& DAG) const {
+bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
+ SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
// FIXME: Tail calls are currently disabled when using PC Relative addressing.
// If the callee uses the same argument list that the caller is using, then
// we can apply SCO in this case. If not, we need to check whether the callee
// needs stack for passing arguments.
- if (!hasSameArgumentList(&Caller, CS) &&
+ assert(CB && "Expected to have a CallBase!");
+ if (!hasSameArgumentList(&Caller, *CB) &&
needStackSlotPassParameters(Subtarget, Outs)) {
return false;
}
static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
SDValue &Glue, SDValue &Chain,
SDValue CallSeqStart,
- ImmutableCallSite CS, const SDLoc &dl,
+ const CallBase *CB, const SDLoc &dl,
bool hasNest,
const PPCSubtarget &Subtarget) {
// Function pointers in the 64-bit SVR4 ABI do not point to the function
MachineMemOperand::MOInvariant)
: MachineMemOperand::MONone;
- MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
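+ // Without a call site (a libcall) there is no IR value to attach to the
+ // pointer info for the function-descriptor loads.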
+ MachinePointerInfo MPI(CB ? CB->getCalledValue() : nullptr);
// Registers used in building the DAG.
const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
- SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
+ SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
Subtarget.isAIXABI())
if (!CFlags.IsIndirect)
Callee = transformCallee(Callee, DAG, dl, Subtarget);
else if (Subtarget.usesFunctionDescriptors())
- prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CS,
+ prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
dl, CFlags.HasNest, Subtarget);
else
prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
CallingConv::ID CallConv = CLI.CallConv;
bool isVarArg = CLI.IsVarArg;
bool isPatchPoint = CLI.IsPatchPoint;
- ImmutableCallSite CS = CLI.CS;
+ const CallBase *CB = CLI.CB;
if (isTailCall) {
- if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
+ if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
isTailCall = false;
else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
- isTailCall =
- IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
- isVarArg, Outs, Ins, DAG);
+ isTailCall = IsEligibleForTailCallOptimization_64SVR4(
+ Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
else
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
Ins, DAG);
}
}
- if (!isTailCall && CS && CS.isMustTailCall())
+ if (!isTailCall && CB && CB->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
- InVals, CS);
+ InVals, CB);
if (Subtarget.isSVR4ABI())
return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
- InVals, CS);
+ InVals, CB);
if (Subtarget.isAIXABI())
return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
- InVals, CS);
+ InVals, CB);
return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
- InVals, CS);
+ InVals, CB);
}
SDValue PPCTargetLowering::LowerCall_32SVR4(
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const {
+ const CallBase *CB) const {
// See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
// of the 32-bit SVR4 ABI stack frame layout.
TailCallArguments);
return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
- Callee, SPDiff, NumBytes, Ins, InVals, CS);
+ Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
// Copy an argument into memory, being careful to do this outside the
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const {
+ const CallBase *CB) const {
bool isELFv2ABI = Subtarget.isELFv2ABI();
bool isLittleEndian = Subtarget.isLittleEndian();
unsigned NumOps = Outs.size();
TailCallArguments);
return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
- Callee, SPDiff, NumBytes, Ins, InVals, CS);
+ Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
SDValue PPCTargetLowering::LowerCall_Darwin(
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const {
+ const CallBase *CB) const {
unsigned NumOps = Outs.size();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
TailCallArguments);
return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
- Callee, SPDiff, NumBytes, Ins, InVals, CS);
+ Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const {
+ const CallBase *CB) const {
assert((CFlags.CallConv == CallingConv::C ||
CFlags.CallConv == CallingConv::Cold ||
const int SPDiff = 0;
return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
- Callee, SPDiff, NumBytes, Ins, InVals, CS);
+ Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
bool
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
- bool
- IsEligibleForTailCallOptimization_64SVR4(
- SDValue Callee,
- CallingConv::ID CalleeCC,
- ImmutableCallSite CS,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SelectionDAG& DAG) const;
+ bool IsEligibleForTailCallOptimization_64SVR4(
+ SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB,
+ bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
SDValue Chain, SDValue &LROpOut,
SDValue &Callee, int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const;
+ const CallBase *CB) const;
SDValue
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const;
+ const CallBase *CB) const;
SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const;
+ const CallBase *CB) const;
SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const;
+ const CallBase *CB) const;
SDValue LowerCall_AIX(SDValue Chain, SDValue Callee, CallFlags CFlags,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite CS) const;
+ const CallBase *CB) const;
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
if (IsTailCall)
++NumTailCalls;
- else if (CLI.CS && CLI.CS.isMustTailCall())
+ else if (CLI.CB && CLI.CB->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
}
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
- ImmutableCallSite CS) {
- if (CS)
- return CS.hasFnAttr(Attribute::ReturnsTwice);
+ const CallBase *Call) {
+ if (Call)
+ return Call->hasFnAttr(Attribute::ReturnsTwice);
const Function *CalleeFn = nullptr;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
InFlag = Chain.getValue(1);
}
- bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
+ bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
SDValue Callee = CLI.Callee;
- bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
+ bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30 : 0;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
// Set inreg flag manually for codegen generated library calls that
// return float.
- if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS)
+ if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
CLI.Ins[0].Flags.setInReg();
RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
if (CLI.IsTailCall) {
auto NoTail = [&](const char *Msg) {
- if (CLI.CS && CLI.CS.isMustTailCall())
+ if (CLI.CB && CLI.CB->isMustTailCall())
fail(DL, DAG, Msg);
CLI.IsTailCall = false;
};
"match");
// If pointers to local stack values are passed, we cannot tail call
- if (CLI.CS) {
- for (auto &Arg : CLI.CS.args()) {
+ if (CLI.CB) {
+ for (auto &Arg : CLI.CB->args()) {
Value *Val = Arg.get();
// Trace the value back through pointer operations
while (true) {
bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
CallConv == CallingConv::Tail;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
- const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
+ const auto *CI = dyn_cast_or_null<CallInst>(CLI.CB);
const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
(Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
- const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
+ const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CB);
bool HasNoCfCheck =
(CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
const Module *M = MF.getMMI().getModule();
isTailCall = false;
}
- bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
+ bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
if (IsMustTail) {
// Force this to be a tail call. The verifier rules are enough to ensure
// that we can lower this successfully without moving the return address
// is thrown, the runtime will not restore CSRs.
// FIXME: Model this more precisely so that we can register allocate across
// the normal edge and spill and fill across the exceptional edge.
- if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
+ if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
const Function &CallerFn = MF.getFunction();
EHPersonality Pers =
CallerFn.hasPersonalityFn()
DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
// Save heapallocsite metadata.
- if (CLI.CS)
- if (MDNode *HeapAlloc = CLI.CS->getMetadata("heapallocsite"))
+ if (CLI.CB)
+ if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
// Create the CALLSEQ_END node.