/// returns. However, once a decision has been made on where an
/// argument should go, exactly what happens can vary slightly. This
/// class abstracts the differences.
- struct ValueHandler {
- ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
- MachineRegisterInfo &MRI, CCAssignFn *AssignFn_,
- CCAssignFn *AssignFnVarArg_ = nullptr)
- : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn_),
- AssignFnVarArg(AssignFnVarArg_),
+ ///
+ /// ValueAssigner should not depend on any specific function state, and
+ /// only determine the types and locations for arguments.
+ struct ValueAssigner {
+ ValueAssigner(bool IsIncoming, CCAssignFn *AssignFn_,
+ CCAssignFn *AssignFnVarArg_ = nullptr)
+ : AssignFn(AssignFn_), AssignFnVarArg(AssignFnVarArg_),
IsIncomingArgumentHandler(IsIncoming) {
// Some targets change the handler depending on whether the call is
AssignFnVarArg = AssignFn;
}
+ virtual ~ValueAssigner() = default;
+
+ /// Returns true if the handler is dealing with incoming arguments,
+ /// i.e. those that move values from some physical location to vregs.
+ bool isIncomingArgumentHandler() const {
+ return IsIncomingArgumentHandler;
+ }
+
+ /// Wrap call to (typically tablegenerated CCAssignFn). This may be
+ /// overridden to track additional state information as arguments are
+ /// assigned or apply target specific hacks around the legacy
+ /// infrastructure.
+ virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
+ ISD::ArgFlagsTy Flags, CCState &State) {
+ if (getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
+ State))
+ return true;
+ StackOffset = State.getNextStackOffset();
+ return false;
+ }
+
+ /// Assignment function to use for a general call.
+ CCAssignFn *AssignFn;
+
+ /// Assignment function to use for a variadic call. This is usually the same
+ /// as AssignFn on most targets.
+ CCAssignFn *AssignFnVarArg;
+
+ /// Stack offset for next argument. At the end of argument evaluation, this
+ /// is typically the total stack size.
+ uint64_t StackOffset = 0;
+
+ /// Select the appropriate assignment function depending on whether this is
+ /// a variadic call.
+ CCAssignFn *getAssignFn(bool IsVarArg) const {
+ return IsVarArg ? AssignFnVarArg : AssignFn;
+ }
+
+ private:
+ bool IsIncomingArgumentHandler;
+ virtual void anchor();
+ };
+
+ /// ValueAssigner for values flowing into the current code (formal
+ /// arguments, or results received back from a call); constructs the base
+ /// ValueAssigner with IsIncoming = true.
+ struct IncomingValueAssigner : public ValueAssigner {
+ IncomingValueAssigner(CCAssignFn *AssignFn_,
+ CCAssignFn *AssignFnVarArg_ = nullptr)
+ : ValueAssigner(true, AssignFn_, AssignFnVarArg_) {}
+ };
+
+ /// ValueAssigner for values leaving the current code (arguments passed to
+ /// a call, or values being returned); constructs the base ValueAssigner
+ /// with IsIncoming = false.
+ struct OutgoingValueAssigner : public ValueAssigner {
+ OutgoingValueAssigner(CCAssignFn *AssignFn_,
+ CCAssignFn *AssignFnVarArg_ = nullptr)
+ : ValueAssigner(false, AssignFn_, AssignFnVarArg_) {}
+ };
+
+ struct ValueHandler {
+ MachineIRBuilder &MIRBuilder;
+ MachineRegisterInfo &MRI;
+ bool IsIncomingArgumentHandler;
+
+ ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
+ MachineRegisterInfo &MRI)
+ : MIRBuilder(MIRBuilder), MRI(MRI),
+ IsIncomingArgumentHandler(IsIncoming) {}
+
virtual ~ValueHandler() = default;
/// Returns true if the handler is dealing with incoming arguments,
///
/// This is overridable primarily for targets to maintain compatibility with
/// hacks around the existing DAG call lowering infrastructure.
- virtual uint64_t getStackValueStoreSize(const CCValAssign &VA) const;
+ virtual uint64_t getStackValueStoreSize(const DataLayout &DL,
+ const CCValAssign &VA) const;
/// The specified value has been assigned to a physical register,
/// handle the appropriate COPY (either to or from) and mark any
/// to at most MaxSize bits. If MaxSizeBits is 0 then no maximum is set.
Register extendRegister(Register ValReg, CCValAssign &VA,
unsigned MaxSizeBits = 0);
-
- /// Wrap call to (typically tablegenerated CCAssignFn). This may be
- /// overridden to track additional state information as arguments are
- /// assigned or apply target specific hacks around the legacy
- /// infrastructure.
- virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
- ISD::ArgFlagsTy Flags, CCState &State) {
- return getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
- State);
- }
-
- MachineIRBuilder &MIRBuilder;
- MachineRegisterInfo &MRI;
-
- /// Assignment function to use for a general call.
- CCAssignFn *AssignFn;
-
- /// Assignment function to use for a variadic call. This is usually the same
- /// as AssignFn.
- CCAssignFn *AssignFnVarArg;
-
- /// Select the appropriate assignment function depending on whether this is
- /// a variadic call.
- CCAssignFn *getAssignFn(bool IsVarArg) const {
- return IsVarArg ? AssignFnVarArg : AssignFn;
- }
-
- private:
- bool IsIncomingArgumentHandler;
- virtual void anchor();
};
+ /// Base class for ValueHandlers used for arguments coming into the current
+ /// function, or for return values received from a call.
struct IncomingValueHandler : public ValueHandler {
- IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn_,
- CCAssignFn *AssignFnVarArg_ = nullptr)
- : ValueHandler(true, MIRBuilder, MRI, AssignFn_, AssignFnVarArg_) {}
+ IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
+ : ValueHandler(/*IsIncoming*/ true, MIRBuilder, MRI) {}
/// Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on \p
/// VA, returning the new register if a hint was inserted.
CCValAssign &VA) override;
};
+ /// Base class for ValueHandlers used for arguments passed to a function call,
+ /// or for return values.
struct OutgoingValueHandler : public ValueHandler {
- OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn,
- CCAssignFn *AssignFnVarArg = nullptr)
- : ValueHandler(false, MIRBuilder, MRI, AssignFn, AssignFnVarArg) {}
+ OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
+ : ValueHandler(/*IsIncoming*/ false, MIRBuilder, MRI) {}
};
protected:
void unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg, Type *PackedTy,
MachineIRBuilder &MIRBuilder) const;
- /// Invoke Handler::assignArg on each of the given \p Args and then use
+ /// Analyze the argument list in \p Args, using \p Assigner to populate \p
+ /// CCInfo. This will determine the types and locations to use for passed or
+ /// returned values. This may resize fields in \p Args if the value is split
+ /// across multiple registers or stack slots.
+ ///
+ /// This is independent of the function state and can be used
+ /// to determine how a call would pass arguments without needing to change the
+ /// function. This can be used to check if arguments are suitable for tail
+ /// call lowering.
+ ///
+ /// \return True if everything has succeeded, false otherwise.
+ bool determineAssignments(ValueAssigner &Assigner,
+ SmallVectorImpl<ArgInfo> &Args,
+ CCState &CCInfo) const;
+
+ /// Invoke ValueAssigner::assignArg on each of the given \p Args and then use
/// \p Handler to move them to the assigned locations.
///
/// \return True if everything has succeeded, false otherwise.
- bool handleAssignments(MachineIRBuilder &MIRBuilder,
- SmallVectorImpl<ArgInfo> &Args, ValueHandler &Handler,
- CallingConv::ID CallConv, bool IsVarArg,
- Register ThisReturnReg = Register()) const;
- bool handleAssignments(CCState &CCState,
+ bool determineAndHandleAssignments(ValueHandler &Handler,
+ ValueAssigner &Assigner,
+ SmallVectorImpl<ArgInfo> &Args,
+ MachineIRBuilder &MIRBuilder,
+ CallingConv::ID CallConv, bool IsVarArg,
+ Register ThisReturnReg = Register()) const;
+
+ /// Use \p Handler to insert code to handle the argument/return values
+ /// represented by \p Args. It is expected that determineAssignments has
+ /// previously been called on these arguments to populate \p CCState and
+ /// \p ArgLocs.
+ bool handleAssignments(ValueHandler &Handler, SmallVectorImpl<ArgInfo> &Args,
+ CCState &CCState,
SmallVectorImpl<CCValAssign> &ArgLocs,
MachineIRBuilder &MIRBuilder,
- SmallVectorImpl<ArgInfo> &Args, ValueHandler &Handler,
Register ThisReturnReg = Register()) const;
- /// Analyze passed or returned values from a call, supplied in \p ArgInfo,
- /// incorporating info about the passed values into \p CCState.
- ///
- /// Used to check if arguments are suitable for tail call lowering.
- bool analyzeArgInfo(CCState &CCState, SmallVectorImpl<ArgInfo> &Args,
- CCAssignFn &AssignFnFixed,
- CCAssignFn &AssignFnVarArg) const;
-
/// Check whether parameters to a call that are passed in callee saved
/// registers are the same as from the calling function. This needs to be
/// checked for tail call eligibility.
/// \p Info is the CallLoweringInfo for the call.
/// \p MF is the MachineFunction for the caller.
/// \p InArgs contains the results of the call.
- /// \p CalleeAssignFnFixed is the CCAssignFn to be used for the callee for
- /// fixed arguments.
- /// \p CalleeAssignFnVarArg is similar, but for varargs.
- /// \p CallerAssignFnFixed is the CCAssignFn to be used for the caller for
- /// fixed arguments.
- /// \p CallerAssignFnVarArg is similar, but for varargs.
+ /// \p CalleeAssigner specifies the target's handling of the argument types
+ /// for the callee.
+ /// \p CallerAssigner specifies the target's handling of the
+ /// argument types for the caller.
bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF,
SmallVectorImpl<ArgInfo> &InArgs,
- CCAssignFn &CalleeAssignFnFixed,
- CCAssignFn &CalleeAssignFnVarArg,
- CCAssignFn &CallerAssignFnFixed,
- CCAssignFn &CallerAssignFnVarArg) const;
+ ValueAssigner &CalleeAssigner,
+ ValueAssigner &CallerAssigner) const;
public:
CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
B.buildUnmerge(UnmergeResults, UnmergeSrc);
}
-bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
- SmallVectorImpl<ArgInfo> &Args,
- ValueHandler &Handler,
- CallingConv::ID CallConv, bool IsVarArg,
- Register ThisReturnReg) const {
+bool CallLowering::determineAndHandleAssignments(
+ ValueHandler &Handler, ValueAssigner &Assigner,
+ SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
+ CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
- return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler,
+ if (!determineAssignments(Assigner, Args, CCInfo))
+ return false;
+
+ return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
ThisReturnReg);
}
return TargetOpcode::G_ANYEXT;
}
-bool CallLowering::handleAssignments(CCState &CCInfo,
- SmallVectorImpl<CCValAssign> &ArgLocs,
- MachineIRBuilder &MIRBuilder,
- SmallVectorImpl<ArgInfo> &Args,
- ValueHandler &Handler,
- Register ThisReturnReg) const {
- MachineFunction &MF = MIRBuilder.getMF();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- const Function &F = MF.getFunction();
- const DataLayout &DL = F.getParent()->getDataLayout();
+bool CallLowering::determineAssignments(ValueAssigner &Assigner,
+ SmallVectorImpl<ArgInfo> &Args,
+ CCState &CCInfo) const {
+ LLVMContext &Ctx = CCInfo.getContext();
+ const CallingConv::ID CallConv = CCInfo.getCallingConv();
unsigned NumArgs = Args.size();
for (unsigned i = 0; i != NumArgs; ++i) {
EVT CurVT = EVT::getEVT(Args[i].Ty);
- MVT NewVT = TLI->getRegisterTypeForCallingConv(
- F.getContext(), CCInfo.getCallingConv(), CurVT);
+ MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
// If we need to split the type over multiple regs, check it's a scenario
// we currently support.
- unsigned NumParts = TLI->getNumRegistersForCallingConv(
- F.getContext(), CCInfo.getCallingConv(), CurVT);
+ unsigned NumParts =
+ TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
if (NumParts == 1) {
// Try to use the register type if we couldn't assign the VT.
- if (Handler.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
- Args[i].Flags[0], CCInfo))
+ if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
+ Args[i].Flags[0], CCInfo))
return false;
continue;
}
Flags.setSplitEnd();
}
- if (!Handler.isIncomingArgumentHandler()) {
+ if (!Assigner.isIncomingArgumentHandler()) {
// TODO: Also check if there is a valid extension that preserves the
// bits. However currently this call lowering doesn't support non-exact
// split parts, so that can't be tested.
}
Args[i].Flags.push_back(Flags);
- if (Handler.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
- Args[i].Flags[Part], CCInfo)) {
+ if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
+ Args[i].Flags[Part], CCInfo)) {
// Still couldn't assign this smaller part type for some reason.
return false;
}
}
}
+ return true;
+}
+
+bool CallLowering::handleAssignments(ValueHandler &Handler,
+ SmallVectorImpl<ArgInfo> &Args,
+ CCState &CCInfo,
+ SmallVectorImpl<CCValAssign> &ArgLocs,
+ MachineIRBuilder &MIRBuilder,
+ Register ThisReturnReg) const {
+ MachineFunction &MF = MIRBuilder.getMF();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const Function &F = MF.getFunction();
+ const DataLayout &DL = F.getParent()->getDataLayout();
+
+ const unsigned NumArgs = Args.size();
+
for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
assert(j < ArgLocs.size() && "Skipped too many arg locs");
CCValAssign &VA = ArgLocs[j];
// TODO: The memory size may be larger than the value we need to
// store. We may need to adjust the offset for big endian targets.
- uint64_t MemSize = Handler.getStackValueStoreSize(VA);
+ uint64_t MemSize = Handler.getStackValueStoreSize(DL, VA);
MachinePointerInfo MPO;
Register StackAddr =
return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
}
-bool CallLowering::analyzeArgInfo(CCState &CCState,
- SmallVectorImpl<ArgInfo> &Args,
- CCAssignFn &AssignFnFixed,
- CCAssignFn &AssignFnVarArg) const {
- for (unsigned i = 0, e = Args.size(); i < e; ++i) {
- MVT VT = MVT::getVT(Args[i].Ty);
- CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
- if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
- // Bail out on anything we can't handle.
- LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
- << " (arg number = " << i << "\n");
- return false;
- }
- }
- return true;
-}
-
bool CallLowering::parametersInCSRMatch(
const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
const SmallVectorImpl<CCValAssign> &OutLocs,
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
MachineFunction &MF,
SmallVectorImpl<ArgInfo> &InArgs,
- CCAssignFn &CalleeAssignFnFixed,
- CCAssignFn &CalleeAssignFnVarArg,
- CCAssignFn &CallerAssignFnFixed,
- CCAssignFn &CallerAssignFnVarArg) const {
+ ValueAssigner &CalleeAssigner,
+ ValueAssigner &CallerAssigner) const {
const Function &F = MF.getFunction();
CallingConv::ID CalleeCC = Info.CallConv;
CallingConv::ID CallerCC = F.getCallingConv();
SmallVector<CCValAssign, 16> ArgLocs1;
CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
- if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
- CalleeAssignFnVarArg))
+ if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
return false;
SmallVector<CCValAssign, 16> ArgLocs2;
CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
- if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
- CalleeAssignFnVarArg))
+ if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
return false;
// We need the argument locations to match up exactly. If there's more in
}
uint64_t CallLowering::ValueHandler::getStackValueStoreSize(
- const CCValAssign &VA) const {
+ const DataLayout &DL, const CCValAssign &VA) const {
const EVT ValVT = VA.getValVT();
if (ValVT != MVT::iPTR)
return ValVT.getStoreSize();
- const DataLayout &DL = MIRBuilder.getDataLayout();
-
/// FIXME: We need to get the correct pointer address space.
return DL.getPointerSize();
}
llvm_unreachable("unable to extend register");
}
-void CallLowering::ValueHandler::anchor() {}
+void CallLowering::ValueAssigner::anchor() {}
Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
Register SrcReg,
}
namespace {
+
+/// Incoming-value assigner for AArch64. Applies
+/// applyStackPassedSmallTypeDAGHack (which adjusts the value/location types
+/// for small stack-passed values, presumably i1/i8/i16 per the outgoing
+/// assigner's note — confirm against its definition) before delegating to
+/// the generic IncomingValueAssigner::assignArg.
+struct AArch64IncomingValueAssigner
+ : public CallLowering::IncomingValueAssigner {
+ AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
+ CCAssignFn *AssignFnVarArg_)
+ : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}
+
+ bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+ CCState &State) override {
+ // Adjust ValVT/LocVT for DAG compatibility before the generic
+ // assignment runs.
+ applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
+ return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
+ LocInfo, Info, Flags, State);
+ }
+};
+
+/// Outgoing-value assigner for AArch64. Selects between the fixed and
+/// vararg CCAssignFn per argument (Win64 vararg callees use the vararg
+/// convention even for fixed arguments), applies the small-type stack-pass
+/// adjustment for non-vararg non-return values, and records the running
+/// stack offset after each assignment.
+struct AArch64OutgoingValueAssigner
+ : public CallLowering::OutgoingValueAssigner {
+ const AArch64Subtarget &Subtarget;
+
+ /// Track if this is used for a return instead of function argument
+ /// passing. We apply a hack to i1/i8/i16 stack passed values, but do not use
+ /// stack passed returns for them and cannot apply the type adjustment.
+ bool IsReturn;
+
+ AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
+ CCAssignFn *AssignFnVarArg_,
+ const AArch64Subtarget &Subtarget_,
+ bool IsReturn)
+ : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
+ Subtarget(Subtarget_), IsReturn(IsReturn) {}
+
+ bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+ CCState &State) override {
+ // Win64 vararg functions pass even their fixed arguments using the
+ // vararg calling convention.
+ bool IsCalleeWin = Subtarget.isCallingConvWin64(State.getCallingConv());
+ bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
+
+ if (!State.isVarArg() && !UseVarArgsCCForFixed && !IsReturn)
+ applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
+
+ bool Res;
+ if (Info.IsFixed && !UseVarArgsCCForFixed)
+ Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
+ else
+ Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
+
+ // Record total stack usage so far; callers read this back as
+ // Assigner.StackOffset (e.g. for ADJCALLSTACK bookkeeping).
+ StackOffset = State.getNextStackOffset();
+ return Res;
+ }
+};
+
struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
- IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : IncomingValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}
+ IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
+ : IncomingValueHandler(MIRBuilder, MRI) {}
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO,
int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
- StackUsed = std::max(StackUsed, Size + Offset);
return AddrReg.getReg(0);
}
- uint64_t getStackValueStoreSize(const CCValAssign &VA) const override {
+ uint64_t getStackValueStoreSize(const DataLayout &,
+ const CCValAssign &VA) const override {
return getStackValueStoreSizeHack(VA);
}
}
}
- bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
- CCState &State) override {
- applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
- return getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
- State);
- }
-
/// How the physical register gets marked varies between formal
/// parameters (it's a basic-block live-in), and a call instruction
/// (it's an implicit-def of the BL).
virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
-
- uint64_t StackUsed;
};
struct FormalArgHandler : public IncomingArgHandler {
- FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}
+ FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
+ : IncomingArgHandler(MIRBuilder, MRI) {}
void markPhysRegUsed(MCRegister PhysReg) override {
MIRBuilder.getMRI()->addLiveIn(PhysReg);
struct CallReturnHandler : public IncomingArgHandler {
CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- MachineInstrBuilder MIB, CCAssignFn *AssignFn)
- : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
+ MachineInstrBuilder MIB)
+ : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
void markPhysRegUsed(MCRegister PhysReg) override {
MIB.addDef(PhysReg, RegState::Implicit);
struct ReturnedArgCallReturnHandler : public CallReturnHandler {
ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
MachineRegisterInfo &MRI,
- MachineInstrBuilder MIB, CCAssignFn *AssignFn)
- : CallReturnHandler(MIRBuilder, MRI, MIB, AssignFn) {}
+ MachineInstrBuilder MIB)
+ : CallReturnHandler(MIRBuilder, MRI, MIB) {}
void markPhysRegUsed(MCRegister PhysReg) override {}
};
struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- MachineInstrBuilder MIB, CCAssignFn *AssignFn,
- CCAssignFn *AssignFnVarArg, bool IsReturn,
- bool IsTailCall = false, int FPDiff = 0)
- : OutgoingValueHandler(MIRBuilder, MRI, AssignFn, AssignFnVarArg),
- MIB(MIB), IsReturn(IsReturn), IsTailCall(IsTailCall), FPDiff(FPDiff),
- StackSize(0), SPReg(0),
+ MachineInstrBuilder MIB, bool IsTailCall = false,
+ int FPDiff = 0)
+ : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
+ FPDiff(FPDiff),
Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}
Register getStackAddress(uint64_t Size, int64_t Offset,
/// we invert the interpretation of ValVT and LocVT in certain cases. This is
/// for compatability with the DAG call lowering implementation, which we're
/// currently building on top of.
- uint64_t getStackValueStoreSize(const CCValAssign &VA) const override {
+ uint64_t getStackValueStoreSize(const DataLayout &,
+ const CCValAssign &VA) const override {
return getStackValueStoreSizeHack(VA);
}
assignValueToAddress(ValVReg, Addr, MemSize, MPO, VA);
}
- bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
- CCState &State) override {
- bool IsCalleeWin = Subtarget.isCallingConvWin64(State.getCallingConv());
- bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
-
- if (!State.isVarArg() && !UseVarArgsCCForFixed && !IsReturn)
- applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
-
- bool Res;
- if (Info.IsFixed && !UseVarArgsCCForFixed)
- Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
- else
- Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
-
- StackSize = State.getNextStackOffset();
- return Res;
- }
-
MachineInstrBuilder MIB;
- /// Track if this is used for a return instead of function argument
- /// passing. We apply a hack to i1/i8/i16 stack passed values, but do not use
- /// stack passed returns for them and cannot apply the type adjustment.
- bool IsReturn;
bool IsTailCall;
/// For tail calls, the byte offset of the call's argument area from the
/// callee's. Unused elsewhere.
int FPDiff;
- uint64_t StackSize;
// Cache the SP register vreg if we need it more than once in this call site.
Register SPReg;
if (!VRegs.empty()) {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
+ const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
MachineRegisterInfo &MRI = MF.getRegInfo();
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
}
- OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn,
- /*IsReturn*/ true);
- Success =
- handleAssignments(MIRBuilder, SplitArgs, Handler, CC, F.isVarArg());
+ AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
+ /*IsReturn*/ true);
+ OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
+ Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
+ MIRBuilder, CC, F.isVarArg());
}
if (SwiftErrorVReg) {
CCAssignFn *AssignFn =
TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
- FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
- if (!handleAssignments(MIRBuilder, SplitArgs, Handler, F.getCallingConv(),
- F.isVarArg()))
+ AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
+ FormalArgHandler Handler(MIRBuilder, MRI);
+ if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
+ F.getCallingConv(), F.isVarArg()))
return false;
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- uint64_t StackOffset = Handler.StackUsed;
+ uint64_t StackOffset = Assigner.StackOffset;
if (F.isVarArg()) {
auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
if (!Subtarget.isTargetDarwin()) {
}
// We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
- StackOffset = alignTo(Handler.StackUsed, Subtarget.isTargetILP32() ? 4 : 8);
+ StackOffset =
+ alignTo(Assigner.StackOffset, Subtarget.isTargetILP32() ? 4 : 8);
auto &MFI = MIRBuilder.getMF().getFrameInfo();
FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
getAssignFnsForCC(CallerCC, TLI);
- if (!resultsCompatible(Info, MF, InArgs, *CalleeAssignFnFixed,
- *CalleeAssignFnVarArg, *CallerAssignFnFixed,
- *CallerAssignFnVarArg))
+ AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
+ CalleeAssignFnVarArg);
+ AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
+ CallerAssignFnVarArg);
+
+ if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
return false;
// Make sure that the caller and callee preserve all of the same registers.
return true;
const Function &CallerF = MF.getFunction();
+ LLVMContext &Ctx = CallerF.getContext();
CallingConv::ID CalleeCC = Info.CallConv;
CallingConv::ID CallerCC = CallerF.getCallingConv();
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+ const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
CCAssignFn *AssignFnFixed;
CCAssignFn *AssignFnVarArg;
// We have outgoing arguments. Make sure that we can tail call with them.
SmallVector<CCValAssign, 16> OutLocs;
- CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
+ CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);
- if (!analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg)) {
+ AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
+ Subtarget, /*IsReturn*/ false);
+ if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) {
LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
return false;
}
MIB.addImm(0);
// Tell the call which registers are clobbered.
- auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
+ const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+ auto TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
- if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
+ if (Subtarget.hasCustomCallingConv())
TRI->UpdateCustomCallPreservedMask(MF, &Mask);
MIB.addRegMask(Mask);
unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
SmallVector<CCValAssign, 16> OutLocs;
CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
- analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg);
+
+ AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
+ Subtarget, /*IsReturn*/ false);
+ if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
+ return false;
// The callee will pop the argument stack as a tail call. Thus, we must
// keep it 16-byte aligned.
const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
+ AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
+ Subtarget, /*IsReturn*/ false);
+
// Do the actual argument marshalling.
- OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
- AssignFnVarArg, /*IsReturn*/ false,
+ OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
/*IsTailCall*/ true, FPDiff);
- if (!handleAssignments(MIRBuilder, OutArgs, Handler, CalleeCC, Info.IsVarArg))
+ if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
+ CalleeCC, Info.IsVarArg))
return false;
Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
// Tell the call which registers are clobbered.
const uint32_t *Mask;
- const auto *TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
+ const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+ const auto *TRI = Subtarget.getRegisterInfo();
+ AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
+ Subtarget, /*IsReturn*/ false);
// Do the actual argument marshalling.
- OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
- AssignFnVarArg, /*IsReturn*/ false);
- if (!handleAssignments(MIRBuilder, OutArgs, Handler, Info.CallConv,
- Info.IsVarArg))
+ OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsTailCall*/ false);
+ if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
+ Info.CallConv, Info.IsVarArg))
return false;
Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
// instruction, it must have a register class matching the
// constraint of that instruction.
if (Info.Callee.isReg())
- constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
- *MF.getSubtarget().getRegBankInfo(), *MIB,
- MIB->getDesc(), Info.Callee, 0);
+ constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
+ *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
+ Info.Callee, 0);
// Finally we can copy the returned value back into its virtual-register. In
// symmetry with the arguments, the physical register must be an
// implicit-define of the call instruction.
if (!Info.OrigRet.Ty->isVoidTy()) {
CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
- CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
+ CallReturnHandler Handler(MIRBuilder, MRI, MIB);
bool UsingReturnedArg =
!OutArgs.empty() && OutArgs[0].Flags[0].isReturned();
- ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB,
- RetAssignFn);
- if (!handleAssignments(MIRBuilder, InArgs,
- UsingReturnedArg ? ReturnedArgHandler : Handler,
- Info.CallConv, Info.IsVarArg,
- UsingReturnedArg ? OutArgs[0].Regs[0] : Register()))
+
+ AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
+ /*IsReturn*/ false);
+ ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
+ if (!determineAndHandleAssignments(
+ UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
+ MIRBuilder, Info.CallConv, Info.IsVarArg,
+ UsingReturnedArg ? OutArgs[0].Regs[0] : Register()))
return false;
}
uint64_t CalleePopBytes =
doesCalleeRestoreStack(Info.CallConv,
MF.getTarget().Options.GuaranteedTailCallOpt)
- ? alignTo(Handler.StackSize, 16)
+ ? alignTo(Assigner.StackOffset, 16)
: 0;
- CallSeqStart.addImm(Handler.StackSize).addImm(0);
+ CallSeqStart.addImm(Assigner.StackOffset).addImm(0);
MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
- .addImm(Handler.StackSize)
+ .addImm(Assigner.StackOffset)
.addImm(CalleePopBytes);
return true;
struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
- MachineInstrBuilder MIB, CCAssignFn *AssignFn)
- : OutgoingValueHandler(B, MRI, AssignFn), MIB(MIB) {}
+ MachineInstrBuilder MIB)
+ : OutgoingValueHandler(B, MRI), MIB(MIB) {}
MachineInstrBuilder MIB;
struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
uint64_t StackUsed = 0;
- AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : IncomingValueHandler(B, MRI, AssignFn) {}
+ AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
+ : IncomingValueHandler(B, MRI) {}
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO,
};
struct FormalArgHandler : public AMDGPUIncomingArgHandler {
- FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : AMDGPUIncomingArgHandler(B, MRI, AssignFn) {}
+ FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
+ : AMDGPUIncomingArgHandler(B, MRI) {}
void markPhysRegUsed(unsigned PhysReg) override {
MIRBuilder.getMBB().addLiveIn(PhysReg);
struct CallReturnHandler : public AMDGPUIncomingArgHandler {
CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- MachineInstrBuilder MIB, CCAssignFn *AssignFn)
- : AMDGPUIncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
+ MachineInstrBuilder MIB)
+ : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
void markPhysRegUsed(unsigned PhysReg) override {
MIB.addDef(PhysReg, RegState::Implicit);
};
struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
- CCAssignFn *AssignFnVarArg;
-
/// For tail calls, the byte offset of the call's argument area from the
/// callee's. Unused elsewhere.
int FPDiff;
AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
- CCAssignFn *AssignFn, CCAssignFn *AssignFnVarArg,
bool IsTailCall = false, int FPDiff = 0)
- : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB, AssignFn),
- AssignFnVarArg(AssignFnVarArg), FPDiff(FPDiff), IsTailCall(IsTailCall) {
- }
+ : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
+ IsTailCall(IsTailCall) {}
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO,
}
CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());
- AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret, AssignFn);
- return handleAssignments(B, SplitRetInfos, RetHandler, CC, F.isVarArg());
+
+ OutgoingValueAssigner Assigner(AssignFn);
+ AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
+ return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
+ CC, F.isVarArg());
}
bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
}
- FormalArgHandler Handler(B, MRI, AssignFn);
- if (!handleAssignments(CCInfo, ArgLocs, B, SplitArgs, Handler))
+ IncomingValueAssigner Assigner(AssignFn);
+ if (!determineAssignments(Assigner, SplitArgs, CCInfo))
+ return false;
+
+ FormalArgHandler Handler(B, MRI);
+ if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
return false;
if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) {
// Do the actual argument marshalling.
SmallVector<Register, 8> PhysRegs;
- AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
- AssignFnVarArg, false);
- if (!handleAssignments(CCInfo, ArgLocs, MIRBuilder, OutArgs, Handler))
+
+ OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
+ if (!determineAssignments(Assigner, OutArgs, CCInfo))
+ return false;
+
+ AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
+ if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
return false;
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
Info.IsVarArg);
- CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
- if (!handleAssignments(MIRBuilder, InArgs, Handler, Info.CallConv,
- Info.IsVarArg))
+ OutgoingValueAssigner Assigner(RetAssignFn);
+ CallReturnHandler Handler(MIRBuilder, MRI, MIB);
+ if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder,
+ Info.CallConv, Info.IsVarArg))
return false;
}
/// function return values and call parameters).
struct ARMOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
ARMOutgoingValueHandler(MachineIRBuilder &MIRBuilder,
- MachineRegisterInfo &MRI, MachineInstrBuilder &MIB,
- CCAssignFn *AssignFn)
- : OutgoingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
+ MachineRegisterInfo &MRI, MachineInstrBuilder &MIB)
+ : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB) {}
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO,
return 1;
}
- bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
- CCState &State) override {
- if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State))
- return true;
-
- StackSize =
- std::max(StackSize, static_cast<uint64_t>(State.getNextStackOffset()));
- return false;
- }
-
MachineInstrBuilder MIB;
- uint64_t StackSize = 0;
};
} // end anonymous namespace
CCAssignFn *AssignFn =
TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());
- ARMOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret,
- AssignFn);
- return handleAssignments(MIRBuilder, SplitRetInfos, RetHandler,
- F.getCallingConv(), F.isVarArg());
+ OutgoingValueAssigner RetAssigner(AssignFn);
+ ARMOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
+ return determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
+ MIRBuilder, F.getCallingConv(),
+ F.isVarArg());
}
bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
/// formal arguments and call return values).
struct ARMIncomingValueHandler : public CallLowering::IncomingValueHandler {
ARMIncomingValueHandler(MachineIRBuilder &MIRBuilder,
- MachineRegisterInfo &MRI, CCAssignFn AssignFn)
- : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}
+ MachineRegisterInfo &MRI)
+ : IncomingValueHandler(MIRBuilder, MRI) {}
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO,
};
struct FormalArgHandler : public ARMIncomingValueHandler {
- FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn AssignFn)
- : ARMIncomingValueHandler(MIRBuilder, MRI, AssignFn) {}
+ FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
+ : ARMIncomingValueHandler(MIRBuilder, MRI) {}
void markPhysRegUsed(unsigned PhysReg) override {
MIRBuilder.getMRI()->addLiveIn(PhysReg);
CCAssignFn *AssignFn =
TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());
- FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
- AssignFn);
+ OutgoingValueAssigner ArgAssigner(AssignFn);
+ FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo());
SmallVector<ArgInfo, 8> SplitArgInfos;
unsigned Idx = 0;
if (!MBB.empty())
MIRBuilder.setInstr(*MBB.begin());
- if (!handleAssignments(MIRBuilder, SplitArgInfos, ArgHandler,
- F.getCallingConv(), F.isVarArg()))
+ if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
+ MIRBuilder, F.getCallingConv(),
+ F.isVarArg()))
return false;
// Move back to the end of the basic block.
struct CallReturnHandler : public ARMIncomingValueHandler {
CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- MachineInstrBuilder MIB, CCAssignFn *AssignFn)
- : ARMIncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
+ MachineInstrBuilder MIB)
+ : ARMIncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}
void markPhysRegUsed(unsigned PhysReg) override {
MIB.addDef(PhysReg, RegState::Implicit);
}
auto ArgAssignFn = TLI.CCAssignFnForCall(Info.CallConv, Info.IsVarArg);
- ARMOutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
- if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler, Info.CallConv,
- Info.IsVarArg))
+ OutgoingValueAssigner ArgAssigner(ArgAssignFn);
+ ARMOutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB);
+ if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, ArgInfos,
+ MIRBuilder, Info.CallConv, Info.IsVarArg))
return false;
// Now we can add the actual call instruction to the correct basic block.
ArgInfos.clear();
splitToValueTypes(Info.OrigRet, ArgInfos, DL, Info.CallConv);
auto RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, Info.IsVarArg);
- CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
- if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler, Info.CallConv,
- Info.IsVarArg))
+ OutgoingValueAssigner Assigner(RetAssignFn);
+ CallReturnHandler RetHandler(MIRBuilder, MRI, MIB);
+ if (!determineAndHandleAssignments(RetHandler, Assigner, ArgInfos,
+ MIRBuilder, Info.CallConv,
+ Info.IsVarArg))
return false;
}
// We now know the size of the stack - update the ADJCALLSTACKDOWN
// accordingly.
- CallSeqStart.addImm(ArgHandler.StackSize).addImm(0).add(predOps(ARMCC::AL));
+ CallSeqStart.addImm(ArgAssigner.StackOffset)
+ .addImm(0)
+ .add(predOps(ARMCC::AL));
MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
- .addImm(ArgHandler.StackSize)
+ .addImm(ArgAssigner.StackOffset)
.addImm(0)
.add(predOps(ARMCC::AL));
namespace {
+/// Outgoing value assigner for X86 calls and returns.
+///
+/// On top of running the calling-convention function, this records the final
+/// stack size used by the assigned arguments, and — for variadic arguments —
+/// the number of XMM registers allocated so far, which the call lowering uses
+/// to populate AL before a vararg call.
+struct X86OutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
+private:
+  /// Stack space consumed by assigned arguments; refreshed after every
+  /// argument from CCState.
+  uint64_t StackSize = 0;
+
+  /// Count of XMM registers holding non-fixed (variadic) FP arguments.
+  unsigned NumXMMRegs = 0;
+
+public:
+  uint64_t getStackSize() const { return StackSize; }
+  unsigned getNumXmmRegs() const { return NumXMMRegs; }
+
+  X86OutgoingValueAssigner(CCAssignFn *AssignFn_)
+      : CallLowering::OutgoingValueAssigner(AssignFn_) {}
+
+  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
+                 CCValAssign::LocInfo LocInfo,
+                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+                 CCState &State) override {
+    bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
+    // Track the running stack offset regardless of whether this particular
+    // argument assignment succeeded.
+    StackSize = State.getNextStackOffset();
+
+    static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
+                                           X86::XMM3, X86::XMM4, X86::XMM5,
+                                           X86::XMM6, X86::XMM7};
+    if (!Info.IsFixed)
+      NumXMMRegs = State.getFirstUnallocated(XMMArgRegs);
+
+    return Res;
+  }
+};
+
struct X86OutgoingValueHandler : public CallLowering::OutgoingValueHandler {
X86OutgoingValueHandler(MachineIRBuilder &MIRBuilder,
- MachineRegisterInfo &MRI, MachineInstrBuilder &MIB,
- CCAssignFn *AssignFn)
- : OutgoingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
+ MachineRegisterInfo &MRI, MachineInstrBuilder &MIB)
+ : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB),
DL(MIRBuilder.getMF().getDataLayout()),
STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {}
MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}
- bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
- CCState &State) override {
- bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
- StackSize = State.getNextStackOffset();
-
- static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
- X86::XMM3, X86::XMM4, X86::XMM5,
- X86::XMM6, X86::XMM7};
- if (!Info.IsFixed)
- NumXMMRegs = State.getFirstUnallocated(XMMArgRegs);
-
- return Res;
- }
-
- uint64_t getStackSize() { return StackSize; }
- uint64_t getNumXmmRegs() { return NumXMMRegs; }
-
protected:
MachineInstrBuilder &MIB;
- uint64_t StackSize = 0;
const DataLayout &DL;
const X86Subtarget &STI;
- unsigned NumXMMRegs = 0;
};
} // end anonymous namespace
SmallVector<ArgInfo, 4> SplitRetInfos;
splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, F.getCallingConv());
- X86OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, RetCC_X86);
- if (!handleAssignments(MIRBuilder, SplitRetInfos, Handler, F.getCallingConv(),
- F.isVarArg()))
+ X86OutgoingValueAssigner Assigner(RetCC_X86);
+ X86OutgoingValueHandler Handler(MIRBuilder, MRI, MIB);
+ if (!determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
+ MIRBuilder, F.getCallingConv(),
+ F.isVarArg()))
return false;
}
struct X86IncomingValueHandler : public CallLowering::IncomingValueHandler {
X86IncomingValueHandler(MachineIRBuilder &MIRBuilder,
- MachineRegisterInfo &MRI, CCAssignFn *AssignFn)
- : IncomingValueHandler(MIRBuilder, MRI, AssignFn),
+ MachineRegisterInfo &MRI)
+ : IncomingValueHandler(MIRBuilder, MRI),
DL(MIRBuilder.getMF().getDataLayout()) {}
Register getStackAddress(uint64_t Size, int64_t Offset,
};
struct FormalArgHandler : public X86IncomingValueHandler {
- FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : X86IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}
+ FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
+ : X86IncomingValueHandler(MIRBuilder, MRI) {}
void markPhysRegUsed(unsigned PhysReg) override {
MIRBuilder.getMRI()->addLiveIn(PhysReg);
struct CallReturnHandler : public X86IncomingValueHandler {
CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn, MachineInstrBuilder &MIB)
- : X86IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
+ MachineInstrBuilder &MIB)
+ : X86IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}
void markPhysRegUsed(unsigned PhysReg) override {
MIB.addDef(PhysReg, RegState::Implicit);
if (!MBB.empty())
MIRBuilder.setInstr(*MBB.begin());
- FormalArgHandler Handler(MIRBuilder, MRI, CC_X86);
- if (!handleAssignments(MIRBuilder, SplitArgs, Handler, F.getCallingConv(),
- F.isVarArg()))
+ X86OutgoingValueAssigner Assigner(CC_X86);
+ FormalArgHandler Handler(MIRBuilder, MRI);
+ if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
+ F.getCallingConv(), F.isVarArg()))
return false;
// Move back to the end of the basic block.
splitToValueTypes(OrigArg, SplitArgs, DL, Info.CallConv);
}
// Do the actual argument marshalling.
- X86OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, CC_X86);
- if (!handleAssignments(MIRBuilder, SplitArgs, Handler, Info.CallConv,
- Info.IsVarArg))
+ X86OutgoingValueAssigner Assigner(CC_X86);
+ X86OutgoingValueHandler Handler(MIRBuilder, MRI, MIB);
+ if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
+ Info.CallConv, Info.IsVarArg))
return false;
bool IsFixed = Info.OrigArgs.empty() ? true : Info.OrigArgs.back().IsFixed;
MIRBuilder.buildInstr(X86::MOV8ri)
.addDef(X86::AL)
- .addImm(Handler.getNumXmmRegs());
+ .addImm(Assigner.getNumXmmRegs());
MIB.addUse(X86::AL, RegState::Implicit);
}
splitToValueTypes(Info.OrigRet, SplitArgs, DL, Info.CallConv);
- CallReturnHandler Handler(MIRBuilder, MRI, RetCC_X86, MIB);
- if (!handleAssignments(MIRBuilder, SplitArgs, Handler, Info.CallConv,
- Info.IsVarArg))
+ X86OutgoingValueAssigner Assigner(RetCC_X86);
+ CallReturnHandler Handler(MIRBuilder, MRI, MIB);
+ if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
+ Info.CallConv, Info.IsVarArg))
return false;
if (!NewRegs.empty())
MIRBuilder.buildMerge(Info.OrigRet.Regs[0], NewRegs);
}
- CallSeqStart.addImm(Handler.getStackSize())
+ CallSeqStart.addImm(Assigner.getStackSize())
.addImm(0 /* see getFrameTotalSize */)
.addImm(0 /* see getFrameAdjustment */);
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
MIRBuilder.buildInstr(AdjStackUp)
- .addImm(Handler.getStackSize())
+ .addImm(Assigner.getStackSize())
.addImm(0 /* NumBytesForCalleeToPop */);
return true;
musttail call void @must_callee(i8* null)
ret void
}
+
+; Verify we emit a tail call with a type that requires splitting into
+; multiple registers.
+; The <16 x half> argument arrives split across $q0 and $q1; the checks
+; confirm it is merged back with G_CONCAT_VECTORS, re-split with
+; G_UNMERGE_VALUES, and forwarded in $q0/$q1 to the TCRETURNdi tail call.
+; The Darwin and Windows runs differ only in the callee-saved register mask
+; (csr_darwin_aarch64_aapcs vs csr_aarch64_aapcs).
+declare void @outgoing_v16f16(<16 x half>)
+define void @test_tail_call_outgoing_v16f16(<16 x half> %arg) {
+  ; DARWIN-LABEL: name: test_tail_call_outgoing_v16f16
+  ; DARWIN: bb.1 (%ir-block.0):
+  ; DARWIN:   liveins: $q0, $q1
+  ; DARWIN:   [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+  ; DARWIN:   [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
+  ; DARWIN:   [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[COPY]](<8 x s16>), [[COPY1]](<8 x s16>)
+  ; DARWIN:   [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s16>)
+  ; DARWIN:   $q0 = COPY [[UV]](<8 x s16>)
+  ; DARWIN:   $q1 = COPY [[UV1]](<8 x s16>)
+  ; DARWIN:   TCRETURNdi @outgoing_v16f16, 0, csr_darwin_aarch64_aapcs, implicit $sp, implicit $q0, implicit $q1
+  ; WINDOWS-LABEL: name: test_tail_call_outgoing_v16f16
+  ; WINDOWS: bb.1 (%ir-block.0):
+  ; WINDOWS:   liveins: $q0, $q1
+  ; WINDOWS:   [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+  ; WINDOWS:   [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
+  ; WINDOWS:   [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[COPY]](<8 x s16>), [[COPY1]](<8 x s16>)
+  ; WINDOWS:   [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s16>)
+  ; WINDOWS:   $q0 = COPY [[UV]](<8 x s16>)
+  ; WINDOWS:   $q1 = COPY [[UV1]](<8 x s16>)
+  ; WINDOWS:   TCRETURNdi @outgoing_v16f16, 0, csr_aarch64_aapcs, implicit $sp, implicit $q0, implicit $q1
+  tail call void @outgoing_v16f16(<16 x half> %arg)
+  ret void
+}