#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#ifndef LLVM_BUILD_GLOBAL_ISEL
#error "This shouldn't be built without GISel"
#endif

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
  : CallLowering(&TLI) {
}
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
- const Value *Val, unsigned VReg) const {
- MachineInstr *Return = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
- assert(Return && "Unable to build a return instruction?!");
+ const Value *Val, unsigned VReg) const {
+ MachineFunction &MF = MIRBuilder.getMF();
+ const Function &F = *MF.getFunction();
+
+ MachineInstrBuilder MIB = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
+ assert(MIB.getInstr() && "Unable to build a return instruction?!");
assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
if (VReg) {
- assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy()) &&
- "Type not supported yet");
- const Function &F = *MIRBuilder.getMF().getFunction();
- const DataLayout &DL = F.getParent()->getDataLayout();
- unsigned Size = DL.getTypeSizeInBits(Val->getType());
- assert((Size == 64 || Size == 32) && "Size not supported yet");
- unsigned ResReg = (Size == 32) ? AArch64::W0 : AArch64::X0;
- // Set the insertion point to be right before Return.
- MIRBuilder.setInstr(*Return, /* Before */ true);
- MachineInstr *Copy = MIRBuilder.buildCopy(ResReg, VReg);
- (void)Copy;
- assert(Copy->getNextNode() == Return &&
- "The insertion did not happen where we expected");
- MachineInstrBuilder(MIRBuilder.getMF(), Return)
- .addReg(ResReg, RegState::Implicit);
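+    // Copies that produce the return value have to come before the RET, so
+    // move the builder's insertion point to just before it.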
+ MIRBuilder.setInstr(*MIB.getInstr(), /* Before */ true);
+ const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+ CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
+
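+    // Let the calling convention decide which physical register carries the
+    // return value; the callback copies the value into it and records it as
+    // an implicit use of the RET so the value is known to be live at the return.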
+ handleAssignments(
+ MIRBuilder, AssignFn, MVT::getVT(Val->getType()), VReg,
+ [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.buildCopy(PhysReg, ValReg);
+ MIB.addUse(PhysReg, RegState::Implicit);
+ });
}
return true;
}
-bool AArch64CallLowering::lowerFormalArguments(
- MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
- const SmallVectorImpl<unsigned> &VRegs) const {
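+// Shared helper for argument, return-value and call lowering: run AssignFn
+// over ArgTypes to decide where each value lives, then invoke AssignValToReg
+// to connect the value register with the physical register the ABI chose.
+// Only register locations are handled so far.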
+bool AArch64CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
+ CCAssignFn *AssignFn,
+ ArrayRef<MVT> ArgTypes,
+ ArrayRef<unsigned> ArgRegs,
+ AssignFnTy AssignValToReg) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
- unsigned NumArgs = Args.size();
- Function::const_arg_iterator CurOrigArg = Args.begin();
- const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
- for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
- MVT ValVT = MVT::getVT(CurOrigArg->getType());
- CCAssignFn *AssignFn =
- TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
- bool Res =
- AssignFn(i, ValVT, ValVT, CCValAssign::Full, ISD::ArgFlagsTy(), CCInfo);
+ unsigned NumArgs = ArgTypes.size();
+ auto CurVT = ArgTypes.begin();
+ for (unsigned i = 0; i != NumArgs; ++i, ++CurVT) {
+ bool Res = AssignFn(i, *CurVT, *CurVT, CCValAssign::Full, ISD::ArgFlagsTy(),
+ CCInfo);
assert(!Res && "Call operand has unhandled type");
(void)Res;
}
- assert(ArgLocs.size() == Args.size() &&
+ assert(ArgLocs.size() == ArgTypes.size() &&
"We have a different number of location and args?!");
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
assert(VA.isRegLoc() && "Not yet implemented");
- // Transform the arguments in physical registers into virtual ones.
- MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
- MIRBuilder.buildCopy(VRegs[i], VA.getLocReg());
switch (VA.getLocInfo()) {
default:
assert(0 && "Not yet implemented");
break;
}
+
+ // Everything checks out, tell the caller where we've decided this
+ // parameter/return value should go.
+ AssignValToReg(MIRBuilder, ArgRegs[i], VA.getLocReg());
}
return true;
}
+
+bool AArch64CallLowering::lowerFormalArguments(
+ MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
+ ArrayRef<unsigned> VRegs) const {
+ MachineFunction &MF = MIRBuilder.getMF();
+ const Function &F = *MF.getFunction();
+
+ SmallVector<MVT, 8> ArgTys;
+ for (auto &Arg : Args)
+ ArgTys.push_back(MVT::getVT(Arg.getType()));
+
+ const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+ CCAssignFn *AssignFn =
+ TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
+
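+  // For incoming arguments the callback marks each ABI register live-in to
+  // the entry block and copies it into the virtual register the rest of the
+  // function uses.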
+ return handleAssignments(
+ MIRBuilder, AssignFn, ArgTys, VRegs,
+ [](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.getMBB().addLiveIn(PhysReg);
+ MIRBuilder.buildCopy(ValReg, PhysReg);
+ });
+}
+
+bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
+ const CallInst &CI, unsigned CalleeReg,
+ unsigned ResReg,
+ ArrayRef<unsigned> ArgRegs) const {
+ MachineFunction &MF = MIRBuilder.getMF();
+ const Function &F = *MF.getFunction();
+
+  // First step is to marshal all the function's parameters into the correct
+ // physregs and memory locations. Gather the sequence of argument types that
+ // we'll pass to the assigner function.
+ SmallVector<MVT, 8> ArgTys;
+ for (auto &Arg : CI.arg_operands())
+ ArgTys.push_back(MVT::getVT(Arg->getType()));
+
+ // Find out which ABI gets to decide where things go.
+ const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+ CCAssignFn *CallAssignFn =
+ TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
+
+ // And finally we can do the actual assignments. For a call we need to keep
+ // track of the registers used because they'll be implicit uses of the BL.
+ SmallVector<unsigned, 8> PhysRegs;
+ handleAssignments(
+ MIRBuilder, CallAssignFn, ArgTys, ArgRegs,
+ [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.buildCopy(PhysReg, ValReg);
+ PhysRegs.push_back(PhysReg);
+ });
+
+  // Now we can build the actual call instruction: BLR for an indirect call
+  // through a register, BL with a direct reference to the known callee.
+ MachineInstrBuilder MIB;
+ if (CalleeReg)
+ MIB = MIRBuilder.buildInstr(AArch64::BLR).addUse(CalleeReg);
+ else
+ MIB = MIRBuilder.buildInstr(AArch64::BL)
+ .addGlobalAddress(CI.getCalledFunction());
+
+  // Tell the call which registers are clobbered: everything that is not
+  // preserved across a call with this calling convention.
+ auto TRI = MF.getSubtarget().getRegisterInfo();
+ MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));
+
+ for (auto Reg : PhysRegs)
+ MIB.addUse(Reg, RegState::Implicit);
+
+  // Finally we can copy the returned value back into its virtual register. In
+  // symmetry with the arguments, the physical register must be an
+  // implicit-define of the call instruction.
+ CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
+ if (!CI.getType()->isVoidTy())
+ handleAssignments(
+ MIRBuilder, RetAssignFn, MVT::getVT(CI.getType()), ResReg,
+ [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.buildCopy(ValReg, PhysReg);
+ MIB.addDef(PhysReg, RegState::Implicit);
+ });
+
+ return true;
+}