CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
unsigned &NumBytes);
- bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
+ bool finishCall(CallLoweringInfo &CLI, unsigned NumBytes);
public:
// Backend specific FastISel code.
return true;
}
-bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
- unsigned NumBytes) {
+bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, unsigned NumBytes) {
CallingConv::ID CC = CLI.CallConv;
// Issue CALLSEQ_END
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
.addImm(NumBytes).addImm(0);
- // Now the return value.
- if (RetVT != MVT::isVoid) {
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
- CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC));
+ // Now the return values.
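+ // CLI.Ins was populated by the generic FastISel::lowerCallTo with one
+ // ISD::InputArg per legal register of the return type.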
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(CLI.Ins, CCAssignFnForCall(CC));
- // Only handle a single return value.
- if (RVLocs.size() != 1)
- return false;
-
- // Copy all of the result registers out of their specified physreg.
- MVT CopyVT = RVLocs[0].getValVT();
+ Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
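+ // CreateRegs returns the first of a consecutive run of virtual registers
+ // covering RetTy, so each result part can be copied into ResultReg + i.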
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ MVT CopyVT = VA.getValVT();
+ Register CopyReg = ResultReg + i;
// TODO: Handle big-endian results
if (CopyVT.isVector() && !Subtarget->isLittleEndian())
return false;
- Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
- TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(RVLocs[0].getLocReg());
- CLI.InRegs.push_back(RVLocs[0].getLocReg());
-
- CLI.ResultReg = ResultReg;
- CLI.NumResultRegs = 1;
+ // Copy the result out of its specified physreg.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
+ CopyReg)
+ .addReg(VA.getLocReg());
+ CLI.InRegs.push_back(VA.getLocReg());
}
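+ // The generic FastISel::lowerCallTo maps the call's value to these
+ // NumResultRegs consecutive registers via updateValueMap.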
+ CLI.ResultReg = ResultReg;
+ CLI.NumResultRegs = RVLocs.size();
+
return true;
}
if (IsVarArg)
return false;
- // FIXME: Only handle *simple* calls for now.
- MVT RetVT;
- if (CLI.RetTy->isVoidTy())
- RetVT = MVT::isVoid;
- else if (!isTypeLegal(CLI.RetTy, RetVT))
- return false;
-
for (auto Flag : CLI.OutFlags)
if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal() ||
Flag.isSwiftSelf() || Flag.isSwiftAsync() || Flag.isSwiftError())
return false;

CLI.Call = MIB;
// Finish off the call including any return values.
- return finishCall(CLI, RetVT, NumBytes);
+ return finishCall(CLI, NumBytes);
}
bool AArch64FastISel::isMemCpySmall(uint64_t Len, MaybeAlign Alignment) {
--- /dev/null
+; RUN: llc -fast-isel -pass-remarks-missed=isel < %s 2>&1 >/dev/null | FileCheck -check-prefix=STDERR -allow-empty %s
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-linux-gnu"
+
+declare { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 } @ret_s10i64()
+
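+; Ten i64 results do not fit in the integer return registers, so this call
+; needs sret demotion, which FastISel does not support; it should bail to
+; SelectionDAG and emit a missed-optimization remark.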
+define i64 @call_ret_s10i64() {
+; STDERR: FastISel missed call: %ret = call { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 } @ret_s10i64() (in function: call_ret_s10i64)
+ %ret = call { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 } @ret_s10i64()
+ %ext0 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 } %ret, 0
+ %ext1 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 } %ret, 1
+ %sum = add i64 %ext0, %ext1
+ ret i64 %sum
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no-generate-body-for-unused-prefixes
+; RUN: llc -fast-isel -fast-isel-abort=3 < %s | FileCheck %s
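+; -fast-isel-abort=3 forbids any SelectionDAG fallback, so FastISel itself
+; must lower these struct-returning calls.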
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-linux-gnu"
+
+declare { i64, i64 } @ret_s2i64()
+
+define i64 @call_ret_s2i64() {
+; CHECK-LABEL: call_ret_s2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl ret_s2i64
+; CHECK-NEXT: add x0, x0, x1
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %ret = call { i64, i64 } @ret_s2i64()
+ %ext0 = extractvalue { i64, i64 } %ret, 0
+ %ext1 = extractvalue { i64, i64 } %ret, 1
+ %sum = add i64 %ext0, %ext1
+ ret i64 %sum
+}
+
+declare { i64, i64, i64, i64, i64, i64, i64, i64 } @ret_s8i64()
+
+define i64 @call_ret_s8i64() {
+; CHECK-LABEL: call_ret_s8i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl ret_s8i64
+; CHECK-NEXT: add x0, x0, x7
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %ret = call { i64, i64, i64, i64, i64, i64, i64, i64 } @ret_s8i64()
+ %ext0 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %ret, 0
+ %ext7 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %ret, 7
+ %sum = add i64 %ext0, %ext7
+ ret i64 %sum
+}