AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
bool IsSibCall = false;
- bool IsWin64 =
- Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
+ bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CallConv);
// Check callee args/returns for SVE registers and set calling convention
// accordingly.
bool UseVarArgCC = !Outs[i].IsFixed;
// On Windows, the fixed arguments in a vararg call are passed in GPRs
// too, so use the vararg CC to force them to integer registers.
- if (IsWin64)
+ if (IsCalleeWin64)
UseVarArgCC = true;
CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
MachineInstrBuilder MIB, CCAssignFn *AssignFn,
- CCAssignFn *AssignFnVarArg, bool IsVarArg,
- bool IsTailCall = false, int FPDiff = 0)
+ CCAssignFn *AssignFnVarArg, bool IsTailCall = false,
+ int FPDiff = 0)
: OutgoingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
AssignFnVarArg(AssignFnVarArg), IsTailCall(IsTailCall), FPDiff(FPDiff),
- StackSize(0), SPReg(0) {
- MachineFunction &MF = MIRBuilder.getMF();
- const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
- bool IsWin =
- Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
- UseVarArgsCCForFixed = IsVarArg && IsWin;
- }
+ StackSize(0), SPReg(0),
+ Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO,
ISD::ArgFlagsTy Flags,
CCState &State) override {
bool Res;
+ bool IsCalleeWin = Subtarget.isCallingConvWin64(State.getCallingConv());
+ bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
if (Info.IsFixed && !UseVarArgsCCForFixed)
Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
else
MachineInstrBuilder MIB;
CCAssignFn *AssignFnVarArg;
bool IsTailCall;
- bool UseVarArgsCCForFixed;
/// For tail calls, the byte offset of the call's argument area from the
/// callee's. Unused elsewhere.
// Cache the SP register vreg if we need it more than once in this call site.
Register SPReg;
+
+ const AArch64Subtarget &Subtarget;
};
} // namespace
splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
}
- OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn,
- F.isVarArg());
+ OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
Success =
handleAssignments(MIRBuilder, SplitArgs, Handler, CC, F.isVarArg());
}
// Do the actual argument marshalling.
OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
- AssignFnVarArg, Info.IsVarArg, true, FPDiff);
+ AssignFnVarArg, true, FPDiff);
if (!handleAssignments(MIRBuilder, OutArgs, Handler, CalleeCC, Info.IsVarArg))
return false;
// Do the actual argument marshalling.
OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
- AssignFnVarArg, Info.IsVarArg, false);
+ AssignFnVarArg, false);
if (!handleAssignments(MIRBuilder, OutArgs, Handler, Info.CallConv,
Info.IsVarArg))
return false;
; GISEL: fmov d0, #3.00000000
; CHECK: mov w3, #4
; CHECK: b other_d_va_fn
- tail call void (double, i32, ...) @other_d_va_fn(double 1.000000e+00, i32 2, double 3.000000e+00, i32 4) #4
+ tail call void (double, i32, ...) @other_d_va_fn(double 1.000000e+00, i32 2, double 3.000000e+00, i32 4)
ret void
}
-declare dso_local void @other_d_va_fn(double, i32, ...)
+declare void @other_d_va_fn(double, i32, ...)
+
+define void @call_d_non_va() nounwind {
+entry:
+; Non-variadic call: unlike the vararg call above, the fixed double
+; arguments must stay in FP registers (d0/d1) and the i32 arguments in
+; GPRs (w0/w1); the tail call lowers to a plain branch.
+; CHECK-LABEL: call_d_non_va:
+; CHECK-DAG: fmov d0, #1.00000000
+; CHECK-DAG: fmov d1, #3.00000000
+; CHECK-DAG: mov w0, #2
+; CHECK-DAG: mov w1, #4
+; CHECK: b other_d_non_va_fn
+ tail call void (double, i32, double, i32) @other_d_non_va_fn(double 1.000000e+00, i32 2, double 3.000000e+00, i32 4)
+ ret void
+}
+
+declare void @other_d_non_va_fn(double, i32, double, i32)
--- /dev/null
+; RUN: llc < %s -mtriple=aarch64-linux -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,DAGISEL
+; RUN: llc < %s -mtriple=aarch64-linux -verify-machineinstrs -O0 -fast-isel | FileCheck %s --check-prefixes=CHECK,O0
+; RUN: llc < %s -mtriple=aarch64-linux -verify-machineinstrs -O0 -global-isel | FileCheck %s --check-prefixes=CHECK,O0
+
+define win64cc void @float_va_fn(float %a, i32 %b, ...) nounwind {
+entry:
+; Variadic win64cc function: the fixed float argument %a arrives in the
+; GPR w0 and is moved into s0 on entry (fmov s0, w0), and the remaining
+; integer argument registers x2-x7 are spilled to the va-list save area
+; on the stack for va_start.
+; CHECK-LABEL: float_va_fn:
+; O0: str x7, [sp, #72]
+; O0: str x6, [sp, #64]
+; O0: str x5, [sp, #56]
+; O0: str x4, [sp, #48]
+; O0: str x3, [sp, #40]
+; O0: str x2, [sp, #32]
+; CHECK: fmov s0, w0
+; O0: add x8, sp, #32
+; O0: str x8, [sp, #8]
+; O0: ldr x0, [sp, #8]
+; DAGISEL: add x0, sp, #32
+; DAGISEL: stp x2, x3, [sp, #32]
+; DAGISEL: stp x4, x5, [sp, #48]
+; DAGISEL: stp x6, x7, [sp, #64]
+; CHECK: bl f_va_list
+ %ap = alloca i8*, align 8
+ %0 = bitcast i8** %ap to i8*
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
+ call void @llvm.va_start(i8* nonnull %0)
+ %1 = load i8*, i8** %ap, align 8
+ call void @f_va_list(float %a, i8* %1)
+ call void @llvm.va_end(i8* nonnull %0)
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+ ret void
+}
+
+declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.va_start(i8*)
+declare void @f_va_list(float, i8*)
+declare void @llvm.va_end(i8*)
+declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+
+define win64cc void @double_va_fn(double %a, i32 %b, ...) nounwind {
+entry:
+; Same as float_va_fn but with a fixed double argument: %a arrives in
+; the 64-bit GPR x0 and is moved into d0 on entry (fmov d0, x0); x2-x7
+; are spilled to the va-list save area for va_start.
+; CHECK-LABEL: double_va_fn:
+; O0: str x7, [sp, #72]
+; O0: str x6, [sp, #64]
+; O0: str x5, [sp, #56]
+; O0: str x4, [sp, #48]
+; O0: str x3, [sp, #40]
+; O0: str x2, [sp, #32]
+; CHECK: fmov d0, x0
+; O0: add x8, sp, #32
+; O0: str x8, [sp, #8]
+; O0: ldr x0, [sp, #8]
+; DAGISEL: add x0, sp, #32
+; DAGISEL: stp x2, x3, [sp, #32]
+; DAGISEL: stp x4, x5, [sp, #48]
+; DAGISEL: stp x6, x7, [sp, #64]
+; CHECK: bl d_va_list
+ %ap = alloca i8*, align 8
+ %0 = bitcast i8** %ap to i8*
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
+ call void @llvm.va_start(i8* nonnull %0)
+ %1 = load i8*, i8** %ap, align 8
+ call void @d_va_list(double %a, i8* %1)
+ call void @llvm.va_end(i8* nonnull %0)
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+ ret void
+}
+
+declare void @d_va_list(double, i8*)
+
+define void @call_f_va() nounwind {
+entry:
+; Caller side of a win64cc vararg call: the fixed float 1.0 is passed in
+; the GPR w0 (bit pattern 0x3f800000 = 1065353216) and the double 3.0 in
+; x2 (0x4008000000000000 = 4613937818241073152); the i32s go in w1/w3.
+; NOTE(review): lowered as bl rather than a tail branch — presumably
+; because the caller's CC differs from the win64cc callee; confirm.
+; CHECK-LABEL: call_f_va:
+; DAGISEL: mov w0, #1065353216
+; FASTISEL: mov w0, #1065353216
+; GISEL: fmov s0, #1.00000000
+; GISEL: fmov w0, s0
+; CHECK: mov w1, #2
+; DAGISEL: mov x2, #4613937818241073152
+; FASTISEL: mov x2, #4613937818241073152
+; GISEL: fmov d0, #3.00000000
+; GISEL: fmov x2, d0
+; CHECK: mov w3, #4
+; CHECK: bl other_f_va_fn
+ tail call win64cc void (float, i32, ...) @other_f_va_fn(float 1.000000e+00, i32 2, double 3.000000e+00, i32 4)
+ ret void
+}
+
+declare win64cc void @other_f_va_fn(float, i32, ...)
+
+define void @call_d_va() nounwind {
+entry:
+; Same as call_f_va but with a fixed double first argument: 1.0 is
+; passed in x0 (bit pattern 0x3ff0000000000000 = 4607182418800017408)
+; and 3.0 in x2; the i32s go in w1/w3.
+; CHECK-LABEL: call_d_va:
+; DAGISEL: mov x0, #4607182418800017408
+; FASTISEL: mov x0, #4607182418800017408
+; GISEL: fmov d0, #1.00000000
+; GISEL: fmov x0, d0
+; CHECK: mov w1, #2
+; DAGISEL: mov x2, #4613937818241073152
+; FASTISEL: mov x2, #4613937818241073152
+; GISEL: fmov d0, #3.00000000
+; CHECK: mov w3, #4
+; CHECK: bl other_d_va_fn
+ tail call win64cc void (double, i32, ...) @other_d_va_fn(double 1.000000e+00, i32 2, double 3.000000e+00, i32 4)
+ ret void
+}
+
+declare win64cc void @other_d_va_fn(double, i32, ...)
+
+define void @call_d_non_va() nounwind {
+entry:
+; Non-variadic win64cc call: only vararg callees force fixed args into
+; GPRs, so here the doubles stay in FP registers d0/d1 and the i32s in
+; w0/w1.
+; CHECK-LABEL: call_d_non_va:
+; CHECK-DAG: fmov d0, #1.00000000
+; CHECK-DAG: fmov d1, #3.00000000
+; CHECK-DAG: mov w0, #2
+; CHECK-DAG: mov w1, #4
+; CHECK: bl other_d_non_va_fn
+ tail call win64cc void (double, i32, double, i32) @other_d_non_va_fn(double 1.000000e+00, i32 2, double 3.000000e+00, i32 4)
+ ret void
+}
+
+declare win64cc void @other_d_non_va_fn(double, i32, double, i32)