Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
-
- bool IsIndirect = false;
-
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
- if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
- uint64_t Width = getContext().getTypeSize(Ty);
- IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
- }
+ uint64_t Width = getContext().getTypeSize(Ty);
+ bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
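+  // Note: with the aggregate/member-pointer guard removed, this rule now
+  // applies to every type, so e.g. GNU64 long double (x86_fp80, 16 bytes)
+  // and __int128 become indirect, while double stays direct (see tests).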
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
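// A minimal caller-side sketch (editor's illustration, not part of the
// patch): under the rule above, a 16-byte __int128 read with va_arg on an
// x86_64-windows target is loaded through a pointer stored in the 8-byte
// va_list slot; the function name sum128 is hypothetical.
#include <stdarg.h>
__int128 sum128(int n, ...) {
  va_list ap;
  va_start(ap, n);
  // Width 128 > 64: the 8-byte slot holds a pointer to the value, and
  // va_arg dereferences it.
  __int128 v = va_arg(ap, __int128);
  // int is 4 bytes (a power of two <= 8 bytes), so it is read directly
  // from the next 8-byte slot.
  int w = va_arg(ap, int);
  va_end(ap);
  return v + w;
}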
// GNU32: declare dso_local void @__mulxc3
// GNU64: declare dso_local void @__mulxc3
// MSC64: declare dso_local void @__muldc3
+
+void VarArgLD(int a, ...) {
+ // GNU32-LABEL: define{{.*}} void @VarArgLD
+ // GNU64-LABEL: define{{.*}} void @VarArgLD
+ // MSC64-LABEL: define{{.*}} void @VarArgLD
+ __builtin_va_list ap;
+ __builtin_va_start(ap, a);
+ long double LD = __builtin_va_arg(ap, long double);
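+  // GNU32 reads the x86_fp80 directly from the stack; GNU64's 16-byte
+  // x86_fp80 no longer fits the 8-byte slot, so it is loaded through a
+  // pointer; MSC64's long double is plain double and stays direct.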
+ // GNU32-NOT: load x86_fp80*, x86_fp80**
+ // GNU32: load x86_fp80, x86_fp80*
+ // GNU64: load x86_fp80*, x86_fp80**
+ // GNU64: load x86_fp80, x86_fp80*
+ // MSC64-NOT: load double*, double**
+ // MSC64: load double, double*
+ __builtin_va_end(ap);
+}
// GNU64: define dso_local <2 x i64> @bar(i128* %0, i128* %1)
// MSC64: define dso_local <2 x i64> @bar(i128* %0, i128* %1)
+
+void vararg(int a, ...) {
+ // GNU64-LABEL: define{{.*}} void @vararg
+ // MSC64-LABEL: define{{.*}} void @vararg
+ __builtin_va_list ap;
+ __builtin_va_start(ap, a);
+ int128_t i = __builtin_va_arg(ap, int128_t);
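+  // 128 bits exceeds the 8-byte slot on both GNU64 and MSC64, so va_arg
+  // loads a pointer from the va_list and reads the i128 through it.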
+ // GNU64: load i128*, i128**
+ // GNU64: load i128, i128*
+ // MSC64: load i128*, i128**
+ // MSC64: load i128, i128*
+ __builtin_va_end(ap);
+}
// LIN: store i92 %[[LOAD1]], i92*
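+  // With the fix, each over-wide _ExtInt advances the va_list by a single
+  // 8-byte pointer slot and loads the value through that pointer.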
// WIN: %[[CUR1:.+]] = load i8*, i8** %[[ARGS]]
- // WIN: %[[NEXT1:.+]] = getelementptr inbounds i8, i8* %[[CUR1]], i64 16
+ // WIN: %[[NEXT1:.+]] = getelementptr inbounds i8, i8* %[[CUR1]], i64 8
// WIN: store i8* %[[NEXT1]], i8** %[[ARGS]]
- // WIN: %[[BC1:.+]] = bitcast i8* %[[CUR1]] to i92*
- // WIN: %[[LOADV1:.+]] = load i92, i92* %[[BC1]]
+ // WIN: %[[BC1:.+]] = bitcast i8* %[[CUR1]] to i92**
+ // WIN: %[[LOADP1:.+]] = load i92*, i92** %[[BC1]]
+ // WIN: %[[LOADV1:.+]] = load i92, i92* %[[LOADP1]]
// WIN: store i92 %[[LOADV1]], i92*
_ExtInt(31) B = __builtin_va_arg(args, _ExtInt(31));
// LIN: store i129 %[[LOAD4]], i129*
// WIN: %[[CUR4:.+]] = load i8*, i8** %[[ARGS]]
- // WIN: %[[NEXT4:.+]] = getelementptr inbounds i8, i8* %[[CUR4]], i64 24
+ // WIN: %[[NEXT4:.+]] = getelementptr inbounds i8, i8* %[[CUR4]], i64 8
// WIN: store i8* %[[NEXT4]], i8** %[[ARGS]]
- // WIN: %[[BC4:.+]] = bitcast i8* %[[CUR4]] to i129*
- // WIN: %[[LOADV4:.+]] = load i129, i129* %[[BC4]]
+ // WIN: %[[BC4:.+]] = bitcast i8* %[[CUR4]] to i129**
+ // WIN: %[[LOADP4:.+]] = load i129*, i129** %[[BC4]]
+ // WIN: %[[LOADV4:.+]] = load i129, i129* %[[LOADP4]]
// WIN: store i129 %[[LOADV4]], i129*
_ExtInt(16777200) E = __builtin_va_arg(args, _ExtInt(16777200));
// LIN: store i16777200 %[[LOAD5]], i16777200*
// WIN: %[[CUR5:.+]] = load i8*, i8** %[[ARGS]]
- // WIN: %[[NEXT5:.+]] = getelementptr inbounds i8, i8* %[[CUR5]], i64 2097152
+ // WIN: %[[NEXT5:.+]] = getelementptr inbounds i8, i8* %[[CUR5]], i64 8
// WIN: store i8* %[[NEXT5]], i8** %[[ARGS]]
- // WIN: %[[BC5:.+]] = bitcast i8* %[[CUR5]] to i16777200*
- // WIN: %[[LOADV5:.+]] = load i16777200, i16777200* %[[BC5]]
+ // WIN: %[[BC5:.+]] = bitcast i8* %[[CUR5]] to i16777200**
+ // WIN: %[[LOADP5:.+]] = load i16777200*, i16777200** %[[BC5]]
+ // WIN: %[[LOADV5:.+]] = load i16777200, i16777200* %[[LOADP5]]
// WIN: store i16777200 %[[LOADV5]], i16777200*
__builtin_va_end(args);