return ABIArgInfo::getExtend(Ty);
}
+ if (const auto * EIT = Ty->getAs<ExtIntType>()) {
+ if (EIT->getNumBits() <= 64) {
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
if (InReg)
return ABIArgInfo::getDirectInReg();
return ABIArgInfo::getDirect();
return;
}
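A minimal sketch of what the i386 change above means at the source level; these declarations are hypothetical illustrations, not part of the patch:

  // i386: _ExtInt arguments of 64 bits or fewer stay direct; wider ones are
  // passed indirectly (a pointer, without byval).
  void take_small(_ExtInt(48) x); // <= 64 bits: passed directly as i48
  void take_big(_ExtInt(65) x);   // >  64 bits: passed as i65* (no byval)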
+ if (const auto *EITy = Ty->getAs<ExtIntType>()) {
+ if (EITy->getNumBits() <= 64)
+ Current = Integer;
+ else if (EITy->getNumBits() <= 128)
+ Lo = Hi = Integer;
+ // Larger values need to get passed in memory.
+ return;
+ }
+
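For reference, a hedged sketch of how the SysV x86-64 classification above plays out; the declarations below are illustrative only:

  // x86-64 SysV: <= 64 bits uses one Integer eightbyte, <= 128 bits uses two
  // (Lo and Hi), and anything wider is left unclassified and goes to memory.
  void one_reg(_ExtInt(64) x);    // one GPR
  void two_regs(_ExtInt(128) x);  // split across two GPRs
  void in_memory(_ExtInt(129) x); // passed on the stack (byval)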
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
// Arrays are treated like structures.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ if (!Ty->isExtIntType())
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
return getNaturalAlignIndirect(Ty);
// the argument in the free register. This does not seem to happen currently,
// but this code would be much safer if we could mark the argument with
// 'onstack'. See PR12193.
- if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
+ !Ty->isExtIntType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
}
}
+ if (Ty->isExtIntType()) {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ // However, non-power-of-two _ExtInts will be passed as 1, 2, 4, or 8 bytes
+ // anyway as long as they fit in one of those sizes, so we don't have to
+ // check for a power of 2.
+ if (Width <= 64)
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+
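A short illustration of the MS x64 rule above, using hypothetical declarations rather than anything from the patch:

  // MS x64: anything that fits in 8 bytes is passed directly, even at
  // non-power-of-two widths; wider values are passed by reference.
  void fits(_ExtInt(63) x);     // passed directly as i63
  void too_wide(_ExtInt(65) x); // passed as i65* (no byval)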
return ABIArgInfo::getDirect();
}
-// RUN: %clang_cc1 -triple x86_64-gnu-linux -fsanitize=array-bounds,enum,float-cast-overflow,integer-divide-by-zero,implicit-unsigned-integer-truncation,implicit-signed-integer-truncation,implicit-integer-sign-change,unsigned-integer-overflow,signed-integer-overflow,shift-base,shift-exponent -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-gnu-linux -fsanitize=array-bounds,enum,float-cast-overflow,integer-divide-by-zero,implicit-unsigned-integer-truncation,implicit-signed-integer-truncation,implicit-integer-sign-change,unsigned-integer-overflow,signed-integer-overflow,shift-base,shift-exponent -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s
// CHECK: define void @_Z6BoundsRA10_KiU7_ExtIntILi15EEi
i = E;
// CHECK: %[[LOADE:.+]] = load i35
- // CHECK: %[[CONV:.+]] = trunc i35 %[[LOADE]] to i32
+ // CHECK: store i35 %[[LOADE]], i35* %[[EADDR:.+]]
+ // CHECK: %[[LOADE2:.+]] = load i35, i35* %[[EADDR]]
+ // CHECK: %[[CONV:.+]] = trunc i35 %[[LOADE2]] to i32
// CHECK: %[[EXT:.+]] = zext i32 %[[CONV]] to i35
- // CHECK: %[[CHECK:.+]] = icmp eq i35 %[[EXT]], %[[LOADE]]
+ // CHECK: %[[CHECK:.+]] = icmp eq i35 %[[EXT]], %[[LOADE2]]
// CHECK: br i1 %[[CHECK]]
// CHECK: call void @__ubsan_handle_implicit_conversion_abort
j = E;
// CHECK: %[[LOADE:.+]] = load i35
- // CHECK: %[[CONV:.+]] = trunc i35 %[[LOADE]] to i32
+ // CHECK: store i35 %[[LOADE]], i35* %[[EADDR:.+]]
+ // CHECK: %[[LOADE2:.+]] = load i35, i35* %[[EADDR]]
+ // CHECK: %[[CONV:.+]] = trunc i35 %[[LOADE2]] to i32
// CHECK: %[[EXT:.+]] = zext i32 %[[CONV]] to i35
- // CHECK: %[[CHECK:.+]] = icmp eq i35 %[[EXT]], %[[LOADE]]
+ // CHECK: %[[CHECK:.+]] = icmp eq i35 %[[EXT]], %[[LOADE2]]
// CHECK: br i1 %[[CHECK]]
// CHECK: call void @__ubsan_handle_implicit_conversion_abort
// CHECK: define void @_Z15SignChangeCheckU7_ExtIntILi39EEjU7_ExtIntILi39EEi
void SignChangeCheck(unsigned _ExtInt(39) UE, _ExtInt(39) E) {
UE = E;
+ // CHECK: %[[LOADEU:.+]] = load i39
// CHECK: %[[LOADE:.+]] = load i39
- // CHECK: %[[NEG:.+]] = icmp slt i39 %[[LOADE]], 0
+ // CHECK: store i39 %[[LOADE]], i39* %[[EADDR:.+]]
+ // CHECK: %[[LOADE2:.+]] = load i39, i39* %[[EADDR]]
+ // CHECK: %[[NEG:.+]] = icmp slt i39 %[[LOADE2]], 0
// CHECK: %[[SIGNCHECK:.+]] = icmp eq i1 %[[NEG]], false
// CHECK: br i1 %[[SIGNCHECK]]
// CHECK: call void @__ubsan_handle_implicit_conversion_abort
-
E = UE;
- // CHECK: %[[LOADUE:.+]] = load i39
- // CHECK: %[[NEG:.+]] = icmp slt i39 %[[LOADUE]], 0
+ // CHECK: store i39 %[[LOADE2]], i39* %[[UEADDR:.+]]
+ // CHECK: %[[LOADUE2:.+]] = load i39, i39* %[[UEADDR]]
+ // CHECK: %[[NEG:.+]] = icmp slt i39 %[[LOADUE2]], 0
// CHECK: %[[SIGNCHECK:.+]] = icmp eq i1 false, %[[NEG]]
// CHECK: br i1 %[[SIGNCHECK]]
// CHECK: call void @__ubsan_handle_implicit_conversion_abort
// Also triggers signed integer overflow.
E / E;
- // CHECK: %[[E:.+]] = load i11, i11*
- // CHECK: %[[E2:.+]] = load i11, i11*
+ // CHECK: %[[E1LOAD:.+]] = load i11
+ // CHECK: store i11 %[[E1LOAD]], i11* %[[EADDR:.+]]
+ // CHECK: %[[E:.+]] = load i11, i11* %[[EADDR]]
+ // CHECK: %[[E2:.+]] = load i11, i11* %[[EADDR]]
// CHECK: %[[NEZERO:.+]] = icmp ne i11 %[[E2]], 0
// CHECK: %[[NEMIN:.+]] = icmp ne i11 %[[E]], -1024
// CHECK: %[[NENEG1:.+]] = icmp ne i11 %[[E2]], -1
// CHECK: define void @_Z6ShiftsU7_ExtIntILi9EEi
void Shifts(_ExtInt(9) E) {
E >> E;
- // CHECK: %[[LHSE:.+]] = load i9, i9*
- // CHECK: %[[RHSE:.+]] = load i9, i9*
+ // CHECK: %[[E1LOAD:.+]] = load i9, i9*
+ // CHECK: store i9 %[[E1LOAD]], i9* %[[EADDR:.+]]
+ // CHECK: %[[LHSE:.+]] = load i9, i9* %[[EADDR]]
+ // CHECK: %[[RHSE:.+]] = load i9, i9* %[[EADDR]]
// CHECK: %[[CMP:.+]] = icmp ule i9 %[[RHSE]], 8
// CHECK: br i1 %[[CMP]]
// CHECK: call void @__ubsan_handle_shift_out_of_bounds_abort
_ExtInt(4) SmallestE,
_ExtInt(31) JustRightE) {
BiggestE + BiggestE;
- // CHECK: %[[LOAD1:.+]] = load i93, i93*
- // CHECK: %[[LOAD2:.+]] = load i93, i93*
+ // CHECK: %[[LOADBIGGESTE2:.+]] = load i93
+ // CHECK: store i93 %[[LOADBIGGESTE2]], i93* %[[BIGGESTEADDR:.+]]
+ // CHECK: %[[LOAD1:.+]] = load i93, i93* %[[BIGGESTEADDR]]
+ // CHECK: %[[LOAD2:.+]] = load i93, i93* %[[BIGGESTEADDR]]
// CHECK: %[[OFCALL:.+]] = call { i93, i1 } @llvm.sadd.with.overflow.i93(i93 %[[LOAD1]], i93 %[[LOAD2]])
// CHECK: %[[EXRESULT:.+]] = extractvalue { i93, i1 } %[[OFCALL]], 0
// CHECK: %[[OFRESULT:.+]] = extractvalue { i93, i1 } %[[OFCALL]], 1
unsigned _ExtInt(23) SmallE,
unsigned _ExtInt(35) BigE) {
u = SmallE + SmallE;
- // CHECK: %[[LOADE1:.+]] = load i23, i23*
- // CHECK: %[[LOADE2:.+]] = load i23, i23*
+ // CHECK: %[[LOADBIGGESTE2:.+]] = load i23
+ // CHECK: store i23 %[[LOADBIGGESTE2]], i23* %[[BIGGESTEADDR:.+]]
+ // CHECK: %[[LOADE1:.+]] = load i23, i23* %[[BIGGESTEADDR]]
+ // CHECK: %[[LOADE2:.+]] = load i23, i23* %[[BIGGESTEADDR]]
// CHECK: %[[OFCALL:.+]] = call { i23, i1 } @llvm.uadd.with.overflow.i23(i23 %[[LOADE1]], i23 %[[LOADE2]])
// CHECK: %[[EXRESULT:.+]] = extractvalue { i23, i1 } %[[OFCALL]], 0
// CHECK: %[[OFRESULT:.+]] = extractvalue { i23, i1 } %[[OFCALL]], 1
-// RUN: %clang_cc1 -triple x86_64-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK
-// RUN: %clang_cc1 -triple x86_64-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_cc1 -triple x86_64-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,LIN64,CHECK64
+// RUN: %clang_cc1 -triple x86_64-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,WIN64,CHECK64
+// RUN: %clang_cc1 -triple i386-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,LIN32,CHECK32
+// RUNX: %clang_cc1 -triple i386-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,WIN32,CHECK32
void GenericTest(_ExtInt(3) a, unsigned _ExtInt(3) b, _ExtInt(4) c) {
void VLATest(_ExtInt(3) A, _ExtInt(99) B, _ExtInt(123456) C) {
// CHECK: define {{.*}}void @VLATest
int AR1[A];
- // CHECK: %[[A:.+]] = zext i3 %{{.+}} to i64
- // CHECK: %[[VLA1:.+]] = alloca i32, i64 %[[A]]
+ // CHECK: %[[A:.+]] = zext i3 %{{.+}} to i[[INDXSIZE:[0-9]+]]
+ // CHECK: %[[VLA1:.+]] = alloca i32, i[[INDXSIZE]] %[[A]]
int AR2[B];
- // CHECK: %[[B:.+]] = trunc i99 %{{.+}} to i64
- // CHECK: %[[VLA2:.+]] = alloca i32, i64 %[[B]]
+ // CHECK: %[[B:.+]] = trunc i99 %{{.+}} to i[[INDXSIZE]]
+ // CHECK: %[[VLA2:.+]] = alloca i32, i[[INDXSIZE]] %[[B]]
int AR3[C];
- // CHECK: %[[C:.+]] = trunc i123456 %{{.+}} to i64
- // CHECK: %[[VLA3:.+]] = alloca i32, i64 %[[C]]
+ // CHECK: %[[C:.+]] = trunc i123456 %{{.+}} to i[[INDXSIZE]]
+ // CHECK: %[[VLA3:.+]] = alloca i32, i[[INDXSIZE]] %[[C]]
}
struct S {
};
void OffsetOfTest() {
- // CHECK: define {{.*}}void @OffsetOfTest
+ // CHECK: define {{.*}}void @OffsetOfTest
int A = __builtin_offsetof(struct S,A);
// CHECK: store i32 0, i32* %{{.+}}
int B = __builtin_offsetof(struct S,B);
- // CHECK: store i32 8, i32* %{{.+}}
+ // CHECK64: store i32 8, i32* %{{.+}}
+ // CHECK32: store i32 4, i32* %{{.+}}
int C = __builtin_offsetof(struct S,C);
- // CHECK: store i32 2097160, i32* %{{.+}}
+ // CHECK64: store i32 2097160, i32* %{{.+}}
+ // CHECK32: store i32 2097156, i32* %{{.+}}
}
-
+// Make sure the 128- and 64-bit versions are passed like integers, and that
+// anything wider than 128 bits is passed indirectly.
+void ParamPassing(_ExtInt(129) a, _ExtInt(128) b, _ExtInt(64) c) {}
+// LIN64: define void @ParamPassing(i129* byval(i129) align 8 %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i64 %{{.+}})
+// WIN64: define dso_local void @ParamPassing(i129* %{{.+}}, i128* %{{.+}}, i64 %{{.+}})
+// LIN32: define void @ParamPassing(i129* %{{.+}}, i128* %{{.+}}, i64 %{{.+}})
+// WIN32: define dso_local void @ParamPassing(i129* %{{.+}}, i128* %{{.+}}, i64 %{{.+}})
+void ParamPassing2(_ExtInt(129) a, _ExtInt(127) b, _ExtInt(63) c) {}
+// LIN64: define void @ParamPassing2(i129* byval(i129) align 8 %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i64 %{{.+}})
+// WIN64: define dso_local void @ParamPassing2(i129* %{{.+}}, i127* %{{.+}}, i63 %{{.+}})
+// LIN32: define void @ParamPassing2(i129* %{{.+}}, i127* %{{.+}}, i63 %{{.+}})
+// WIN32: define dso_local void @ParamPassing2(i129* %{{.+}}, i127* %{{.+}}, i63 %{{.+}})
+_ExtInt(63) ReturnPassing(){}
+// LIN64: define i64 @ReturnPassing(
+// WIN64: define dso_local i63 @ReturnPassing(
+// LIN32: define i63 @ReturnPassing(
+// WIN32: define dso_local i64 @ReturnPassing(
+_ExtInt(64) ReturnPassing2(){}
+// LIN64: define i64 @ReturnPassing2(
+// WIN64: define dso_local i64 @ReturnPassing2(
+// LIN32: define i64 @ReturnPassing2(
+// WIN32: define dso_local i64 @ReturnPassing2(
+_ExtInt(127) ReturnPassing3(){}
+// LIN64: define { i64, i64 } @ReturnPassing3(
+// WIN64: define dso_local void @ReturnPassing3(i127* noalias sret
+// LIN32: define i127 @ReturnPassing3(
+// WIN32: define dso_local i127 @ReturnPassing3(
+_ExtInt(128) ReturnPassing4(){}
+// LIN64: define { i64, i64 } @ReturnPassing4(
+// WIN64: define dso_local void @ReturnPassing4(i128* noalias sret
+// LIN32: define i128 @ReturnPassing4(
+// WIN32: define dso_local i64 @ReturnPassing4(
+_ExtInt(129) ReturnPassing5(){}
+// LIN64: define void @ReturnPassing5(i129* noalias sret
+// WIN64: define dso_local void @ReturnPassing5(i129* noalias sret
+// LIN32: define i129 @ReturnPassing5(
+// WIN32: define dso_local i129 @ReturnPassing5(
};
void UnderlyingTypeUsage(AsEnumUnderlyingType Param) {
- // LIN: define void @_Z19UnderlyingTypeUsage20AsEnumUnderlyingType(i9 %
+ // LIN: define void @_Z19UnderlyingTypeUsage20AsEnumUnderlyingType(i16 %
// WIN: define dso_local void @"?UnderlyingTypeUsage@@YAXW4AsEnumUnderlyingType@@@Z"(i9 %
AsEnumUnderlyingType Var;
// CHECK: alloca i9, align 2
}
unsigned _ExtInt(33) ManglingTestRetParam(unsigned _ExtInt(33) Param) {
-// LIN: define i33 @_Z20ManglingTestRetParamU7_ExtIntILi33EEj(i33 %
+// LIN: define i64 @_Z20ManglingTestRetParamU7_ExtIntILi33EEj(i64 %
// WIN: define dso_local i33 @"?ManglingTestRetParam@@YAU?$_UExtInt@$0CB@@__clang@@U12@@Z"(i33
return 0;
}
_ExtInt(33) ManglingTestRetParam(_ExtInt(33) Param) {
-// LIN: define i33 @_Z20ManglingTestRetParamU7_ExtIntILi33EEi(i33 %
+// LIN: define i64 @_Z20ManglingTestRetParamU7_ExtIntILi33EEi(i64 %
// WIN: define dso_local i33 @"?ManglingTestRetParam@@YAU?$_ExtInt@$0CB@@__clang@@U12@@Z"(i33
return 0;
}
_ExtInt(92) A = __builtin_va_arg(args, _ExtInt(92));
// LIN: %[[AD1:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %[[ARGS]]
- // LIN: %[[OFA_P1:.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %[[AD1]], i32 0, i32 2
- // LIN: %[[OFA1:.+]] = load i8*, i8** %[[OFA_P1]]
- // LIN: %[[BC1:.+]] = bitcast i8* %[[OFA1]] to i92*
- // LIN: %[[OFANEXT1:.+]] = getelementptr i8, i8* %[[OFA1]], i32 16
- // LIN: store i8* %[[OFANEXT1]], i8** %[[OFA_P1]]
+ // LIN: %[[OFA_P1:.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %[[AD1]], i32 0, i32 0
+ // LIN: %[[GPOFFSET:.+]] = load i32, i32* %[[OFA_P1]]
+ // LIN: %[[FITSINGP:.+]] = icmp ule i32 %[[GPOFFSET]], 32
+ // LIN: br i1 %[[FITSINGP]]
+ // LIN: %[[BC1:.+]] = phi i92*
// LIN: %[[LOAD1:.+]] = load i92, i92* %[[BC1]]
// LIN: store i92 %[[LOAD1]], i92*
+
// WIN: %[[CUR1:.+]] = load i8*, i8** %[[ARGS]]
// WIN: %[[NEXT1:.+]] = getelementptr inbounds i8, i8* %[[CUR1]], i64 16
// WIN: store i8* %[[NEXT1]], i8** %[[ARGS]]
_ExtInt(31) B = __builtin_va_arg(args, _ExtInt(31));
// LIN: %[[AD2:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %[[ARGS]]
- // LIN: %[[OFA_P2:.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %[[AD2]], i32 0, i32 2
- // LIN: %[[OFA2:.+]] = load i8*, i8** %[[OFA_P2]]
- // LIN: %[[BC2:.+]] = bitcast i8* %[[OFA2]] to i31*
- // LIN: %[[OFANEXT2:.+]] = getelementptr i8, i8* %[[OFA2]], i32 8
- // LIN: store i8* %[[OFANEXT2]], i8** %[[OFA_P2]]
- // LIN: %[[LOAD2:.+]] = load i31, i31* %[[BC2]]
- // LIN: store i31 %[[LOAD2]], i31*
+ // LIN: %[[OFA_P2:.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %[[AD2]], i32 0, i32 0
+ // LIN: %[[GPOFFSET:.+]] = load i32, i32* %[[OFA_P2]]
+ // LIN: %[[FITSINGP:.+]] = icmp ule i32 %[[GPOFFSET]], 40
+ // LIN: br i1 %[[FITSINGP]]
+ // LIN: %[[BC1:.+]] = phi i31*
+ // LIN: %[[LOAD1:.+]] = load i31, i31* %[[BC1]]
+ // LIN: store i31 %[[LOAD1]], i31*
+
// WIN: %[[CUR2:.+]] = load i8*, i8** %[[ARGS]]
- // WIN: %[[NEXT2:.+]] = getelementptr inbounds i8, i8* %[[CUR2]], i64 8
+ // WIN: %[[NEXT2:.+]] = getelementptr inbounds i8, i8* %[[CUR2]], i64 8
// WIN: store i8* %[[NEXT2]], i8** %[[ARGS]]
// WIN: %[[BC2:.+]] = bitcast i8* %[[CUR2]] to i31*
// WIN: %[[LOADV2:.+]] = load i31, i31* %[[BC2]]
_ExtInt(16) C = __builtin_va_arg(args, _ExtInt(16));
// LIN: %[[AD3:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %[[ARGS]]
- // LIN: %[[OFA_P3:.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %[[AD3]], i32 0, i32 2
- // LIN: %[[OFA3:.+]] = load i8*, i8** %[[OFA_P3]]
- // LIN: %[[BC3:.+]] = bitcast i8* %[[OFA3]] to i16*
- // LIN: %[[OFANEXT3:.+]] = getelementptr i8, i8* %[[OFA3]], i32 8
- // LIN: store i8* %[[OFANEXT3]], i8** %[[OFA_P3]]
- // LIN: %[[LOAD3:.+]] = load i16, i16* %[[BC3]]
- // LIN: store i16 %[[LOAD3]], i16*
+ // LIN: %[[OFA_P3:.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %[[AD3]], i32 0, i32 0
+ // LIN: %[[GPOFFSET:.+]] = load i32, i32* %[[OFA_P3]]
+ // LIN: %[[FITSINGP:.+]] = icmp ule i32 %[[GPOFFSET]], 40
+ // LIN: br i1 %[[FITSINGP]]
+ // LIN: %[[BC1:.+]] = phi i16*
+ // LIN: %[[LOAD1:.+]] = load i16, i16* %[[BC1]]
+ // LIN: store i16 %[[LOAD1]], i16*
+
// WIN: %[[CUR3:.+]] = load i8*, i8** %[[ARGS]]
// WIN: %[[NEXT3:.+]] = getelementptr inbounds i8, i8* %[[CUR3]], i64 8
// WIN: store i8* %[[NEXT3]], i8** %[[ARGS]]
// LIN: store i8* %[[OFANEXT4]], i8** %[[OFA_P4]]
// LIN: %[[LOAD4:.+]] = load i129, i129* %[[BC4]]
// LIN: store i129 %[[LOAD4]], i129*
+
// WIN: %[[CUR4:.+]] = load i8*, i8** %[[ARGS]]
- // WIN: %[[NEXT4:.+]] = getelementptr inbounds i8, i8* %[[CUR4]], i64 24
+ // WIN: %[[NEXT4:.+]] = getelementptr inbounds i8, i8* %[[CUR4]], i64 24
// WIN: store i8* %[[NEXT4]], i8** %[[ARGS]]
// WIN: %[[BC4:.+]] = bitcast i8* %[[CUR4]] to i129*
// WIN: %[[LOADV4:.+]] = load i129, i129* %[[BC4]]
// LIN: store i8* %[[OFANEXT5]], i8** %[[OFA_P5]]
// LIN: %[[LOAD5:.+]] = load i16777200, i16777200* %[[BC5]]
// LIN: store i16777200 %[[LOAD5]], i16777200*
+
// WIN: %[[CUR5:.+]] = load i8*, i8** %[[ARGS]]
// WIN: %[[NEXT5:.+]] = getelementptr inbounds i8, i8* %[[CUR5]], i64 2097152
// WIN: store i8* %[[NEXT5]], i8** %[[ARGS]]
}
void ExplicitCasts() {
- // LIN: define void @_Z13ExplicitCastsv()
+ // LIN: define void @_Z13ExplicitCastsv()
// WIN: define dso_local void @"?ExplicitCasts@@YAXXZ"()
_ExtInt(33) a;
};
void OffsetOfTest() {
- // LIN: define void @_Z12OffsetOfTestv()
+ // LIN: define void @_Z12OffsetOfTestv()
// WIN: define dso_local void @"?OffsetOfTest@@YAXXZ"()
auto A = __builtin_offsetof(S,A);
// UB in C/C++, Defined in OpenCL.
Ext << 29;
- // CHECK: shl i28 %{{.+}}, 29
+ // CHECK: shl i28 %{{.+}}, 29
Ext >> 29;
// CHECK: ashr i28 %{{.+}}, 29
}