bool HasReturn;
bool HasIndirectBr;
bool HasUninlineableIntrinsic;
- bool UsesVarArgs;
+ bool InitsVargArgs;
/// Number of bytes allocated statically by the callee.
uint64_t AllocatedSize;
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
- HasUninlineableIntrinsic(false), UsesVarArgs(false), AllocatedSize(0),
+ HasUninlineableIntrinsic(false), InitsVargArgs(false), AllocatedSize(0),
NumInstructions(0), NumVectorInstructions(0), VectorBonus(0),
SingleBBBonus(0), EnableLoadElimination(true), LoadEliminationCost(0),
NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
HasUninlineableIntrinsic = true;
return false;
case Intrinsic::vastart:
- case Intrinsic::vaend:
- UsesVarArgs = true;
+ InitsVargArgs = true;
return false;
}
}
IR = "indirect branch";
else if (HasUninlineableIntrinsic)
IR = "uninlinable intrinsic";
- else if (UsesVarArgs)
+ else if (InitsVargArgs)
IR = "varargs";
if (!IR) {
if (ORE)
// Disallow inlining functions that call @llvm.localescape. Doing this
// correctly would require major changes to the inliner.
case llvm::Intrinsic::localescape:
- // Disallow inlining of functions that access VarArgs.
+ // Disallow inlining of functions that initialize VarArgs with va_start.
case llvm::Intrinsic::vastart:
- case llvm::Intrinsic::vaend:
return false;
}
}
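For context, a minimal C sketch of the distinction the comment above draws (not part of the patch; the function names sum and cleanup are invented for illustration): va_start reads the enclosing function's own variadic argument area, so a function that calls it cannot safely be inlined into a different frame, while va_end only finalizes a va_list handed to it and does not depend on whose frame it runs in.

#include <stdarg.h>

/* Calls va_start, so inlining it would make va_start read the
   caller's variadic arguments (or fault in a non-variadic caller)
   instead of this function's own. */
int sum(int count, ...) {
  va_list ap;
  va_start(ap, count);
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(ap, int);
  va_end(ap);
  return total;
}

/* Only calls va_end on a va_list passed in; nothing here depends on
   the enclosing frame, so inlining it is safe. This mirrors the
   @callee_with_vaend pattern in the test below. */
void cleanup(va_list ap) {
  va_end(ap);
}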
; CHECK: %res1 = call i32 (...) @varg_accessed(i32 10)
; CHECK-NEXT: %res2 = call i32 (...) @varg_accessed_alwaysinline(i32 15)
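+; Calling va_end on a va_list passed in does not block inlining, so both
+; callees below should be inlined into @caller_with_vastart.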
+define void @caller_with_vastart(i8* noalias nocapture readnone %args, ...) {
+entry:
+ %ap = alloca i8*, align 4
+ %ap.ptr = bitcast i8** %ap to i8*
+ %ap2 = alloca i8*, align 4
+ %ap2.ptr = bitcast i8** %ap2 to i8*
+ call void @llvm.va_start(i8* nonnull %ap.ptr)
+ call fastcc void @callee_with_vaend(i8* nonnull %ap.ptr)
+ call void @llvm.va_start(i8* nonnull %ap2.ptr)
+ call fastcc void @callee_with_vaend_alwaysinline(i8* nonnull %ap2.ptr)
+ ret void
+}
+
+define internal fastcc void @callee_with_vaend_alwaysinline(i8* %a) alwaysinline {
+entry:
+ tail call void @llvm.va_end(i8* %a)
+ ret void
+}
+
+define internal fastcc void @callee_with_vaend(i8* %a) {
+entry:
+ tail call void @llvm.va_end(i8* %a)
+ ret void
+}
+
+; CHECK-LABEL: @caller_with_vastart
+; CHECK-NOT: @callee_with_vaend
+; CHECK-NOT: @callee_with_vaend_alwaysinline
+
declare void @llvm.va_start(i8*)
declare void @llvm.va_end(i8*)