From: Nick Desaulniers Date: Fri, 8 Oct 2021 22:17:54 +0000 (-0700) Subject: [InlineCost] model calls to llvm.is.constant* more carefully X-Git-Tag: upstream/15.0.7~29116 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9697f93587f46300814f1c6c68af347441d6e05d;p=platform%2Fupstream%2Fllvm.git [InlineCost] model calls to llvm.is.constant* more carefully llvm.is.constant* intrinsics are evaluated to 0 or 1 integral values. A common use case for llvm.is.constant comes from the higher level __builtin_constant_p. A common usage pattern of __builtin_constant_p in the Linux kernel is: void foo (int bar) { if (__builtin_constant_p(bar)) { // lots of code that will fold away to a constant. } else { // a little bit of code, usually a libcall. } } A minor issue in InlineCost calculations is when `bar` is _not_ Constant and still will not be after inlining, we don't discount the true branch and the inline cost of `foo` ends up being the cost of both branches together, rather than just the false branch. This leads to code like the above where inlining will not help prove bar Constant, but it still would be beneficial to inline foo, because the "true" branch is irrelevant from a cost perspective. For example, IPSCCP can sink a passed constant argument to foo: const int x = 42; void bar (void) { foo(x); } This improves our inlining decisions, and fixes a few head-scratching cases where the disassembly shows a relatively small `foo` not inlined into a lone caller. We could further improve this modeling by tracking whether the argument to llvm.is.constant* is a parameter of the function, and if inlining would allow that parameter to become Constant. This idea is noted in a FIXME comment. 
Link: https://github.com/ClangBuiltLinux/linux/issues/1302 Reviewed By: kazu Differential Revision: https://reviews.llvm.org/D111272 --- diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp index 3cb56dc..f0419d1 100644 --- a/llvm/lib/Analysis/InlineCost.cpp +++ b/llvm/lib/Analysis/InlineCost.cpp @@ -387,6 +387,7 @@ protected: bool simplifyCallSite(Function *F, CallBase &Call); template <typename Callable> bool simplifyInstruction(Instruction &I, Callable Evaluate); + bool simplifyIntrinsicCallIsConstant(CallBase &CB); ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V); /// Return true if the given argument to the function being considered for @@ -1531,6 +1532,27 @@ bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) { return true; } +/// Try to simplify a call to llvm.is.constant. +/// +/// Duplicate the argument checking from CallAnalyzer::simplifyCallSite since +/// we expect calls of this specific intrinsic to be infrequent. +/// +/// FIXME: Given that we know CB's parent (F) caller +/// (CandidateCall->getParent()->getParent()), we might be able to determine +/// whether inlining F into F's caller would change how the call to +/// llvm.is.constant would evaluate. +bool CallAnalyzer::simplifyIntrinsicCallIsConstant(CallBase &CB) { + Value *Arg = CB.getArgOperand(0); + auto *C = dyn_cast<Constant>(Arg); + + if (!C) + C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(Arg)); + + Type *RT = CB.getFunctionType()->getReturnType(); + SimplifiedValues[&CB] = ConstantInt::get(RT, C ? 1 : 0); + return true; +} + bool CallAnalyzer::visitBitCast(BitCastInst &I) { // Propagate constants through bitcasts. 
if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) { @@ -2154,6 +2176,8 @@ bool CallAnalyzer::visitCallBase(CallBase &Call) { if (auto *SROAArg = getSROAArgForValueOrNull(II->getOperand(0))) SROAArgValues[II] = SROAArg; return true; + case Intrinsic::is_constant: + return simplifyIntrinsicCallIsConstant(Call); } } diff --git a/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll new file mode 100644 index 0000000..3c96267 --- /dev/null +++ b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll @@ -0,0 +1,39 @@ +; RUN: opt %s -passes=inline -inline-threshold=20 -S | FileCheck %s +
+declare i1 @llvm.is.constant.i64(i64)
+declare void @foo()
+
+define void @callee(i64 %val) {
+  %cond = call i1 @llvm.is.constant.i64(i64 %val)
+  br i1 %cond, label %cond.true, label %cond.false
+
+cond.true:
+; Rack up costs with a couple of function calls so that this function
+; gets inlined only when @llvm.is.constant.i64 is folded. In reality,
+; the "then" clause of __builtin_constant_p tends to have statements
+; that fold very well, so the cost of the "then" clause is not a huge
+; concern.
+  call void @foo()
+  call void @foo()
+  ret void
+
+cond.false:
+  ret void
+}
+
+define void @caller(i64 %val) {
+; CHECK-LABEL: @caller(
+; CHECK-NEXT:    [[COND_I:%.*]] = call i1 @llvm.is.constant.i64(i64 [[VAL:%.*]])
+; CHECK-NEXT:    br i1 [[COND_I]], label %[[COND_TRUE_I:.*]], label %[[COND_FALSE_I:.*]]
+; CHECK:       [[COND_TRUE_I]]:
+; CHECK-NEXT:    call void @foo()
+; CHECK-NEXT:    call void @foo()
+; CHECK-NEXT:    br label %[[CALLEE_EXIT:.*]]
+; CHECK:       [[COND_FALSE_I]]:
+; CHECK-NEXT:    br label %[[CALLEE_EXIT]]
+; CHECK:       [[CALLEE_EXIT]]:
+; CHECK-NEXT:    ret void
+;
+  call void @callee(i64 %val)
+  ret void
+}