From: Jun Bum Lim
Date: Mon, 1 Feb 2016 20:55:11 +0000 (+0000)
Subject: Avoid inlining call sites in unreachable-terminated block
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=53907161cc20292fd7de53e8e1f0a7ff105076a7;p=platform%2Fupstream%2Fllvm.git

Avoid inlining call sites in unreachable-terminated block

Summary:
If the normal destination of the invoke or the parent block of the call site
is unreachable-terminated, there is little point in inlining the call site
unless there is literally zero cost. Unlike my previous change (D15289), this
change specifically handles call sites followed by unreachable in the same
basic block (for a call) or in the normal destination (for an invoke).

This change could be a reasonable first step to conservatively inline call
sites leading to an unreachable-terminated block while BFI / BPI are not yet
available in the inliner. (An illustrative source-level sketch of the targeted
pattern follows the patch below.)

Reviewers: manmanren, majnemer, hfinkel, davidxl, mcrosier, dblaikie, eraman

Subscribers: dblaikie, davidxl, mcrosier, llvm-commits

Differential Revision: http://reviews.llvm.org/D16616

llvm-svn: 259403
---

diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 18d65a0..387077b 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -1214,15 +1214,26 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
   if (OnlyOneCallAndLocalLinkage)
     Cost += InlineConstants::LastCallToStaticBonus;
 
-  // If the instruction after the call, or if the normal destination of the
-  // invoke is an unreachable instruction, the function is noreturn. As such,
-  // there is little point in inlining this unless there is literally zero
-  // cost.
+  // If the normal destination of the invoke or the parent block of the call
+  // site is unreachable-terminated, there is little point in inlining this
+  // unless there is literally zero cost.
+  // FIXME: Note that it is possible that an unreachable-terminated block has a
+  // hot entry. For example, in the scenario below, inlining hot_call_X() may
+  // be beneficial:
+  //   main() {
+  //     hot_call_1();
+  //     ...
+  //     hot_call_N();
+  //     exit(0);
+  //   }
+  // For now, we are not handling this corner case here as it is rare in real
+  // code. In the future, we should elaborate on this based on BPI and BFI in
+  // more general threshold-adjusting heuristics in updateThreshold().
   Instruction *Instr = CS.getInstruction();
   if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
-    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
+    if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
       Threshold = 0;
-  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
+  } else if (isa<UnreachableInst>(Instr->getParent()->getTerminator()))
     Threshold = 0;
 
   // If this function uses the coldcc calling convention, prefer not to inline
diff --git a/llvm/test/Transforms/Inline/inline_unreachable.ll b/llvm/test/Transforms/Inline/inline_unreachable.ll
new file mode 100644
index 0000000..dbf0119
--- /dev/null
+++ b/llvm/test/Transforms/Inline/inline_unreachable.ll
@@ -0,0 +1,130 @@
+; RUN: opt < %s -inline -S | FileCheck %s
+
+@a = global i32 4
+@_ZTIi = external global i8*
+
+; CHECK-LABEL: callSimpleFunction
+; CHECK: call i32 @simpleFunction
+define i32 @callSimpleFunction(i32 %idx, i32 %limit) {
+entry:
+  %cmp = icmp sge i32 %idx, %limit
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+  %s = call i32 @simpleFunction(i32 %idx)
+  store i32 %s, i32* @a
+  unreachable
+
+if.end:
+  ret i32 %idx
+}
+
+; CHECK-LABEL: callSmallFunction
+; CHECK-NOT: call i32 @smallFunction
+define i32 @callSmallFunction(i32 %idx, i32 %limit) {
+entry:
+  %cmp = icmp sge i32 %idx, %limit
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+  %s = call i32 @smallFunction(i32 %idx)
+  store i32 %s, i32* @a
+  unreachable
+
+if.end:
+  ret i32 %idx
+}
+
+; CHECK-LABEL: throwSimpleException
+; CHECK: invoke i32 @simpleFunction
+define i32 @throwSimpleException(i32 %idx, i32 %limit) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+  %cmp = icmp sge i32 %idx, %limit
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  %exception = call i8* @__cxa_allocate_exception(i64 1) #0
+  invoke i32 @simpleFunction(i32 %idx)
+          to label %invoke.cont unwind label %lpad
+
+invoke.cont:                                      ; preds = %if.then
+  call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #1
+  unreachable
+
+lpad:                                             ; preds = %if.then
+  %ll = landingpad { i8*, i32 }
+          cleanup
+  ret i32 %idx
+
+if.end:                                           ; preds = %entry
+  ret i32 %idx
+}
+
+; CHECK-LABEL: throwSmallException
+; CHECK-NOT: invoke i32 @smallFunction
+define i32 @throwSmallException(i32 %idx, i32 %limit) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+  %cmp = icmp sge i32 %idx, %limit
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  %exception = call i8* @__cxa_allocate_exception(i64 1) #0
+  invoke i32 @smallFunction(i32 %idx)
+          to label %invoke.cont unwind label %lpad
+
+invoke.cont:                                      ; preds = %if.then
+  call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #1
+  unreachable
+
+lpad:                                             ; preds = %if.then
+  %ll = landingpad { i8*, i32 }
+          cleanup
+  ret i32 %idx
+
+if.end:                                           ; preds = %entry
+  ret i32 %idx
+}
+
+define i32 @simpleFunction(i32 %a) #0 {
+entry:
+  %a1 = load volatile i32, i32* @a
+  %x1 = add i32 %a1, %a1
+  %a2 = load volatile i32, i32* @a
+  %x2 = add i32 %x1, %a2
+  %a3 = load volatile i32, i32* @a
+  %x3 = add i32 %x2, %a3
+  %a4 = load volatile i32, i32* @a
+  %x4 = add i32 %x3, %a4
+  %a5 = load volatile i32, i32* @a
+  %x5 = add i32 %x4, %a5
+  %a6 = load volatile i32, i32* @a
+  %x6 = add i32 %x5, %a6
+  %a7 = load volatile i32, i32* @a
+  %x7 = add i32 %x6, %a6
+  %a8 = load volatile i32, i32* @a
+  %x8 = add i32 %x7, %a8
+  %a9 = load volatile i32, i32* @a
+  %x9 = add i32 %x8, %a9
+  %a10 = load volatile i32, i32* @a
+  %x10 = add i32 %x9, %a10
+  %a11 = load volatile i32, i32* @a
+  %x11 = add i32 %x10, %a11
+  %a12 = load volatile i32, i32* @a
+  %x12 = add i32 %x11, %a12
+  %add = add i32 %x12, %a
+  ret i32 %add
+}
+
+define i32 @smallFunction(i32 %a) {
+entry:
+  %r = load volatile i32, i32* @a
+  ret i32 %r
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { noreturn }
+
+declare i8* @__cxa_allocate_exception(i64)
+declare i32 @__gxx_personality_v0(...)
+declare void @__cxa_throw(i8*, i8*, i8*)
+
diff --git a/llvm/test/Transforms/JumpThreading/pr26096.ll b/llvm/test/Transforms/JumpThreading/pr26096.ll
index 2671e82..096d43e 100644
--- a/llvm/test/Transforms/JumpThreading/pr26096.ll
+++ b/llvm/test/Transforms/JumpThreading/pr26096.ll
@@ -10,19 +10,24 @@ entry:
   br i1 %B, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  call void @fn2()
+  call void @fn2(i1 %B)
   ret void
 
 if.end:                                           ; preds = %entry
-  call void @fn2()
+  call void @fn2(i1 %B)
   ret void
 }
 
-define internal void @fn2() unnamed_addr {
+define internal void @fn2(i1 %B) unnamed_addr {
 entry:
   call void @fn1()
   call void @fn1()
   call void @fn1()
+  br i1 %B, label %if.end, label %if.then
+if.then:
+  unreachable
+
+if.end:
   unreachable
 }
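
The following is an illustrative, source-level sketch (not part of the patch;
all names are hypothetical) of the pattern this heuristic targets. A frontend
lowers the noreturn exit() call so that the block containing the call to
expensive_work() is terminated by 'unreachable' in LLVM IR; with this change
the inliner then gives that call site a threshold of zero, i.e. it inlines
expensive_work() only if doing so is literally free.

    // Illustration only; hypothetical code, not part of the patch.
    #include <cstdlib>

    // A callee that is too large to be inlined at zero cost.
    static int expensive_work(int v) {
      int acc = v;
      for (int i = 0; i < 1000; ++i)
        acc += i * v;
      return acc;
    }

    int main(int argc, char **argv) {
      (void)argv;
      // expensive_work() is followed by exit(), which is noreturn, so this
      // basic block is terminated by 'unreachable' in the IR and the call
      // site gets Threshold = 0 under this change.
      int r = expensive_work(argc);
      std::exit(r & 0xff);
    }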