/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is
     allowed by the limits specified by the user (allowed function
     growth and so on).

     Functions are inlined when it is obvious the result is profitable
     (such as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and
     recursive inlining.

   The inliner itself is split into two passes:

     The first is a simple local inlining pass, inlining callees into
     the current function.  This pass makes no use of whole unit
     analysis and thus it can make only very simple decisions based on
     local properties.

     The strength of the pass is that it is run in topological order
     (reverse postorder) on the callgraph.  Functions are converted
     into SSA form just before this pass and optimized subsequently.
     As a result, the callees of the function seen by the early inliner
     have already been optimized, and the results of early inlining add
     a lot of optimization opportunities for the local optimization.

     The pass handles the obvious inlining decisions within the
     compilation unit - inlining auto inline functions, inlining for
     size and flattening.  The main strength of the pass is the ability
     to eliminate the abstraction penalty in C++ code (via a
     combination of inlining and early optimization) and thus improve
     the quality of analysis done by the real IPA optimizers.

     Because of the lack of whole unit knowledge, the pass cannot
     really make good code size/performance tradeoffs.  It however does
     very simple speculative inlining allowing code size to grow by
     EARLY_INLINING_INSNS when the callee is a leaf function.  In this
     case the optimizations performed later are very likely to
     eliminate the cost.

     The second is the real inliner, able to handle inlining with whole
     program knowledge.  It performs the following steps:

     1) inlining of small functions.  This is implemented by a greedy
     algorithm ordering all inlinable cgraph edges by their badness and
     inlining them in this order as long as the inline limits allow
     doing so.

     This heuristic is not very good at inlining recursive calls.
     Recursive calls can be inlined with results similar to loop
     unrolling.  To do so, a special purpose recursive inliner is
     executed on the function when a recursive edge is met as a viable
     candidate.

     2) Unreachable functions are removed from the callgraph.  Inlining
     leads to devirtualization and other modifications of the
     callgraph, so functions may become unreachable during the process.
     Also functions declared as extern inline or virtual functions are
     removed, since after inlining we no longer need the offline
     bodies.

     3) Functions called once and not exported from the unit are
     inlined.  This should almost always lead to a reduction of code
     size by eliminating the need for an offline copy of the
     function.  */
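/* A minimal sketch of the greedy small-function loop implemented by
   inline_small_functions below (simplified; the real code also lazily
   re-keys edges whose badness changed and applies the recursive and
   caller-growth checks to each candidate):

     put every edge passing can_inline_edge_p and
       want_inline_small_function_p into a fibheap keyed by edge_badness;
     while the heap is not empty
       and overall_size stays below compute_max_insns (min_size):
         extract the minimum-badness edge, inline_call it, and update
         the keys of the caller and callee edges it affected.  */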
#include "coretypes.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "coverage.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "ipa-inline.h"

/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static gcov_type max_count, max_benefit;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into a tiny wrapper, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the limit on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
caller_growth_limits (struct cgraph_edge *e)
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);
  /* Look for the function e->caller is inlined into.  While doing
     so, work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that: not on the self size of the
     outer function, nor on the self size of the inline code
     we immediately inline into.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent the compiler from exploding".  */
      info = inline_summary (to);
      if (limit < info->self_size)
        limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
        stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
        to = to->callers->caller;
  while (to->global.inlined_to);
  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
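  /* For example, assuming the documented default of --param
     large-function-growth=100, LIMIT doubles here: if the largest
     enclosing body found above is 5000 insns, the size after inlining
     may reach 10000 insns before we refuse the edge below.  */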
  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += (stack_size_limit
                       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
  inlined_stack = (outer_info->stack_frame_offset
                   + outer_info->estimated_self_stack_size
                   + what_info->estimated_stack_size);
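  /* That is, INLINED_STACK is the stack depth at the call site (the
     offset of the caller's frame within its inline context plus the
     caller's own frame) plus the callee's estimated frame: the
     worst-case stack use once the callee's frame is folded into the
     caller's.  */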
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
         inline call, we can inline, too.
         This bit overoptimistically assumes that we are good at stack
         packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
/* Dump info about why inlining has failed.  */

report_inline_failed_reason (struct cgraph_edge *e)
  fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
           cgraph_node_name (e->caller), e->caller->uid,
           cgraph_node_name (e->callee), e->callee->uid,
           cgraph_inline_failed_string (e->inline_failed));
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.  */
can_inline_edge_p (struct cgraph_edge *e, bool report)
  bool inlinable = true;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->callee->decl);

  gcc_assert (e->inline_failed);

  if (!e->callee->analyzed)
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (!inline_summary (e->callee)->inlinable)
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
  else if (cgraph_function_body_availability (e->callee) <= AVAIL_OVERWRITABLE)
      e->inline_failed = CIF_OVERWRITABLE;
  else if (e->call_stmt_cannot_inline_p)
      e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
           && DECL_FUNCTION_PERSONALITY (e->callee->decl)
           && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
               != DECL_FUNCTION_PERSONALITY (e->callee->decl)))
      e->inline_failed = CIF_EH_PERSONALITY;
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (DECL_STRUCT_FUNCTION (e->callee->decl)
           && DECL_STRUCT_FUNCTION (e->callee->decl)->can_throw_non_call_exceptions
           && !(DECL_STRUCT_FUNCTION (e->caller->decl)
                && DECL_STRUCT_FUNCTION (e->caller->decl)->can_throw_non_call_exceptions))
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->decl,
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (e->callee->decl)
           && !caller_growth_limits (e))
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of
     handling the optimization attribute.  */
  else if (caller_tree != callee_tree)
      struct cl_optimization *caller_opt
        = TREE_OPTIMIZATION ((caller_tree)
                             ? caller_tree
                             : optimization_default_node);

      struct cl_optimization *callee_opt
        = TREE_OPTIMIZATION ((callee_tree)
                             ? callee_tree
                             : optimization_default_node);

      if ((caller_opt->x_optimize > callee_opt->x_optimize)
          || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
          e->inline_failed = CIF_TARGET_OPTIMIZATION_MISMATCH;
  /* Be sure that the cannot_inline_p flag is up to date.  */
  gcc_checking_assert (!e->call_stmt
                       || (gimple_call_cannot_inline_p (e->call_stmt)
                           == e->call_stmt_cannot_inline_p)
                       /* In -flto-partition=none mode we really keep things out of
                          sync because call_stmt_cannot_inline_p is set at cgraph
                          merging when function bodies are not there yet.  */
                       || (in_lto_p && !gimple_call_cannot_inline_p (e->call_stmt)));
  if (!inlinable && report)
    report_inline_failed_reason (e);
/* Return true if the edge E is inlinable during early inlining.  */

can_early_inline_edge_p (struct cgraph_edge *e)
  /* The early inliner might get called at WPA stage when an IPA pass adds
     a new function.  In this case we cannot really do any early inlining
     because the function bodies are missing.  */
  if (!gimple_has_body_p (e->callee->decl))
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee with the early inliner yet).  We don't have a CIF code for
     this case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
      fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
  if (!can_inline_edge_p (e, true))
/* Return true when N is a leaf function.  Accept cheap builtins
   in leaf functions.  */

leaf_node_p (struct cgraph_node *n)
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
/* Return true if we are interested in inlining the small function early.  */

want_early_inline_function_p (struct cgraph_edge *e)
  bool want_inline = true;

  if (DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
  else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
           && !flag_inline_small_functions)
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      int growth = estimate_edge_growth (e);
  else if (!cgraph_maybe_hot_edge_p (e)
        fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                 "call is cold and code would grow by %i\n",
                 cgraph_node_name (e->caller), e->caller->uid,
                 cgraph_node_name (e->callee), e->callee->uid,
  else if (!leaf_node_p (e->callee)
        fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                 "callee is not leaf and code would grow by %i\n",
                 cgraph_node_name (e->caller), e->caller->uid,
                 cgraph_node_name (e->callee), e->callee->uid,
  else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                 "growth %i exceeds --param early-inlining-insns\n",
                 cgraph_node_name (e->caller), e->caller->uid,
                 cgraph_node_name (e->callee), e->callee->uid,
/* Return true if we are interested in inlining the small function.
   When REPORT is true, report the reason to the dump file.  */

want_inline_small_function_p (struct cgraph_edge *e, bool report)
  bool want_inline = true;

  if (DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
  else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
           && !flag_inline_small_functions)
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      int growth = estimate_edge_growth (e);
  else if (DECL_DECLARED_INLINE_P (e->callee->decl)
           && growth >= MAX_INLINE_INSNS_SINGLE)
      e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
  else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
           && !flag_inline_functions)
      e->inline_failed = CIF_NOT_DECLARED_INLINED;
  else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
           && growth >= MAX_INLINE_INSNS_AUTO)
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
  else if (!cgraph_maybe_hot_edge_p (e)
           && estimate_growth (e->callee) > 0)
      e->inline_failed = CIF_UNLIKELY_CALL;

  if (!want_inline && report)
    report_inline_failed_reason (e);
/* EDGE is a self-recursive edge.
   We handle two cases - when function A is inlined into itself
   or when function A is being inlined into another inline copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
                                   struct cgraph_node *outer_node,
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->callee->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
      reason = "recursive call is cold";
  else if (max_count && !outer_node->count)
      reason = "not executed in profile";
  else if (depth > max_depth)
      reason = "--param max-inline-recursive-depth exceeded.";

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;
  /* Inlining of a self-recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self-recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
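  /* (A sanity check on that bound: if each call recurses with
     probability p, the expected number of self-recursive calls is
     p + p^2 + ... = p / (1 - p); for p = 1 - 1/max_depth this sums to
     max_depth - 1.)  */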
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
      for (i = 1; i < depth; i++)
        max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
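      /* MAX_PROB starts out as roughly 1 - 1/max_depth in
         CGRAPH_FREQ_BASE fixed point; squaring it once per level
         already peeled makes the acceptable recursion probability
         shrink geometrically as the peeled depth grows.  */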
          && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
          reason = "profile of recursive call is too large";
          && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
          reason = "frequency of recursive call is too large";
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase chances that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably when to do recursive inlining without profile feedback
     is tricky.  For now we disable recursive inlining when the probability
     of self recursion is low.

     Recursive inlining of a self-recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
          && (edge->count * 100 / outer_node->count
              <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
          reason = "profile of recursive call is too small";
          && (edge->frequency * 100 / caller_freq
              <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
          reason = "frequency of recursive call is too small";
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
/* Decide if NODE is called once and inlining it would eliminate the need
   for the offline copy of the function.  */

want_inline_function_called_once_p (struct cgraph_node *node)
  /* Already inlined?  */
  if (node->global.inlined_to)
  /* Zero or more than one caller?  */
      || node->callers->next_caller)
  /* Recursive call makes no sense to inline.  */
  if (node->callers->caller == node)
  /* External functions are not really in the unit, so inlining
     them when called once would just increase the program size.  */
  if (DECL_EXTERNAL (node->decl))
  /* Offline body must be optimized out.  */
  if (!cgraph_will_be_removed_from_program_if_no_direct_calls (node))
  if (!can_inline_edge_p (node->callers, true))
/* A cost model driving the inlining heuristics in a way so that the edges
   with the smallest badness are inlined first.  After each inlining is
   performed the costs of all caller edges of the nodes affected are
   recomputed so that the metrics may accurately depend on values such as
   the number of inlinable callers of the function or the function body
   size.  */
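/* Summarizing the three regimes computed below (a paraphrase of the
   code, not an exact formula): with profile feedback, badness is
   scaled from -(call count * time benefit) / growth; with guessed
   branch probabilities, from growth * 10000 divided by the call
   frequency and the relative time benefit, biased by the overall
   estimated growth; with neither, from the overall growth * 256
   adjusted by the loop nest depth.  Smaller (more negative) badness
   is inlined first.  */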
edge_badness (struct cgraph_edge *edge, bool dump)
  struct inline_summary *callee_info = inline_summary (edge->callee);

  if (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl))

  growth = estimate_edge_growth (edge);

      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
               cgraph_node_name (edge->caller),
               cgraph_node_name (edge->callee));
      fprintf (dump_file, "      growth %i, time %i-%i, size %i-%i\n",
               callee_info->time_inlining_benefit
               + edge->call_stmt_time,
               callee_info->size_inlining_benefit
               + edge->call_stmt_size);

  /* Always prefer inlining that saves code size.  */
      badness = INT_MIN - growth;
        fprintf (dump_file, "      %i: Growth %i < 0\n", (int) badness,
  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for the overall number of "executed" inlined calls.  */
      ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
      (callee_info->time_inlining_benefit
       + edge->call_stmt_time + 1)) / growth;

      /* Be sure that insanity of the profile won't lead to increasing counts
         in the scaling and thus to overflow in the computation above.  */
      gcc_assert (max_count >= edge->count);
               "      %i (relative %f): profile info. Relative count %f"
               " * Relative benefit %f\n",
               (int) badness, (double) badness / INT_MIN,
               (double) edge->count / max_count,
               (double) (inline_summary (edge->callee)->
                         time_inlining_benefit
                         + edge->call_stmt_time + 1) / (max_benefit + 1));
  /* When a function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within the function, the function itself may be infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so functions of the
     same size called fewer times get priority).  */
  else if (flag_guess_branch_prob)
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      badness = growth * 10000;
        100 * (callee_info->time_inlining_benefit
               + edge->call_stmt_time)
          / (callee_info->time + 1) + 1;
      benefitperc = MIN (benefitperc, 100);
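      /* In effect badness is roughly growth * 10000 scaled down by the
         call frequency (DIV, in percent of CGRAPH_FREQ_BASE) and by
         BENEFITPERC, the estimated percentage of callee run time saved
         by inlining, so hot calls with a large relative speedup get
         the smallest keys.  */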
      /* Decrease badness if the call is nested.  */
      /* Compress the range so we don't overflow.  */
        div = 10000 + ceil_log2 (div) - 8;
      growth_for_all = estimate_growth (edge->callee);
      badness += growth_for_all;
      if (badness > INT_MAX)
               "      %i: guessed profile. frequency %i, overall growth %i,"
               " benefit %i%%, divisor %i\n",
               (int) badness, edge->frequency, growth_for_all,
  /* When a function-local profile is not available or does not give
     useful information (i.e. frequency is zero), base the cost on the
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
      int nest = MIN (edge->loop_nest, 8);
      badness = estimate_growth (edge->callee) * 256;

      /* Decrease badness if the call is nested.  */

        fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Always make recursive inlining happen after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
  int badness = edge_badness (edge, false);
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases the keys.
         When we increase the key we do not update the heap
         and instead re-insert the element once it becomes
         a minimum of the heap.  */
      if (badness < n->key)
          fibheap_replace_key (heap, n, badness);
          if (dump_file && (dump_flags & TDF_DETAILS))
              "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
              cgraph_node_name (edge->caller), edge->caller->uid,
              cgraph_node_name (edge->callee), edge->callee->uid,
      gcc_checking_assert (n->key == badness);

      if (dump_file && (dump_flags & TDF_DETAILS))
          "  enqueuing call %s/%i -> %s/%i, badness %i\n",
          cgraph_node_name (edge->caller), edge->caller->uid,
          cgraph_node_name (edge->callee), edge->callee->uid,
      edge->aux = fibheap_insert (heap, badness, edge);
/* Recompute heap nodes for each caller edge of NODE.  */

update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
  struct cgraph_edge *edge;

  if (!inline_summary (node)->inlinable
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
  if (!bitmap_set_bit (updated_nodes, node->uid))
  inline_summary (node)->estimated_growth = INT_MIN;

  /* See if there is something to do.  */
  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)

  for (; edge; edge = edge->next_caller)
    if (edge->inline_failed)
        if (can_inline_edge_p (edge, false)
            && want_inline_small_function_p (edge, false))
          update_edge_key (heap, edge);
            report_inline_failed_reason (edge);
          fibheap_delete_node (heap, (fibnode_t) edge->aux);
/* Recompute heap nodes for each uninlined call.
   This is used when we know that edge badnesses are only going to
   increase (we introduced a new call site) and thus all we need to do
   is insert the newly created edges into the heap.  */

update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
  struct cgraph_edge *e = node->callees;

  inline_summary (node)->estimated_growth = INT_MIN;

    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
        && inline_summary (e->callee)->inlinable
        && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
        && !bitmap_bit_p (updated_nodes, e->callee->uid))
          inline_summary (node)->estimated_growth = INT_MIN;
          update_edge_key (heap, e);
        if (e->caller == node)
        e = e->caller->callers;
    while (!e->next_callee);
/* Recompute heap nodes for the caller edges of each of the callees.
   Walk recursively into all inline clones.  */

update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
                        bitmap updated_nodes)
  struct cgraph_edge *e = node->callees;

  inline_summary (node)->estimated_growth = INT_MIN;

    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
        if (e->inline_failed)
          update_caller_keys (heap, e->callee, updated_nodes);
        if (e->caller == node)
        e = e->caller->callers;
    while (!e->next_callee);
/* Enqueue all recursive calls from NODE into the priority queue,
   prioritized by how much we want to recursively inline the call.  */

lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
        /* When profile feedback is available, prioritize by the expected
           number of calls.  The count is scaled down so that the negated
           key always fits in the int key space of the fibheap.  */
        fibheap_insert (heap,
                        !max_count ? -e->frequency
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given limit.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

recursive_inlining (struct cgraph_edge *edge,
                    VEC (cgraph_edge_p, heap) **new_edges)
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;

  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
      fibheap_delete (heap);
             "  Performing recursive inlining on %s\n",
             cgraph_node_name (node));
  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap))
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      if (estimate_size_after_inlining (node, curr) > limit)

      if (!can_inline_edge_p (curr, true))

      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))

                   "   Inlining call of depth %i", depth);
            fprintf (dump_file, " called approx. %.2f times per call",
                     (double)curr->count / node->count);
          fprintf (dump_file, "\n");
      /* We need the original clone to copy around.  */
          master_clone = cgraph_clone_node (node, node->decl,
                                            node->count, CGRAPH_FREQ_BASE, 1,
          for (e = master_clone->callees; e; e = e->next_callee)
            if (!e->inline_failed)
              clone_inlined_nodes (e, true, false, NULL);

      cgraph_redirect_edge_callee (curr, master_clone);
      inline_call (curr, false, new_edges, &overall_size);
      lookup_recursive_calls (node, curr->callee, heap);

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);

             "\n   Inlined %i times, "
             "body grown from size %i to %i, time %i to %i\n", n,
             inline_summary (master_clone)->size, inline_summary (node)->size,
             inline_summary (master_clone)->time, inline_summary (node)->time);
  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone, so we do not need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
  cgraph_remove_node (master_clone);
/* Given a whole-compilation-unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */
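/* For example, assuming the documented default of --param
   inline-unit-growth=30, a unit estimated at 100000 insns may grow to
   130000 insns; tiny units are first rounded up to
   PARAM_LARGE_UNIT_INSNS so the percentage limit does not starve
   small programs of inlining.  */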
compute_max_insns (int insns)
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (inline_summary (edge->callee)->inlinable
          && edge->inline_failed
          && can_inline_edge_p (edge, true)
          && want_inline_small_function_p (edge, true))
        edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
/* We use a greedy algorithm for inlining small functions:
   all inline candidates are put into a prioritized heap ordered by
   badness.

   The inlining of small functions is bounded by the unit growth
   parameters.  */
inline_small_functions (void)
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
  int initial_size = 0;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

             "\nDeciding on inlining of small functions.  Starting with size %i.\n",

  /* Compute the overall unit size and other global parameters used by the
     badness metrics.  */

  for (node = cgraph_nodes; node; node = node->next)
      && !node->global.inlined_to)
        struct inline_summary *info = inline_summary (node);

        info->estimated_growth = INT_MIN;

        if (!DECL_EXTERNAL (node->decl))
          initial_size += info->size;

        for (edge = node->callers; edge; edge = edge->next_caller)
            int benefit = (info->time_inlining_benefit
                           + edge->call_stmt_time);
            if (max_count < edge->count)
              max_count = edge->count;
            if (max_benefit < benefit)
              max_benefit = benefit;

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;
  /* Populate the heap with all edges we might inline.  */

  for (node = cgraph_nodes; node; node = node->next)
      && !node->global.inlined_to)
        fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
                 cgraph_node_name (node), node->uid);

        for (edge = node->callers; edge; edge = edge->next_caller)
          if (edge->inline_failed
              && can_inline_edge_p (edge, true)
              && want_inline_small_function_p (edge, true))
              gcc_assert (!edge->aux);
              update_edge_key (heap, edge);

  gcc_assert (in_lto_p
              || (profile_info && flag_branch_probabilities));
  while (!fibheap_empty (heap))
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      int current_badness;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);

      if (!edge->inline_failed)

      /* When updating the edge costs, we only decrease badness in the keys.
         Increases of badness are handled lazily; when we see a key with an
         out-of-date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
          edge->aux = fibheap_insert (heap, current_badness, edge);

      if (!can_inline_edge_p (edge, true))

      callee = edge->callee;
      growth = estimate_edge_growth (edge);
               "\nConsidering %s with %i size\n",
               cgraph_node_name (edge->callee),
               inline_summary (edge->callee)->size);
               " to be inlined into %s in %s:%i\n"
               " Estimated growth after inlined into all is %+i insns.\n"
               " Estimated badness is %i, frequency %.2f.\n",
               cgraph_node_name (edge->caller),
               flag_wpa ? "unknown"
               : gimple_filename ((const_gimple) edge->call_stmt),
               : gimple_lineno ((const_gimple) edge->call_stmt),
               estimate_growth (edge->callee),
               edge->frequency / (double)CGRAPH_FREQ_BASE);
          fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
          if (dump_flags & TDF_DETAILS)
            edge_badness (edge, true);
      if (overall_size + growth > max_size
          && !DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl))
          edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
          report_inline_failed_reason (edge);

      if (!want_inline_small_function_p (edge, true))

      /* The heuristics for inlining small functions work poorly for
         recursive calls, where we get an effect similar to loop unrolling.
         When inlining such an edge seems profitable, leave the decision to
         the special-purpose recursive inliner.  */
      if (cgraph_edge_recursive_p (edge))
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!recursive_inlining (edge,
                                   flag_indirect_inlining
                                   ? &new_indirect_edges : NULL))
              edge->inline_failed = CIF_RECURSIVE_INLINING;

          /* The recursive inliner inlines all recursive calls of the
             function at once.  Consequently we need to update all callee
             keys.  */
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_all_callee_keys (heap, where, updated_nodes);
          struct cgraph_node *callee;
          struct cgraph_node *outer_node = NULL;

          /* Consider the case where self-recursive function A is inlined
             into B.  This is a desired optimization in some cases, since it
             leads to an effect similar to loop peeling and we might
             completely optimize out the recursive call.  However we must be
             extra selective.  */

          where = edge->caller;
          while (where->global.inlined_to)
              if (where->decl == edge->callee->decl)
                outer_node = where, depth++;
              where = where->callers->caller;
              && !want_inline_self_recursive_call_p (edge, outer_node,
            = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
               ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
          else if (depth && dump_file)
            fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
          callee = edge->callee;
          gcc_checking_assert (!callee->global.inlined_to);
          inline_call (edge, true, &new_indirect_edges, &overall_size);
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);

          /* We inlined the last offline copy into the body.  This might
             lead to callees of the function having fewer call sites and
             thus they may need updating.  */
          if (callee->global.inlined_to)
            update_all_callee_keys (heap, callee, updated_nodes);
            update_callee_keys (heap, edge->callee, updated_nodes);

      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
         such as the number of inlinable calls and the size of the
         function body.  After inlining these properties might change for
         the function we inlined into (since its body size changed) and
         for the functions called by the function we inlined (since the
         number of their inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);

      /* We removed one call of the function we just inlined.  If the
         offline copy is still needed, be sure to update the keys.  */
      if (callee != where && !callee->global.inlined_to)
        update_caller_keys (heap, callee, updated_nodes);
      bitmap_clear (updated_nodes);

               " Inlined into %s which now has time %i and size %i,"
               "net change of %+i.\n",
               cgraph_node_name (edge->caller),
               inline_summary (edge->caller)->time,
               inline_summary (edge->caller)->size,
               overall_size - old_size);
      if (min_size > overall_size)
          min_size = overall_size;
          max_size = compute_max_insns (min_size);

            fprintf (dump_file, "New minimal size reached: %i\n", min_size);

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);

             "Unit growth for small function inlining: %i->%i (%i%%)\n",
             initial_size, overall_size,
             overall_size * 100 / (initial_size + 1) - 100);
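  /* For example, growth from 10000 to 13000 insns prints as
     "10000->13000 (29%)"; the + 1 in the divisor only guards against
     division by zero for an empty unit (and rounds the percentage
     slightly down).  */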
  BITMAP_FREE (updated_nodes);
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

flatten_function (struct cgraph_node *node)
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
      struct cgraph_node *orig_callee;

      /* We've hit a cycle?  It is time to give up.  */
                     "Not inlining %s into %s to avoid cycle.\n",
                     cgraph_node_name (e->callee),
                     cgraph_node_name (e->caller));
          e->inline_failed = CIF_RECURSIVE_INLINING;

      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
          flatten_function (e->callee);

      /* The flatten attribute needs to be processed during late inlining.
         For extra code quality we however do flattening during early
         optimization, too.  */
      if (cgraph_state != CGRAPH_STATE_IPA_SSA
          ? !can_inline_edge_p (e, true)
          : !can_early_inline_edge_p (e))

      if (cgraph_edge_recursive_p (e))
            fprintf (dump_file, "Not inlining: recursive call.\n");

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
            fprintf (dump_file, "Not inlining: SSA form does not match.\n");

      /* Inline the edge and flatten the inline clone.  Avoid
         recursing through the original node if the node was cloned.  */
        fprintf (dump_file, " Inlining %s into %s.\n",
                 cgraph_node_name (e->callee),
                 cgraph_node_name (e->caller));
      orig_callee = e->callee;
      inline_call (e, true, NULL, NULL);
      if (e->callee != orig_callee)
        orig_callee->aux = (void *) node;
      flatten_function (e->callee);
      if (e->callee != orig_callee)
        orig_callee->aux = NULL;
/* Decide on the inlining.  We do so in topological order to avoid
   expenses on updating data structures.  */

  struct cgraph_node *node;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);

  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();
  if (flag_indirect_inlining)
    ipa_create_all_structures_for_iinln ();

    dump_inline_summaries (dump_file);

  nnodes = cgraph_postorder (order);

  for (node = cgraph_nodes; node; node = node->next)

    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so that none of our later choices will make it impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
      /* Handle nodes to be flattened.
         Ideally when processing callees we stop inlining at the
         entry of cycles, possibly cloning that entry point and
         trying to flatten itself, turning it into a self-recursive
         function.  */
      if (lookup_attribute ("flatten",
                            DECL_ATTRIBUTES (node->decl)) != NULL)
            "Flattening %s\n", cgraph_node_name (node));
          flatten_function (node);
  inline_small_functions ();
  cgraph_remove_unreachable_nodes (true, dump_file);
  /* We already perform some inlining of functions called once during
     inlining small functions above.  After unreachable nodes are removed,
     we still might do a quick check that nothing new is found.  */
  if (flag_inline_functions_called_once)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* Inlining one function called once has a good chance of preventing
         inlining another function into the same callee.  Ideally we should
         work in priority order, but probably inlining hot functions first
         is a good cut without the extra pain of maintaining the queue.

         ??? this is not really fitting the bill perfectly: inlining a
         function into a callee often leads to better optimization of the
         callee due to increased context for optimization.
         For example if main() calls a function that outputs help
         and then a function that does the main optimization, we should
         inline the second with priority even if both calls are cold by
         themselves.

         We probably want to implement a new predicate replacing our use of
         maybe_hot_edge interpreted as maybe_hot_edge || callee is known
      for (cold = 0; cold <= 1; cold++)
          for (node = cgraph_nodes; node; node = node->next)
              if (want_inline_function_called_once_p (node)
                  || cgraph_maybe_hot_edge_p (node->callers)))
                  struct cgraph_node *caller = node->callers->caller;

                         "\nInlining %s size %i.\n",
                         cgraph_node_name (node), inline_summary (node)->size);
                         " Called once from %s %i insns.\n",
                         cgraph_node_name (node->callers->caller),
                         inline_summary (node->callers->caller)->size);

                  inline_call (node->callers, true, NULL, NULL);
                      " Inlined into %s which now has %i size\n",
                      cgraph_node_name (caller),
                      inline_summary (caller)->size);
  /* Free the ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    ipa_free_all_structures_after_iinln ();

             "\nInlined %i calls, eliminated %i functions\n\n",
             ncalls_inlined, nfunctions_inlined);

  /* In WPA we use the inline summaries for the partitioning process.  */
    inline_free_summary ();
/* Inline always-inline function calls in NODE.  */

inline_always_inline_functions (struct cgraph_node *node)
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
      if (!DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))

      if (cgraph_edge_recursive_p (e))
            fprintf (dump_file, "  Not inlining recursive call to %s.\n",
                     cgraph_node_name (e->callee));
          e->inline_failed = CIF_RECURSIVE_INLINING;

      if (!can_early_inline_edge_p (e))

        fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
                 cgraph_node_name (e->callee),
                 cgraph_node_name (e->caller));
      inline_call (e, true, NULL, NULL);
/* Decide on early inlining of small functions called by NODE.  */

early_inline_small_functions (struct cgraph_node *node)
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
      if (!inline_summary (e->callee)->inlinable
          || !e->inline_failed)

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (e->callee->decl)
          && !flag_inline_small_functions
          && !flag_inline_functions)

        fprintf (dump_file, "Considering inline candidate %s.\n",
                 cgraph_node_name (e->callee));

      if (!can_early_inline_edge_p (e))

      if (cgraph_edge_recursive_p (e))
            fprintf (dump_file, "  Not inlining: recursive call.\n");

      if (!want_early_inline_function_p (e))

        fprintf (dump_file, " Inlining %s into %s.\n",
                 cgraph_node_name (e->callee),
                 cgraph_node_name (e->caller));
      inline_call (e, true, NULL, NULL);
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function
   calls.  */

early_inliner (void)
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  bool inlined = false;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
         during incremental inlining.  This sucks as functions calling
         always inline functions will get less optimized, but at the
         same time inlining of functions calling always inline
         functions into an always inline function might introduce
         cycles of edges to be always inlined in the callgraph.

         We might want to be smarter and just avoid this type of
         inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->decl))
  else if (lookup_attribute ("flatten",
                             DECL_ATTRIBUTES (node->decl)) != NULL)
      /* When the function is marked to be flattened, recursively inline
         all calls in it.  */
               "Flattening %s\n", cgraph_node_name (node));
      flatten_function (node);

      /* We iterate incremental inlining to get trivial cases of indirect
         inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
             && early_inline_small_functions (node))
          timevar_push (TV_INTEGRATION);
          todo |= optimize_inline_calls (current_function_decl);

          /* Technically we ought to recompute the inline parameters so the
             new iteration of the early inliner works as expected.  We
             however have the values approximately right and thus we only
             need to update the edge info that might be cleared out for
             newly discovered edges.  */
          for (edge = node->callees; edge; edge = edge->next_callee)
              edge->call_stmt_size
                = estimate_num_insns (edge->call_stmt, &eni_size_weights);
              edge->call_stmt_time
                = estimate_num_insns (edge->call_stmt, &eni_time_weights);
          timevar_pop (TV_INTEGRATION);

        fprintf (dump_file, "Iterations: %i\n", iterations);

      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);

  cfun->always_inline_functions_inlined = true;
struct gimple_opt_pass pass_early_inline =
  "einline",                            /* name */
  early_inliner,                        /* execute */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  PROP_ssa,                             /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.  */

gate_ipa_inline (void)
  /* ??? We'd like to skip this if not optimizing or not inlining as
     all always-inline functions have been processed by early
     inlining already.  But this at least breaks EH with C++ as
     we need to unconditionally run fixup_cfg even at -O0.
     So leave it on unconditionally for now.  */
struct ipa_opt_pass_d pass_ipa_inline =
  "inline",                             /* name */
  gate_ipa_inline,                      /* gate */
  ipa_inline,                           /* execute */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
 inline_generate_summary,               /* generate_summary */
 inline_write_summary,                  /* write_summary */
 inline_read_summary,                   /* read_summary */
 NULL,                                  /* write_optimization_summary */
 NULL,                                  /* read_optimization_summary */
 NULL,                                  /* stmt_fixup */
 inline_transform,                      /* function_transform */
 NULL                                   /* variable_transform */