/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, growth
     and so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inline

       A simple local inlining pass inlining callees into the current
       function.  This pass makes no use of whole unit analysis and thus it
       can do only very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into
       SSA form just before this pass and optimized subsequently.  As a
       result, the callees of the function seen by the early inliner have
       already been optimized and the results of early inlining add a lot of
       optimization opportunities for the local optimization.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate abstraction
       penalty in C++ code (via a combination of inlining and early
       optimization) and thus improve the quality of analysis done by the
       real IPA optimizers.

       Because of the lack of whole unit knowledge, the pass cannot really
       make good code size/performance tradeoffs.  It however does very
       simple speculative inlining allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
       the optimizations performed later are very likely to eliminate the
       cost.

     pass_ipa_inline

       This is the real inliner able to handle inlining with whole program
       knowledge.  It performs the following steps:

       1) inlining of small functions.  This is implemented by a greedy
       algorithm ordering all inlinable cgraph edges by their badness and
       inlining them in this order as long as the inline limits allow doing
       so.

       This heuristic is not very good at inlining recursive calls.
       Recursive calls can be inlined with results similar to loop unrolling.
       To do so, a special purpose recursive inliner is executed on the
       function when a recursive edge is met as a viable candidate.

       2) Unreachable functions are removed from the callgraph.  Inlining
       leads to devirtualization and other modifications of the callgraph, so
       functions may become unreachable during the process.  Also functions
       declared as extern inline or virtual functions are removed, since
       after inlining we no longer need the offline bodies.

       3) Functions called once and not exported from the unit are inlined.
       This should almost always lead to a reduction of code size by
       eliminating the need for an offline copy of the function.  */
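
/* A sketch (simplified, for orientation only) of the greedy loop that
   inline_small_functions below implements; the real loop also handles
   recursive edges, lazy badness updates and growth cache invalidation:

       put all edges passing can_inline_edge_p and
	 want_inline_small_function_p into a fibheap keyed by badness;
       while the heap is non-empty:
	 take the edge with the smallest (best) badness;
	 if unit and function growth limits still allow it, inline it;
	 recompute the keys of the caller/callee edges affected.  */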

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "coverage.h"
#include "ggc.h"
#include "rtl.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "except.h"
#include "target.h"
#include "ipa-inline.h"
#include "ipa-utils.h"

/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static gcov_type max_count;

/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.
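
   For example (an illustrative scenario): when a ten-insn wrapper calls
   a ten-thousand-insn worker, basing the limit on the wrapper alone
   would forbid the inlining outright; basing it on the bigger body
   permits it while still bounding further growth.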

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee;
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Look for the function e->caller is inlined into.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that: not on the self size of the
     outer function, and not on the self size of the inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent the compiler from exploding".  */
  do
    {
      info = inline_summary (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
	to = to->callers->caller;
    }
  while (to->global.inlined_to);

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
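
  /* Worked example (assuming the default --param large-function-growth=100):
     if the largest body on the path is 8000 insns, the limit becomes
     8000 + 8000 * 100 / 100 = 16000 insns.  */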

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += (stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
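  /* Worked example (hypothetical numbers, assuming the default
     --param stack-frame-growth=1000): with a 4kB frame collected above,
     stack_size_limit becomes 4kB + 10 * 4kB = 44kB; the edge is rejected
     only when inlined_stack exceeds this limit, the caller's existing
     estimated stack size, and the large-stack-frame threshold below.  */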
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If the function already has large stack usage from a sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
	     cgraph_node_name (e->caller), e->caller->uid,
	     cgraph_node_name (e->callee), e->callee->uid,
	     cgraph_inline_failed_string (e->inline_failed));
}

/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report)
{
  bool inlinable = true;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->callee->decl);

  gcc_assert (e->inline_failed);

  if (!e->callee->analyzed)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (!inline_summary (e->callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (cgraph_function_body_availability (e->callee) <= AVAIL_OVERWRITABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      return false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	   && DECL_FUNCTION_PERSONALITY (e->callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	       != DECL_FUNCTION_PERSONALITY (e->callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into the cgraph node or mirror it in the inline summary.  */
  else if (DECL_STRUCT_FUNCTION (e->callee->decl)
	   && DECL_STRUCT_FUNCTION (e->callee->decl)->can_throw_non_call_exceptions
	   && !(DECL_STRUCT_FUNCTION (e->caller->decl)
		&& DECL_STRUCT_FUNCTION (e->caller->decl)->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->decl,
						e->callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (e->callee->decl)
	   && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      struct cl_optimization *caller_opt
	= TREE_OPTIMIZATION ((caller_tree)
			     ? caller_tree
			     : optimization_default_node);

      struct cl_optimization *callee_opt
	= TREE_OPTIMIZATION ((callee_tree)
			     ? callee_tree
			     : optimization_default_node);

      if ((caller_opt->x_optimize > callee_opt->x_optimize)
	  || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
	{
	  e->inline_failed = CIF_TARGET_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
    }

  /* Be sure that the cannot_inline_p flag is up to date.  */
  gcc_checking_assert (!e->call_stmt
		       || (gimple_call_cannot_inline_p (e->call_stmt)
			   == e->call_stmt_cannot_inline_p)
		       /* In -flto-partition=none mode we really keep things out of
			  sync because call_stmt_cannot_inline_p is set at cgraph
			  merging when function bodies are not there yet.  */
		       || (in_lto_p && !gimple_call_cannot_inline_p (e->call_stmt)));
  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}

/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  /* The early inliner might get called at WPA stage when an IPA pass adds a
     new function.  In this case we cannot really do any early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (e->callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner, yet).  We don't have a CIF code for
     this case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
    {
      if (dump_file)
	fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}

/* Return true when N is a leaf function.  Accept cheap builtins
   in leaf functions.  */

static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      return false;
  return true;
}

/* Return true if we are interested in inlining the small function via
   edge E during early inlining.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;

  if (DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);

      if (growth <= 0)
	;
      else if (!cgraph_maybe_hot_edge_p (e)
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "call is cold and code would grow by %i\n",
		     cgraph_node_name (e->caller), e->caller->uid,
		     cgraph_node_name (e->callee), e->callee->uid,
		     growth);
	  want_inline = false;
	}
      else if (!leaf_node_p (e->callee)
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "callee is not leaf and code would grow by %i\n",
		     cgraph_node_name (e->caller), e->caller->uid,
		     cgraph_node_name (e->callee), e->callee->uid,
		     growth);
	  want_inline = false;
	}
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns\n",
		     cgraph_node_name (e->caller), e->caller->uid,
		     cgraph_node_name (e->callee), e->callee->uid,
		     growth);
	  want_inline = false;
	}
    }
  return want_inline;
}

/* Return true if we are interested in inlining the small function via
   edge E.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;

  if (DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);

      if (growth <= 0)
	;
      else if (DECL_DECLARED_INLINE_P (e->callee->decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE)
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	       && !flag_inline_functions)
	{
	  e->inline_failed = CIF_NOT_DECLARED_INLINED;
	  want_inline = false;
	}
      else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	       && growth >= MAX_INLINE_INSNS_AUTO)
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  want_inline = false;
	}
      else if (!cgraph_maybe_hot_edge_p (e)
	       && estimate_growth (e->callee) > 0)
	{
	  e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}

/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inline copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->callee->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!want_inline)
    ;
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
					 / max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
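      /* Illustration (hypothetical numbers): with max_depth == 8,
	 max_prob starts at roughly (1 - 1/8) * CGRAPH_FREQ_BASE, i.e. a
	 recursion probability of at most 7/8 per level; squaring it once
	 per level of existing depth keeps the expected total recursion
	 depth at most max_depth.  */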
      if (max_count
	  && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
	      >= max_prob))
	{
	  reason = "profile of recursive call is too large";
	  want_inline = false;
	}
      if (!max_count
	  && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
	      >= max_prob))
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase the chance that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
	  && (edge->count * 100 / outer_node->count
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "profile of recursive call is too small";
	  want_inline = false;
	}
      else if (!max_count
	       && (edge->frequency * 100 / caller_freq
		   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}

/* Decide if NODE is called once and inlining it would eliminate the need
   for an offline copy of the function.  */

static bool
want_inline_function_called_once_p (struct cgraph_node *node)
{
   /* Already inlined?  */
   if (node->global.inlined_to)
     return false;
   /* Zero or more than one callers?  */
   if (!node->callers
       || node->callers->next_caller)
     return false;
   /* It makes no sense to inline a recursive call.  */
   if (node->callers->caller == node)
     return false;
   /* External functions are not really in the unit, so inlining
      them when called once would just increase the program size.  */
   if (DECL_EXTERNAL (node->decl))
     return false;
   /* The offline body must be optimized out.  */
   if (!cgraph_will_be_removed_from_program_if_no_direct_calls (node))
     return false;
   if (!can_inline_edge_p (node->callers, true))
     return false;
   return true;
}

/* A cost model driving the inlining heuristics in a way so the edges with
   the smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed so the
   metrics may accurately depend on values such as the number of inlinable
   callers of the function or the function body size.  */
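
/* Informal overview of the badness regimes computed below:

     size shrinks (growth <= 0):   badness = INT_MIN - growth (always best)
     profile feedback available:   badness ~ -(count / max_count)
					       * benefit / growth
     guessed branch probabilities: badness ~ growth * 10000
					       / (frequency * benefit)
					       + overall unit growth
     no useful local profile:      badness ~ overall growth scaled by
					       loop nest depth  */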

static int
edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth, time_growth;
  struct inline_summary *callee_info = inline_summary (edge->callee);

  if (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl))
    return INT_MIN;

  growth = estimate_edge_growth (edge);
  time_growth = estimate_edge_time (edge);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
	       cgraph_node_name (edge->caller),
	       cgraph_node_name (edge->callee));
      fprintf (dump_file, "      growth size %i, time %i\n",
	       growth,
	       time_growth);
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN - growth;
      if (dump)
	fprintf (dump_file, "      %i: Growth %i < 0\n", (int) badness,
		 growth);
    }

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    {
      int benefitperc;
      benefitperc = (((gcov_type)callee_info->time
		      * edge->frequency / CGRAPH_FREQ_BASE - time_growth) * 100
		     / (callee_info->time + 1) + 1);
      benefitperc = MIN (benefitperc, 100);
      benefitperc = MAX (benefitperc, 0);
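      /* Illustrative reading of the formula below (hypothetical numbers):
	 an edge executed half as often as the hottest edge
	 (count / max_count = 0.5) with benefitperc = 40 and growth = 10
	 gets badness of about (0.5 * INT_MIN / 100) * 40 / 10; the hotter,
	 more beneficial and more compact the call, the more negative
	 (better) the key.  */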
      badness = ((int)
		 ((double) edge->count * INT_MIN / max_count / 100) *
		 benefitperc) / growth;

      /* Be sure that insanity of the profile won't lead to increasing counts
	 in the scaling and thus to an overflow in the computation above.  */
      gcc_assert (max_count >= edge->count);
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i (relative %f): profile info. Relative count %f"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge->count / max_count,
		   (double) benefitperc);
	}
    }

  /* When a function local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within the function, the function itself is infrequent.

     The other objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so functions called
     fewer times and of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      int benefitperc;
      int growth_for_all;
      badness = growth * 10000;
      benefitperc = (((gcov_type)callee_info->time
		      * edge->frequency / CGRAPH_FREQ_BASE - time_growth) * 100
		     / (callee_info->time + 1) + 1);
      benefitperc = MIN (benefitperc, 100);
      benefitperc = MAX (benefitperc, 0);
      div *= benefitperc;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 10000)
	div = 10000 + ceil_log2 (div) - 8;
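      /* Illustration (hypothetical numbers): a call executed once per
	 invocation (frequency == CGRAPH_FREQ_BASE) with benefitperc = 50
	 gives div = 101 * 50, so growth = 20 yields badness of about
	 20 * 10000 / 5050 = 39 before the overall-growth bias below.  */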
      if (div < 1)
	div = 1;
      if (badness > 0)
	badness /= div;
      growth_for_all = estimate_growth (edge->callee);
      badness += growth_for_all;
      if (badness > INT_MAX)
	badness = INT_MAX;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i: guessed profile. frequency %i, overall growth %i,"
		   " benefit %i%%, divisor %i\n",
		   (int) badness, edge->frequency, growth_for_all,
		   benefitperc, div);
	}
    }

  /* When the function local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on
     the loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;

      if (dump)
	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
		 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
    return badness + 1;
  else
    return badness;
}

/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases the keys.
	 When we increase the key we do not update the heap
	 and instead re-insert the element once it becomes
	 a minimum of the heap.  */
      if (badness < n->key)
	{
	  fibheap_replace_key (heap, n, badness);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
		       cgraph_node_name (edge->caller), edge->caller->uid,
		       cgraph_node_name (edge->callee), edge->callee->uid,
		       (int) n->key,
		       badness);
	    }
	  gcc_checking_assert (n->key == badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "  enqueuing call %s/%i -> %s/%i, badness %i\n",
		   cgraph_node_name (edge->caller), edge->caller->uid,
		   cgraph_node_name (edge->callee), edge->callee->uid,
		   badness);
	}
      edge->aux = fibheap_insert (heap, badness, edge);
    }
}

/* Recompute heap nodes for each of NODE's caller edges.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *edge;

  if (!inline_summary (node)->inlinable
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;
  reset_node_growth_cache (node);

  /* See if there is something to do.  */
  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      break;
  if (!edge)
    return;

  for (; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	reset_edge_growth_cache (edge);
	if (can_inline_edge_p (edge, false)
	    && want_inline_small_function_p (edge, false))
	  update_edge_key (heap, edge);
	else if (edge->aux)
	  {
	    report_inline_failed_reason (edge);
	    fibheap_delete_node (heap, (fibnode_t) edge->aux);
	    edge->aux = NULL;
	  }
      }
}

/* Recompute heap nodes for each uninlined call.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  reset_node_growth_cache (node);

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	reset_edge_growth_cache (e);
	if (e->inline_failed
	    && inline_summary (e->callee)->inlinable
	    && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, e->callee->uid))
	  {
	    reset_node_growth_cache (node);
	    update_edge_key (heap, e);
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}

/* Recompute heap nodes for the caller edges of each of the callees.
   Walk recursively into all inline clones.  */

static void
update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
			bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  reset_node_growth_cache (node);

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed)
	  update_caller_keys (heap, e->callee, updated_nodes);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}

/* Enqueue all recursive calls from NODE into the priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
	/* When profile feedback is available, prioritize by the expected
	   number of calls.  */
	fibheap_insert (heap,
			!max_count ? -e->frequency
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
			e);
      }
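
  /* Note on the keys above: they are negated so that hotter calls sort
     first, and profile counts are scaled down into at most ~2^24 buckets
     so the 64-bit gcov counts fit the fibheap's int keys.  */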
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  fibheap_t heap;
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      if (estimate_size_after_inlining (node, curr) > limit)
	break;

      if (!can_inline_edge_p (curr, true))
	continue;

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl == curr->callee->decl)
	  depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	continue;

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    fprintf (dump_file, " called approx. %.2f times per call",
		     (double) curr->count / node->count);
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need the original clone to copy around.  */
	  master_clone = cgraph_clone_node (node, node->decl,
					    node->count, CGRAPH_FREQ_BASE,
					    false, NULL, true);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);
	}

      cgraph_redirect_edge_callee (curr, master_clone);
      inline_call (curr, false, new_edges, &overall_size);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);
1053 "\n Inlined %i times, "
1054 "body grown from size %i to %i, time %i to %i\n", n,
1055 inline_summary (master_clone)->size, inline_summary (node)->size,
1056 inline_summary (master_clone)->time, inline_summary (node)->time);
1058 /* Remove master clone we used for inlining. We rely that clones inlined
1059 into master clone gets queued just before master clone so we don't
1061 for (node = cgraph_nodes; node != master_clone;
1065 if (node->global.inlined_to == master_clone)
1066 cgraph_remove_node (node);
1068 cgraph_remove_node (master_clone);

/* Given the whole compilation unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
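
/* For instance (assuming the default --param inline-unit-growth=30):
   a unit of 100000 insns may grow to 100000 * 130 / 100 = 130000 insns,
   while units smaller than --param large-unit-insns are rounded up to
   that size first, giving small units proportionally more headroom.  */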

/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (inline_summary (edge->callee)->inlinable
	  && edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
	edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
    }
}

/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by the unit growth
   parameters.  */

static void
inline_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
  int initial_size = 0;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1129 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1132 /* Compute overall unit size and other global parameters used by badness
1136 initialize_growth_caches ();
1138 for (node = cgraph_nodes; node; node = node->next)
1140 && !node->global.inlined_to)
1142 struct inline_summary *info = inline_summary (node);
1144 if (!DECL_EXTERNAL (node->decl))
1145 initial_size += info->size;
1147 for (edge = node->callers; edge; edge = edge->next_caller)
1148 if (max_count < edge->count)
1149 max_count = edge->count;
1152 overall_size = initial_size;
1153 max_size = compute_max_insns (overall_size);
1154 min_size = overall_size;

  /* Populate the heap with all edges we might inline.  */

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed
	&& !node->global.inlined_to)
      {
	if (dump_file)
	  fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
		   cgraph_node_name (node), node->uid);

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (edge->inline_failed
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true))
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (heap, edge);
	    }
      }

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));

  while (!fibheap_empty (heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      int current_badness;
      int growth;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out of date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	{
	  edge->aux = fibheap_insert (heap, current_badness, edge);
	  continue;
	}

      if (!can_inline_edge_p (edge, true))
	continue;

      callee = edge->callee;
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   cgraph_node_name (edge->callee),
		   inline_summary (edge->callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated growth after inlined into all is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1
		   : gimple_lineno ((const_gimple) edge->call_stmt),
		   estimate_growth (edge->callee),
		   badness,
		   edge->frequency / (double) CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file, " Called " HOST_WIDEST_INT_PRINT_DEC "x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	continue;

      /* The heuristics for inlining small functions work poorly for
	 recursive calls where we effectively do something similar to loop
	 unrolling.  When inlining such an edge seems profitable, leave the
	 decision to the special-purpose recursive inliner.  */
      if (cgraph_edge_recursive_p (edge))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   flag_indirect_inlining
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      continue;
	    }
	  /* The recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);
	  update_all_callee_keys (heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *callee;
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self recursive function A is inlined
	     into B.  This is a desired optimization in some cases, since it
	     leads to an effect similar to loop peeling and we might
	     completely optimize out the recursive call.  However we must be
	     extra selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->decl == edge->callee->decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  callee = edge->callee;
	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);

	  /* We inlined the last offline copy into the body.  This might lead
	     to callees of the function having fewer call sites and thus they
	     may need updating.  */
	  if (callee->global.inlined_to)
	    update_all_callee_keys (heap, callee, updated_nodes);
	  else
	    update_callee_keys (heap, edge->callee, updated_nodes);
	}

      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the function
	 body.  After inlining these properties might change for the function
	 we inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);

      /* We removed one call of the function we just inlined.  If the offline
	 copy is still needed, be sure to update the keys.  */
      if (callee != where && !callee->global.inlined_to)
	update_caller_keys (heap, callee, updated_nodes);
      bitmap_clear (updated_nodes);
1334 " Inlined into %s which now has time %i and size %i,"
1335 "net change of %+i.\n",
1336 cgraph_node_name (edge->caller),
1337 inline_summary (edge->caller)->time,
1338 inline_summary (edge->caller)->size,
1339 overall_size - old_size);
1341 if (min_size > overall_size)
1343 min_size = overall_size;
1344 max_size = compute_max_insns (min_size);
1347 fprintf (dump_file, "New minimal size reached: %i\n", min_size);

  free_growth_caches ();
  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / initial_size - 100 : 0);
  BITMAP_FREE (updated_nodes);
}

/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;

      /* Have we hit a cycle?  If so, it is time to give up.  */
      if (e->callee->aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     cgraph_node_name (e->callee),
		     cgraph_node_name (e->caller));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (e->callee, early);
	  continue;
	}

      /* The flatten attribute needs to be processed during late inlining.
	 For extra code quality we however do flattening during early
	 optimization, too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      orig_callee = e->callee;
      inline_call (e, true, NULL, NULL);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
    }

  node->aux = NULL;
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int i;

  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();
  if (flag_indirect_inlining)
    ipa_create_all_structures_for_iinln ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
	 Ideally when processing callees we stop inlining at the
	 entry of cycles, possibly cloning that entry point and
	 trying to flatten itself, turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  flatten_function (node, false);
	}
    }

  inline_small_functions ();
  cgraph_remove_unreachable_nodes (true, dump_file);
  free (order);

  /* We already perform some inlining of functions called once during
     inlining of small functions above.  After unreachable nodes are removed,
     we still might do a quick check that nothing new is found.  */
  if (flag_inline_functions_called_once)
    {
      int cold;
      if (dump_file)
	fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* Inlining one function called once has a good chance of preventing
	 inlining of other functions into the same callee.  Ideally we should
	 work in priority order, but probably inlining hot functions first
	 is a good cut without the extra pain of maintaining the queue.

	 ??? this is not really fitting the bill perfectly: inlining function
	 into callee often leads to better optimization of callee due to
	 increased context for optimization.
	 For example if main() function calls a function that outputs help
	 and then a function that does the main optimization, we should
	 inline the second with priority even if both calls are cold by
	 themselves.

	 We probably want to implement a new predicate replacing our use of
	 maybe_hot_edge interpreted as maybe_hot_edge || callee is known
	 to be hot.  */
      for (cold = 0; cold <= 1; cold ++)
	{
	  for (node = cgraph_nodes; node; node = node->next)
	    {
	      if (want_inline_function_called_once_p (node)
		  && (cold
		      || cgraph_maybe_hot_edge_p (node->callers)))
		{
		  struct cgraph_node *caller = node->callers->caller;

		  if (dump_file)
		    {
		      fprintf (dump_file,
			       "\nInlining %s size %i.\n",
			       cgraph_node_name (node),
			       inline_summary (node)->size);
		      fprintf (dump_file,
			       " Called once from %s %i insns.\n",
			       cgraph_node_name (node->callers->caller),
			       inline_summary (node->callers->caller)->size);
		    }

		  inline_call (node->callers, true, NULL, NULL);
		  if (dump_file)
		    fprintf (dump_file,
			     " Inlined into %s which now has %i size\n",
			     cgraph_node_name (caller),
			     inline_summary (caller)->size);
		}
	    }
	}
    }

  /* Free the ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions\n\n",
	     ncalls_inlined, nfunctions_inlined);

  if (dump_file)
    dump_inline_summaries (dump_file);
  /* In WPA we use the inline summaries for the partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return 0;
}

/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      if (!DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     cgraph_node_name (e->callee));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      inline_call (e, true, NULL, NULL);
      inlined = true;
    }

  return inlined;
}

/* Decide on the inlining of small functions called from NODE.  The early
   inliner as a whole runs in the topological order of the callgraph, which
   keeps the cost of updating data structures low.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      if (!inline_summary (e->callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	  && !flag_inline_small_functions
	  && !flag_inline_functions)
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (e->callee));

      if (!can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      inline_call (e, true, NULL, NULL);
      inlined = true;
    }

  return inlined;
}

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function
   calls.  */

static unsigned int
early_inliner (void)
{
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if the datastructures for the ipa-inliner are already
     computed.  This happens when some pass decides to construct a new
     function and cgraph_add_new_function calls lowering passes and early
     optimization on it.  This may confuse us when the early inliner decides
     to inline a call to a function clone, because function clones don't have
     a parameter list in ipa-prop matching their signature.  */
  if (ipa_node_params_vector)
    return 0;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always inline functions will get less optimized, but at the
	 same time inlining of functions calling always inline
	 function into an always inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->decl))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file,
		 "Flattening %s\n", cgraph_node_name (node));
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of the early inliner works as expected.  We however
	     have values approximately right and thus we only need to update
	     edge info that might be cleared out for newly discovered
	     edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      struct inline_edge_summary *es = inline_edge_summary (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	    }
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  NULL,					/* gate */
  early_inliner,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};

/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.  */

static bool
gate_ipa_inline (void)
{
  /* ??? We'd like to skip this if not optimizing or not inlining as
     all always-inline functions have been processed by early
     inlining already.  But this at least breaks EH with C++ as
     we need to unconditionally run fixup_cfg even at -O0.
     So leave it on unconditionally for now.  */
  return 1;
}

struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  gate_ipa_inline,			/* gate */
  ipa_inline,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions | TODO_ggc_collect	/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 inline_write_summary,			/* write_summary */
 inline_read_summary,			/* read_summary */
 NULL,					/* write_optimization_summary */
 NULL,					/* read_optimization_summary */
 NULL,					/* stmt_fixup */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL					/* variable_transform */
};