/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

      can_inline_edge_p allows checking that a particular inlining is
      allowed by the limits specified by the user (allowed function growth,
      growth and so on).

   Functions are inlined when it is obvious the result is profitable (such
   as functions called once or when inlining reduces code size).
   In addition to that we perform inlining of small functions and recursive
   inlining.

   The inliner itself is split into two passes:

   pass_early_inline

     Simple local inlining pass inlining callees into the current function.
     This pass makes no use of whole unit analysis and thus it can make only
     very simple decisions based on local properties.

     The strength of the pass is that it is run in topological order
     (reverse postorder) on the callgraph.  Functions are converted into SSA
     form just before this pass and optimized subsequently.  As a result,
     the callees of the function seen by the early inliner were already
     optimized, and the results of early inlining add a lot of optimization
     opportunities for the local optimization.

     The pass handles the obvious inlining decisions within the compilation
     unit - inlining auto inline functions, inlining for size and
     flattening.

     The main strength of the pass is the ability to eliminate abstraction
     penalty in C++ code (via a combination of inlining and early
     optimization) and thus improve the quality of analysis done by the
     real IPA optimizers.

     Because of its lack of whole unit knowledge, the pass cannot really
     make good code size/performance tradeoffs.  It however does very
     simple speculative inlining, allowing code size to grow by
     EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
     the optimizations performed later are very likely to eliminate the
     cost.

   pass_ipa_inline

     This is the real inliner able to handle inlining with whole program
     knowledge.  It performs the following steps:

     1) inlining of small functions.  This is implemented by a greedy
     algorithm ordering all inlinable cgraph edges by their badness and
     inlining them in this order as long as the inline limits allow doing
     so.

     This heuristic is not very good at inlining recursive calls.
     Recursive calls can be inlined with results similar to loop unrolling.
     To do so, a special purpose recursive inliner is executed on the
     function when a recursive edge is met as a viable candidate.

     2) Unreachable functions are removed from the callgraph.  Inlining
     leads to devirtualization and other modifications of the callgraph, so
     functions may become unreachable during the process.  Also functions
     declared as extern inline or virtual functions are removed, since
     after inlining we no longer need the offline bodies.

     3) Functions called once and not exported from the unit are inlined.
     This should almost always lead to a reduction of code size by
     eliminating the need for an offline copy of the function.  */
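/* A minimal sketch of the greedy strategy described above (illustrative
   only, not part of GCC; error handling and cache maintenance omitted):

       heap = fibheap_new ();
       for each call edge E with can_inline_edge_p (E, ...)
	   and want_inline_small_function_p (E, ...):
	 fibheap_insert (heap, edge_badness (E, false), E);
       while heap is non-empty and unit size is within compute_max_insns ():
	 E = fibheap_extract_min (heap);
	 inline_call (E, ...);
	 recompute keys of the caller/callee edges affected;

   The real loop lives in inline_small_functions below.  */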
#include "coretypes.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "coverage.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into a tiny wrapper, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the caller.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee;
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Look for the function e->caller is inlined into.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that, not on the self size of the outer
     function nor on the self size of the inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  do
    {
      info = inline_summary (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
	to = to->callers->caller;
    }
  while (to->global.inlined_to);

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large I/O data structures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += (stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption against stack consumption at the place
     the stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
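/* Worked example of the growth limits above (illustrative, assuming the
   defaults --param large-function-growth=100 and stack-frame-growth=1000):
   if the largest body on the inline chain has self_size 300, the limit
   becomes 300 + 300 * 100 / 100 = 600, i.e. the combined body may at most
   double, while the inlined stack frame may grow to 11 times the base
   limit before the edge is rejected.  */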
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
	     cgraph_node_name (e->caller), e->caller->uid,
	     cgraph_node_name (e->callee), e->callee->uid,
	     cgraph_inline_failed_string (e->inline_failed));
}
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report)
{
  bool inlinable = true;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->callee->decl);

  gcc_assert (e->inline_failed);

  if (!e->callee->analyzed)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (!inline_summary (e->callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (cgraph_function_body_availability (e->callee) <= AVAIL_OVERWRITABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	   && DECL_FUNCTION_PERSONALITY (e->callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	       != DECL_FUNCTION_PERSONALITY (e->callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (DECL_STRUCT_FUNCTION (e->callee->decl)
	   && DECL_STRUCT_FUNCTION (e->callee->decl)->can_throw_non_call_exceptions
	   && !(DECL_STRUCT_FUNCTION (e->caller->decl)
		&& DECL_STRUCT_FUNCTION (e->caller->decl)->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->decl,
						e->callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (e->callee->decl)
	   && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      struct cl_optimization *caller_opt
	= TREE_OPTIMIZATION ((caller_tree)
			     ? caller_tree
			     : optimization_default_node);

      struct cl_optimization *callee_opt
	= TREE_OPTIMIZATION ((callee_tree)
			     ? callee_tree
			     : optimization_default_node);

      if ((caller_opt->x_optimize > callee_opt->x_optimize)
	  || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
	{
	  e->inline_failed = CIF_TARGET_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
    }

  /* Be sure that the cannot_inline_p flag is up to date.  */
  gcc_checking_assert (!e->call_stmt
		       || (gimple_call_cannot_inline_p (e->call_stmt)
			   == e->call_stmt_cannot_inline_p)
		       /* In -flto-partition=none mode we really keep things out of
			  sync because call_stmt_cannot_inline_p is set at cgraph
			  merging when function bodies are not there yet.  */
		       || (in_lto_p && !gimple_call_cannot_inline_p (e->call_stmt)));
  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  /* Early inliner might get called at WPA stage when IPA pass adds new
     function.  In this case we cannot really do any of early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (e->callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
    {
      if (dump_file)
	fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}
/* Return true when N is a leaf function.  Accept cheap builtins
   in leaf functions.  */

static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      return false;
  return true;
}
/* Return true if we are interested in inlining the small function.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;

  if (DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);

      if (growth <= 0)
	;
      else if (!cgraph_maybe_hot_edge_p (e)
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "call is cold and code would grow by %i\n",
		     cgraph_node_name (e->caller), e->caller->uid,
		     cgraph_node_name (e->callee), e->callee->uid,
		     growth);
	  want_inline = false;
	}
      else if (!leaf_node_p (e->callee)
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "callee is not leaf and code would grow by %i\n",
		     cgraph_node_name (e->caller), e->caller->uid,
		     cgraph_node_name (e->callee), e->callee->uid,
		     growth);
	  want_inline = false;
	}
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns\n",
		     cgraph_node_name (e->caller), e->caller->uid,
		     cgraph_node_name (e->callee), e->callee->uid,
		     growth);
	  want_inline = false;
	}
    }
  return want_inline;
}
/* Return true if we are interested in inlining the small function.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;

  if (DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);

      if (growth <= 0)
	;
      else if (DECL_DECLARED_INLINE_P (e->callee->decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE)
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	       && !flag_inline_functions)
	{
	  e->inline_failed = CIF_NOT_DECLARED_INLINED;
	  want_inline = false;
	}
      else if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	       && growth >= MAX_INLINE_INSNS_AUTO)
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  want_inline = false;
	}
      else if (!cgraph_maybe_hot_edge_p (e)
	       && estimate_growth (e->callee) > 0)
	{
	  e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}
/* EDGE is a self-recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->callee->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!want_inline)
    ;
  /* Inlining of a self-recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self-recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
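  /* Worked example (illustrative): with the default
     --param max-inline-recursive-depth-auto of 8, the depth-1 threshold
     is 1 - 1/8 = 87.5% of CGRAPH_FREQ_BASE; each additional level squares
     the ratio, so at depth 3 the threshold is 0.875^4, roughly 59%.
     Recursive calls more frequent than the threshold are rejected.  */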
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
					 / max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      if (max_count
	  && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
	      >= max_prob))
	{
	  reason = "profile of recursive call is too large";
	  want_inline = false;
	}
      if (!max_count
	  && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
	      >= max_prob))
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase the chance that things fit in the hardware return address
     predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self-recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
	  && (edge->count * 100 / outer_node->count
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "profile of recursive call is too small";
	  want_inline = false;
	}
      else if (!max_count
	       && (edge->frequency * 100 / caller_freq
		   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}
/* Decide if NODE, which is called once, should be inlined to eliminate
   the need for the offline copy of the function.  */

static bool
want_inline_function_called_once_p (struct cgraph_node *node)
{
  /* Already inlined?  */
  if (node->global.inlined_to)
    return false;
  /* Zero or more than one callers?  */
  if (!node->callers
      || node->callers->next_caller)
    return false;
  /* It makes no sense to inline a recursive call.  */
  if (node->callers->caller == node)
    return false;
  /* External functions are not really in the unit, so inlining
     them when called once would just increase the program size.  */
  if (DECL_EXTERNAL (node->decl))
    return false;
  /* Offline body must be optimized out.  */
  if (!cgraph_will_be_removed_from_program_if_no_direct_calls (node))
    return false;
  if (!can_inline_edge_p (node->callers, true))
    return false;
  return true;
}
/* A cost model driving the inlining heuristics in a way so that the edges
   with the smallest badness are inlined first.  After each inlining is
   performed the costs of all caller edges of the nodes affected are
   recomputed so the metrics may accurately depend on values such as the
   number of inlinable callers of the function or the function body size.  */
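/* Sketch of the three badness formulas computed below (an illustrative
   summary; clamping and rounding are omitted, see the code for the exact
   arithmetic):

     with profile feedback:
       badness = relative count * INT_MIN / 100 * benefit% / growth
     with guessed branch probabilities:
       badness = growth * 10000 / (frequency% * benefit%, range-compressed)
		 + estimated overall unit growth
     with no profile at all:
       badness = estimate_growth (callee) * 256, shifted by the loop depth

   More negative badness means a more attractive edge.  */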
static int
edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth, time_growth;
  struct inline_summary *callee_info = inline_summary (edge->callee);

  if (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl))
    return INT_MIN;

  growth = estimate_edge_growth (edge);
  time_growth = estimate_edge_time (edge);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
	       cgraph_node_name (edge->caller),
	       cgraph_node_name (edge->callee));
      fprintf (dump_file, "      growth size %i, time %i\n",
	       growth, time_growth);
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN - growth;
      if (dump)
	fprintf (dump_file, "      %i: Growth %i < 0\n", (int) badness,
		 growth);
    }

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    {
      int benefitperc;
      benefitperc = (((gcov_type)callee_info->time
		      * edge->frequency / CGRAPH_FREQ_BASE - time_growth) * 100
		     / (callee_info->time + 1) + 1);
      benefitperc = MIN (benefitperc, 100);
      benefitperc = MAX (benefitperc, 0);
      badness = ((int)
		 ((double) edge->count * INT_MIN / max_count / 100) *
		 benefitperc) / growth;

      /* Be sure that insanity of the profile won't lead to increasing counts
	 in the scaling and thus to overflow in the computation above.  */
      gcc_assert (max_count >= edge->count);
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i (relative %f): profile info. Relative count %f"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge->count / max_count,
		   (double) benefitperc);
	}
    }

  /* When a function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within the function, the function itself is infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so fewer times called
     functions of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      int benefitperc;
      int growth_for_all;
      badness = growth * 10000;
      benefitperc = (((gcov_type)callee_info->time
		      * edge->frequency / CGRAPH_FREQ_BASE - time_growth) * 100
		     / (callee_info->time + 1) + 1);
      benefitperc = MIN (benefitperc, 100);
      benefitperc = MAX (benefitperc, 0);
      div *= benefitperc;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 10000)
	div = 10000 + ceil_log2 (div) - 8;
      if (div < 1)
	div = 1;
      if (badness > 0)
	badness /= div;
      growth_for_all = estimate_growth (edge->callee);
      badness += growth_for_all;
      if (badness > INT_MAX)
	badness = INT_MAX;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i: guessed profile. frequency %i, overall growth %i,"
		   " benefit %i%%, divisor %i\n",
		   (int) badness, edge->frequency, growth_for_all,
		   benefitperc, div);
	}
    }
  /* When a function-local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined into the program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;
      if (dump)
	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
		 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
    return badness + 1;
  else
    return badness;
}
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases keys.
	 When we increase the key we do not update the heap
	 and instead re-insert the element once it becomes
	 a minimum of the heap.  */
      if (badness < n->key)
	{
	  fibheap_replace_key (heap, n, badness);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
		     cgraph_node_name (edge->caller), edge->caller->uid,
		     cgraph_node_name (edge->callee), edge->callee->uid,
		     (int) n->key, badness);
	  gcc_checking_assert (n->key == badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "  enqueuing call %s/%i -> %s/%i, badness %i\n",
		 cgraph_node_name (edge->caller), edge->caller->uid,
		 cgraph_node_name (edge->callee), edge->callee->uid,
		 badness);
      edge->aux = fibheap_insert (heap, badness, edge);
    }
}
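/* Example of the lazy increase handling above (illustrative): if an edge
   sits in the heap with key -500 and its recomputed badness rises to -100,
   the key stays at -500; inline_small_functions re-checks the badness
   after fibheap_extract_min and re-inserts the edge with the correct key,
   which is cheaper than updating the heap eagerly.  */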
/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *edge;

  if (!inline_summary (node)->inlinable
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;
  reset_node_growth_cache (node);

  /* See if there is something to do.  */
  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      break;
  if (!edge)
    return;

  for (; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	reset_edge_growth_cache (edge);
	if (can_inline_edge_p (edge, false)
	    && want_inline_small_function_p (edge, false))
	  update_edge_key (heap, edge);
	else if (edge->aux)
	  {
	    report_inline_failed_reason (edge);
	    fibheap_delete_node (heap, (fibnode_t) edge->aux);
	    edge->aux = NULL;
	  }
      }
}
/* Recompute heap nodes for each uninlined call.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  reset_node_growth_cache (node);
  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	reset_edge_growth_cache (e);
	if (e->inline_failed
	    && inline_summary (e->callee)->inlinable
	    && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, e->callee->uid))
	  {
	    reset_node_growth_cache (node);
	    update_edge_key (heap, e);
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Recompute heap nodes for the caller edges of each of the callees.
   Walk recursively into all inline clones.  */

static void
update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
			bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  reset_node_growth_cache (node);
  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed)
	  update_caller_keys (heap, e->callee, updated_nodes);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Enqueue all recursive calls from NODE into priority queue HEAP depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  */
	fibheap_insert (heap,
			!max_count ? -e->frequency
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
			e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
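/* Key scaling example (illustrative): profile counts are gcov_type and may
   exceed the int range of fibheap keys, so counts are divided by
   ceil (max_count / 2^24); e.g. with max_count == 2^40 a count of 2^39 is
   scaled to 2^39 / 2^16 = 2^23, which fits while preserving the order of
   the candidates.  */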
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  fibheap_t heap;
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that function is small enough to be considered for inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      if (estimate_size_after_inlining (node, curr) > limit)
	break;

      if (!can_inline_edge_p (curr, true))
	continue;

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl == curr->callee->decl)
	  depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	continue;

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    fprintf (dump_file, " called approx. %.2f times per call",
		     (double)curr->count / node->count);
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need original clone to copy around.  */
	  master_clone = cgraph_clone_node (node, node->decl,
					    node->count, CGRAPH_FREQ_BASE,
					    false, NULL, true);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);
	}

      cgraph_redirect_edge_callee (curr, master_clone);
      inline_call (curr, false, new_edges, &overall_size);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, "
	     "body grown from size %i to %i, time %i to %i\n", n,
	     inline_summary (master_clone)->size, inline_summary (node)->size,
	     inline_summary (master_clone)->time, inline_summary (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone so we don't need to recurse.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  return true;
}
/* Given a whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
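/* Worked example (illustrative, assuming the defaults of this era,
   --param inline-unit-growth=30 and large-unit-insns=10000): a unit of
   20000 insns may grow to 20000 * 130 / 100 = 26000; units smaller than
   large-unit-insns are first rounded up to that size, so tiny units can
   still grow meaningfully.  */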
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (inline_summary (edge->callee)->inlinable
	  && edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
	edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
    }
}
/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap ordered by
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */

static void
inline_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
  int initial_size = 0;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  /* Compute overall unit size and other global parameters used by badness
     metrics.  */

  max_count = 0;
  initialize_growth_caches ();

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed
	&& !node->global.inlined_to)
      {
	struct inline_summary *info = inline_summary (node);

	if (!DECL_EXTERNAL (node->decl))
	  initial_size += info->size;

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;
      }

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  /* Populate the heap with all edges we might inline.  */

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed
	&& !node->global.inlined_to)
      {
	if (dump_file)
	  fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
		   cgraph_node_name (node), node->uid);

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (edge->inline_failed
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true))
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (heap, edge);
	    }
      }

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));

  while (!fibheap_empty (heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      int current_badness;
      int growth;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out-of-date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	{
	  edge->aux = fibheap_insert (heap, current_badness, edge);
	  continue;
	}

      if (!can_inline_edge_p (edge, true))
	continue;

      callee = edge->callee;
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   cgraph_node_name (edge->callee),
		   inline_summary (edge->callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated growth after inlined into all is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1
		   : gimple_lineno ((const_gimple) edge->call_stmt),
		   estimate_growth (edge->callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	continue;

      /* Heuristics for inlining small functions work poorly for
	 recursive calls where we do an effect similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision to
	 the special-purpose inliner.  */
      if (cgraph_edge_recursive_p (edge))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   flag_indirect_inlining
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      continue;
	    }
	  /* The recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);
	  update_all_callee_keys (heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *callee;
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self-recursive function A is inlined into
	     B.  This is a desired optimization in some cases, since it leads
	     to an effect similar to loop peeling and we might completely
	     optimize out the recursive call.  However we must be extra
	     selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->decl == edge->callee->decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  callee = edge->callee;
	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);

	  /* We inlined the last offline copy into the body.  This might lead
	     to callees of the function having fewer call sites and thus they
	     may need updating.  */
	  if (callee->global.inlined_to)
	    update_all_callee_keys (heap, callee, updated_nodes);
	  else
	    update_callee_keys (heap, edge->callee, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the function
	 body.  After inlining these properties might change for the function
	 we inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);

      /* We removed one call of the function we just inlined.  If the offline
	 copy is still needed, be sure to update the keys.  */
      if (callee != where && !callee->global.inlined_to)
	update_caller_keys (heap, callee, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
	fprintf (dump_file,
		 " Inlined into %s which now has time %i and size %i, "
		 "net change of %+i.\n",
		 cgraph_node_name (edge->caller),
		 inline_summary (edge->caller)->time,
		 inline_summary (edge->caller)->size,
		 overall_size - old_size);
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }

  free_growth_caches ();
  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / (initial_size) - 100: 0);
  BITMAP_FREE (updated_nodes);
}
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;

      /* We've hit a cycle?  It is time to give up.  */
      if (e->callee->aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     cgraph_node_name (e->callee),
		     cgraph_node_name (e->caller));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (e->callee, early);
	  continue;
	}

      /* The flatten attribute needs to be processed during late inlining.
	 For extra code quality we however do flattening during early
	 optimization, too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      orig_callee = e->callee;
      inline_call (e, true, NULL, NULL);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
    }

  node->aux = NULL;
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int i;

  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();
  if (flag_indirect_inlining)
    ipa_create_all_structures_for_iinln ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = cgraph_postorder (order);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
	 Ideally when processing callees we stop inlining at the
	 entry of cycles, possibly cloning that entry point and
	 trying to flatten itself turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  flatten_function (node, false);
	}
    }

  inline_small_functions ();
  cgraph_remove_unreachable_nodes (true, dump_file);
  free (order);

  /* We already perform some inlining of functions called once during
     inlining small functions above.  After unreachable nodes are removed,
     we still might do a quick check that nothing new is found.  */
  if (flag_inline_functions_called_once)
    {
      int cold;
      if (dump_file)
	fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* Inlining one function called once has a good chance of preventing
	 inlining another function into the same callee.  Ideally we should
	 work in priority order, but probably inlining hot functions first
	 is a good cut without the extra pain of maintaining the queue.

	 ??? this is not really fitting the bill perfectly: inlining function
	 into callee often leads to better optimization of callee due to
	 increased context for optimization.
	 For example if main() function calls a function that outputs help
	 and then a function that does the main optimization, we should inline
	 the second with priority even if both calls are cold by themselves.

	 We probably want to implement a new predicate replacing our use of
	 maybe_hot_edge interpreted as maybe_hot_edge || callee is known
	 to be hot.  */
      for (cold = 0; cold <= 1; cold ++)
	{
	  for (node = cgraph_nodes; node; node = node->next)
	    {
	      if (want_inline_function_called_once_p (node)
		  && (cold
		      || cgraph_maybe_hot_edge_p (node->callers)))
		{
		  struct cgraph_node *caller = node->callers->caller;

		  if (dump_file)
		    {
		      fprintf (dump_file,
			       "\nInlining %s size %i.\n",
			       cgraph_node_name (node),
			       inline_summary (node)->size);
		      fprintf (dump_file,
			       " Called once from %s %i insns.\n",
			       cgraph_node_name (node->callers->caller),
			       inline_summary (node->callers->caller)->size);
		    }

		  inline_call (node->callers, true, NULL, NULL);
		  if (dump_file)
		    fprintf (dump_file,
			     " Inlined into %s which now has %i size\n",
			     cgraph_node_name (caller),
			     inline_summary (caller)->size);
		}
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions\n\n",
	     ncalls_inlined, nfunctions_inlined);

  if (dump_file)
    dump_inline_summaries (dump_file);
  /* In WPA we use inline summaries for the partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return 0;
}
/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      if (!DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     cgraph_node_name (e->callee));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      inline_call (e, true, NULL, NULL);
      inlined = true;
    }

  return inlined;
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      if (!inline_summary (e->callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (e->callee->decl)
	  && !flag_inline_small_functions
	  && !flag_inline_functions)
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (e->callee));

      if (!can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      inline_call (e, true, NULL, NULL);
      inlined = true;
    }

  return inlined;
}
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function calls.  */

static unsigned int
early_inliner (void)
{
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  /* Even when not optimizing or not inlining inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always inline functions will get less optimized, but at the
	 same time inlining of functions calling always inline
	 function into an always inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->decl))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file,
		 "Flattening %s\n", cgraph_node_name (node));
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of the early inliner works as expected.  We however
	     have values approximately right and thus we only need to update
	     edge info that might be cleared out for newly discovered
	     edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      struct inline_edge_summary *es = inline_edge_summary (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	    }
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}
struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  NULL,					/* gate */
  early_inliner,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.  */

static bool
gate_ipa_inline (void)
{
  /* ??? We'd like to skip this if not optimizing or not inlining as
     all always-inline functions have been processed by early
     inlining already.  But this at least breaks EH with C++ as
     we need to unconditionally run fixup_cfg even at -O0.
     So leave it on unconditionally for now.  */
  return true;
}
struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  gate_ipa_inline,			/* gate */
  ipa_inline,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions | TODO_ggc_collect	/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 inline_write_summary,			/* write_summary */
 inline_read_summary,			/* read_summary */
 NULL,					/* write_optimization_summary */
 NULL,					/* read_optimization_summary */
 NULL,					/* stmt_fixup */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL					/* variable_transform */
};