/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

      can_inline_edge_p allows checking that a particular inlining is allowed
      by the limits specified by the user (allowed function growth, stack
      growth and so on).

      Functions are inlined when it is obvious the result is profitable (such
      as functions called once or when inlining reduces code size).
      In addition to that we perform inlining of small functions and
      recursive inlining.

   The inliner itself is split into two passes:

      pass_early_inline

	A simple local inlining pass inlining callees into the current
	function.  This pass makes no use of whole unit analysis and thus it
	can make only very simple decisions based on local properties.

	The strength of the pass is that it is run in topological order
	(reverse postorder) on the callgraph.  Functions are converted into
	SSA form just before this pass and optimized subsequently.  As a
	result, the callees of the function seen by the early inliner have
	already been optimized and the results of early inlining add a lot
	of optimization opportunities for the local optimizers.

	The pass handles the obvious inlining decisions within the
	compilation unit - inlining auto inline functions, inlining for size
	and flattening.

	The main strength of the pass is its ability to eliminate the
	abstraction penalty in C++ code (via a combination of inlining and
	early optimization) and thus improve the quality of analysis done by
	the real IPA optimizers.

	Because of its lack of whole unit knowledge, the pass cannot really
	make good code size/performance tradeoffs.  It does however perform
	very simple speculative inlining allowing code size to grow by
	EARLY_INLINING_INSNS when the callee is a leaf function.  In this
	case the optimizations performed later are very likely to eliminate
	the cost.

      pass_ipa_inline

	This is the real inliner, able to handle inlining with whole program
	knowledge.  It performs the following steps:

	1) inlining of small functions.  This is implemented by a greedy
	algorithm ordering all inlinable cgraph edges by their badness and
	inlining them in this order as long as the inline limits allow
	doing so.

	This heuristic is not very good at inlining recursive calls.
	Recursive calls can be inlined with results similar to loop
	unrolling.  To do so, a special purpose recursive inliner is
	executed on the function when a recursive edge is met as a viable
	candidate.

	2) Unreachable functions are removed from the callgraph.  Inlining
	leads to devirtualization and other modifications of the callgraph,
	so functions may become unreachable during the process.  Also
	functions declared as extern inline or virtual functions are
	removed, since after inlining we no longer need the offline bodies.

	3) Functions called once and not exported from the unit are inlined.
	This should almost always lead to a reduction of code size by
	eliminating the need for an offline copy of the function.  */
#include "coretypes.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "coverage.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the limit on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
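/* A worked example with illustrative numbers, not taken from the source:
   inlining a 3000-insn callee into a 30-insn wrapper gives limit == 3000
   (the bigger of the two bodies).  Assuming the default
   --param large-function-growth of 100, the combined body may then reach
   3000 + 3000 * 100 / 100 == 6000 insns before
   CIF_LARGE_FUNCTION_GROWTH_LIMIT triggers; the check below also requires
   the result to exceed --param large-function-insns.  */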
caller_growth_limits (struct cgraph_edge *e)
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Look for the function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of the inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */

      info = inline_summary (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
	to = to->callers->caller;

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;

  if (!what_info->estimated_stack_size)

  /* FIXME: The stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check the new stack consumption against the stack consumption at the
     place the stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If the function already has large stack usage from a sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
/* Dump info about why inlining has failed.  */

report_inline_failed_reason (struct cgraph_edge *e)
      fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
	       cgraph_node_name (e->caller), e->caller->uid,
	       cgraph_node_name (e->callee), e->callee->uid,
	       cgraph_inline_failed_string (e->inline_failed));
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.  */
can_inline_edge_p (struct cgraph_edge *e, bool report)
  bool inlinable = true;
  enum availability avail;
  struct cgraph_node *callee
    = cgraph_function_or_thunk_node (e->callee, &avail);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;

  gcc_assert (e->inline_failed);

  if (!callee || !callee->analyzed)
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (!inline_summary (callee)->inlinable)
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
  else if (avail <= AVAIL_OVERWRITABLE)
      e->inline_failed = CIF_OVERWRITABLE;
  else if (e->call_stmt_cannot_inline_p)
      e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	   && DECL_FUNCTION_PERSONALITY (callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	       != DECL_FUNCTION_PERSONALITY (callee->decl)))
      e->inline_failed = CIF_EH_PERSONALITY;
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into the cgraph node or mirror it in the inline summary.  */
  else if (DECL_STRUCT_FUNCTION (callee->decl)
	   && DECL_STRUCT_FUNCTION
		(callee->decl)->can_throw_non_call_exceptions
	   && !(DECL_STRUCT_FUNCTION (e->caller->decl)
		&& DECL_STRUCT_FUNCTION
		     (e->caller->decl)->can_throw_non_call_exceptions))
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->decl,
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	   && !lookup_attribute ("flatten",
				 (e->caller->global.inlined_to
				  ? e->caller->global.inlined_to->decl
	   && !caller_growth_limits (e))
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
      struct cl_optimization *caller_opt
	= TREE_OPTIMIZATION ((caller_tree)
			     ? caller_tree
			     : optimization_default_node);

      struct cl_optimization *callee_opt
	= TREE_OPTIMIZATION ((callee_tree)
			     ? callee_tree
			     : optimization_default_node);

      if (((caller_opt->x_optimize > callee_opt->x_optimize)
	   || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
	  /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
	  && !DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
  /* Be sure that the cannot_inline_p flag is up to date.  */
  gcc_checking_assert (!e->call_stmt
		       || (gimple_call_cannot_inline_p (e->call_stmt)
			   == e->call_stmt_cannot_inline_p)
		       /* In -flto-partition=none mode we really keep things out of
			  sync because call_stmt_cannot_inline_p is set at cgraph
			  merging when function bodies are not there yet.  */
		       || (in_lto_p && !gimple_call_cannot_inline_p (e->call_stmt)));
  if (!inlinable && report)
    report_inline_failed_reason (e);
/* Return true if the edge E is inlinable during early inlining.  */

can_early_inline_edge_p (struct cgraph_edge *e)
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
							      NULL);
  /* The early inliner might get called at WPA stage when an IPA pass adds a
     new function.  In this case we cannot really do any early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (callee->decl))
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we have not processed
     the callee by the early inliner yet).  We don't have a CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
	fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
  if (!can_inline_edge_p (e, true))
/* Return true when N is a leaf function.  Accept cheap builtins
   in leaf functions.  */

leaf_node_p (struct cgraph_node *n)
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
/* Return true if we are interested in inlining the small function called
   through edge E during early inlining.  */

want_early_inline_function_p (struct cgraph_edge *e)
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !flag_inline_small_functions)
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      int growth = estimate_edge_growth (e);

  else if (!cgraph_maybe_hot_edge_p (e)
	  fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		   "call is cold and code would grow by %i\n",
		   cgraph_node_name (e->caller), e->caller->uid,
		   cgraph_node_name (callee), callee->uid,
  else if (!leaf_node_p (callee)
	  fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		   "callee is not leaf and code would grow by %i\n",
		   cgraph_node_name (e->caller), e->caller->uid,
		   cgraph_node_name (callee), callee->uid,
  else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	  fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		   "growth %i exceeds --param early-inlining-insns\n",
		   cgraph_node_name (e->caller), e->caller->uid,
		   cgraph_node_name (callee), callee->uid,
/* Return true if we are interested in inlining the small function called
   through edge E.  When REPORT is true, report the reason to the dump
   file.  */

want_inline_small_function_p (struct cgraph_edge *e, bool report)
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !flag_inline_small_functions)
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      int growth = estimate_edge_growth (e);

  else if (DECL_DECLARED_INLINE_P (callee->decl)
	   && growth >= MAX_INLINE_INSNS_SINGLE)
      e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !flag_inline_functions)
      e->inline_failed = CIF_NOT_DECLARED_INLINED;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && growth >= MAX_INLINE_INSNS_AUTO)
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
  /* If the call is cold, do not inline when the function body would grow.
     Still inline when the overall unit size will shrink because the offline
     copy of the function is eliminated.

     This is slightly wrong on the aggressive side: it is entirely possible
     that the function is called many times with a context where inlining
     reduces code size and a few times with a context where inlining
     increases code size.  The resulting growth estimate will be negative
     even if it would make more sense to keep the offline copy and not
     inline into the call sites that make the code size grow.

     When badness orders the calls in a way that code reducing calls come
     first, this situation is not a problem at all: after inlining all
     "good" calls, we will realize that keeping the function around is
     better.  */
  else if (!cgraph_maybe_hot_edge_p (e)
	   && (DECL_EXTERNAL (callee->decl)
	       /* Unlike for functions called once, we play unsafe with
		  COMDATs.  We can allow that since we know the functions
		  in consideration are small (and thus the risk is small) and
		  moreover the growth estimates already account for COMDAT
		  functions possibly disappearing when eliminated from the
		  current unit.  With good probability making the aggressive
		  choice in all units is going to make the overall program
		  smaller.

		  Consequently we ask cgraph_can_remove_if_no_direct_calls_p
		  instead of
		  cgraph_will_be_removed_from_program_if_no_direct_calls.  */
	       || !cgraph_can_remove_if_no_direct_calls_p (callee)
	       || estimate_growth (callee) > 0))
      e->inline_failed = CIF_UNLIKELY_CALL;
  if (!want_inline && report)
    report_inline_failed_reason (e);
/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlined into itself
   or when function A is being inlined into another inlined copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */

want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
      reason = "recursive call is cold";
  else if (max_count && !outer_node->count)
      reason = "not executed in profile";
  else if (depth > max_depth)
      reason = "--param max-inline-recursive-depth exceeded.";

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
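  /* A worked example with illustrative numbers, not taken from the source:
     with CGRAPH_FREQ_BASE == 1000 and max_depth == 8 the computation below
     gives max_prob == 1000 - (1000 + 7) / 8 == 875, i.e. a recursion
     probability of at most 0.875 per level.  A geometric recursion with
     probability p makes p / (1 - p) == 0.875 / 0.125 == 7 expected
     recursive calls, which stays below max_depth.  For each level already
     peeled, the allowed probability is squared (0.875, 0.766, 0.586, ...).  */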
  int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
				     / max_depth);
  for (i = 1; i < depth; i++)
    max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
      reason = "profile of recursive call is too large";
      && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
      reason = "frequency of recursive call is too large";
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase the chance that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame
     setup, actually slowing down functions whose recursion tree is wide
     rather than deep.

     Deciding reliably when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
      && (edge->count * 100 / outer_node->count
	  <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
      reason = "profile of recursive call is too small";
      && (edge->frequency * 100 / caller_freq
	  <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
      reason = "frequency of recursive call is too small";

  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
/* Return true when NODE has a caller other than EDGE.
   Worker for cgraph_for_node_and_aliases.  */

check_caller_edge (struct cgraph_node *node, void *edge)
  return (node->callers
	  && node->callers != edge);
/* Decide if NODE is called once and if inlining it would eliminate the need
   for the offline copy of the function.  */

want_inline_function_called_once_p (struct cgraph_node *node)
  struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
  /* Already inlined?  */
  if (function->global.inlined_to)
  /* Zero or more than one caller?  */
      || node->callers->next_caller)
  /* Maybe other aliases have more direct calls.  */
  if (cgraph_for_node_and_aliases (node, check_caller_edge, node->callers, true))
  /* Inlining a recursive call makes no sense.  */
  if (cgraph_edge_recursive_p (node->callers))
  /* External functions are not really in the unit, so inlining
     them when called once would just increase the program size.  */
  if (DECL_EXTERNAL (function->decl))
  /* The offline body must be optimized out.  */
  if (!cgraph_will_be_removed_from_program_if_no_direct_calls (function))
  if (!can_inline_edge_p (node->callers, true))
/* Return the relative time improvement for inlining EDGE, in the range
   1...2^9.  */

relative_time_benefit (struct inline_summary *callee_info,
		       struct cgraph_edge *edge,
  gcov_type uninlined_call_time;

  uninlined_call_time =
      + inline_edge_summary (edge)->call_stmt_time
      + CGRAPH_FREQ_BASE / 2) * edge->frequency

  /* Compute the relative time benefit, i.e. how much the call becomes
     faster.  ??? Perhaps computing how much the caller + callee together
     become faster would lead to more realistic results.  */
  if (!uninlined_call_time)
    uninlined_call_time = 1;
  relbenefit =
    (uninlined_call_time - time_growth) * 256 / (uninlined_call_time);
  relbenefit = MIN (relbenefit, 512);
  relbenefit = MAX (relbenefit, 1);
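  /* A worked example with illustrative numbers, not taken from the source:
     if the uninlined call is estimated at 200 time units and time_growth
     is 100 (inlining saves half of it), then
     relbenefit == (200 - 100) * 256 / 200 == 128, i.e. 50% on this
     fixed-point scale where 256 means the whole call time is saved.
     Values above 256 (up to the 512 cap) arise when inlining speeds up the
     caller beyond merely removing the call overhead.  */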
/* A cost model driving the inlining heuristics in a way so that edges with
   the smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed so the
   metrics may accurately depend on values such as the number of inlinable
   callers of the function or the function body size.  */

edge_badness (struct cgraph_edge *edge, bool dump)
  int growth, time_growth;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
  struct inline_summary *callee_info = inline_summary (callee);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))

  growth = estimate_edge_growth (edge);
  time_growth = estimate_edge_time (edge);

      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
	       cgraph_node_name (edge->caller),
	       cgraph_node_name (callee));
      fprintf (dump_file, "      size growth %i, time growth %i\n",

  /* Always prefer inlining that saves code size.  */
    badness = INT_MIN / 2 + growth;
	fprintf (dump_file, "      %i: Growth %i <= 0\n", (int) badness,
  /* When profiling is available, compute badness as:

		relative_edge_count * relative_time_benefit
     goodness = -------------------------------------------
			   edge_growth
     badness = -goodness

     The fraction is upside down, because on edge counts and time benefits
     the bounds are known.  Edge growth is essentially unlimited.  */
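  /* A worked example with illustrative numbers, not taken from the source:
     for an edge as hot as the program maximum (edge->count == max_count)
     whose relative_time_benefit is 256 (the whole call time saved), the
     computation below yields badness == (INT_MIN / 2 / 512) * 256 / growth
     == INT_MIN / (4 * growth), so hotter, higher-benefit, lower-growth
     edges get more negative keys and are extracted from the heap first.  */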
      int relbenefit = relative_time_benefit (callee_info, edge, time_growth);
      badness =
	((int)
	 ((double) edge->count * INT_MIN / 2 / max_count / 512) *
	 relative_time_benefit (callee_info, edge, time_growth)) / growth;

      /* Be sure that insanity of the profile won't lead to increasing
	 counts in the scaling and thus to overflow in the computation
	 above.  */
      gcc_assert (max_count >= edge->count);
		   "      %i (relative %f): profile info. Relative count %f"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge->count / max_count,
		   relbenefit * 100 / 256.0);
  /* When the function local profile is available, compute badness as:

		      edge_growth
     badness = -------------------------------------- + growth_for_all
	       relative_time_benefit * edge_frequency
  */
  else if (flag_guess_branch_prob)
      int div = edge->frequency * (1 << 10) / CGRAPH_FREQ_MAX;

      gcc_checking_assert (edge->frequency <= CGRAPH_FREQ_MAX);
      div *= relative_time_benefit (callee_info, edge, time_growth);

      /* frequency is normalized in range 1...2^10;
	 relbenefit is in range 1...2^9;
	 DIV should thus be in range 1...2^19.  */
      gcc_checking_assert (div >= 1 && div <= (1 << 19));

      /* The result must be an integer in range 0...INT_MAX.
	 Set the base of the fixed point calculation so we don't lose much
	 precision for small badnesses (those are the interesting ones) yet
	 we don't overflow for growths that are still in an interesting
	 range.  */
      badness = ((gcov_type) growth) * (1 << 18);
      badness = (badness + div / 2) / div;
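      /* A worked example with illustrative numbers, not taken from the
	 source: growth == 20, a call executed on every run of the caller
	 (edge->frequency == CGRAPH_FREQ_MAX, so the scaled frequency is
	 1 << 10 == 1024) and relbenefit == 128 give
	 div == 1024 * 128 == 131072 and
	 badness == 20 * (1 << 18) / 131072 == 40, before the overall
	 growth of the callee is added in below.  */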
      /* The overall growth of inlining all calls of the function matters:
	 we want to inline so that the offline copy of the function is no
	 longer needed.

	 Additionally, functions that can be fully inlined without much
	 effort are better inline candidates than functions that can be
	 fully inlined only after noticeable overall unit growth.  The
	 latter are better in the sense of compressing code size by
	 factoring out common code into a separate function shared by
	 multiple code paths.

	 We might mix the value into the fraction by taking into account the
	 relative growth of the unit, but for now just add the number into
	 the resulting fraction.  */
      growth_for_all = estimate_growth (callee);
      badness += growth_for_all;
      if (badness > INT_MAX - 1)
	badness = INT_MAX - 1;
		   "      %i: guessed profile. frequency %f, overall growth %i,"
		   " benefit %f%%, divisor %i\n",
		   (int) badness, (double) edge->frequency / CGRAPH_FREQ_BASE,
		   growth_for_all,
		   relative_time_benefit (callee_info, edge, time_growth)
		   * 100 / 256.0, div);
  /* When the function local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on the
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = estimate_growth (callee) * 256;

      /* Decrease badness if the call is nested.  */

	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,

  /* Ensure that we did not overflow in any of the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
/* Recompute the badness of EDGE and update its key in HEAP if needed.  */

update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
  int badness = edge_badness (edge, false);
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases keys.  When we increase the
	 key, we do not update the heap and instead re-insert the element
	 once it becomes the minimum of the heap.  */
      if (badness < n->key)
	  if (dump_file && (dump_flags & TDF_DETAILS))
			 "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
			 cgraph_node_name (edge->caller), edge->caller->uid,
			 cgraph_node_name (edge->callee), edge->callee->uid,
	  fibheap_replace_key (heap, n, badness);
	  gcc_checking_assert (n->key == badness);
      if (dump_file && (dump_flags & TDF_DETAILS))
		     "  enqueuing call %s/%i -> %s/%i, badness %i\n",
		     cgraph_node_name (edge->caller), edge->caller->uid,
		     cgraph_node_name (edge->callee), edge->callee->uid,
      edge->aux = fibheap_insert (heap, badness, edge);
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need to be reset
   because a better context may now be known.  */

reset_edge_caches (struct cgraph_node *node)
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  /* WHERE's body size has changed, so the cached growth is invalid.  */
  reset_node_growth_cache (where);

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);
  for (i = 0; ipa_ref_list_refering_iterate (&where->ref_list, i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      reset_edge_caches (ipa_ref_refering_node (ref));

      if (!e->inline_failed && e->callee->callees)
	e = e->callee->callees;
	  if (e->inline_failed)
	    reset_edge_growth_cache (e);
	      if (e->caller == node)
	      e = e->caller->callers;
	    while (!e->next_callee);
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks nodes we have already visited, to avoid redundant
   work.  When CHECK_INLINABLITY_FOR is set, re-check for that specific
   edge that it is inlinable.  Otherwise check all edges.  */

update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
  struct cgraph_edge *edge;

  if ((!node->alias && !inline_summary (node)->inlinable)
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
  if (!bitmap_set_bit (updated_nodes, node->uid))

  for (i = 0; ipa_ref_list_refering_iterate (&node->ref_list, i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
	struct cgraph_node *alias = ipa_ref_refering_node (ref);
	update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false))
	      update_edge_key (heap, edge);
		report_inline_failed_reason (edge);
		fibheap_delete_node (heap, (fibnode_t) edge->aux);
	  update_edge_key (heap, edge);
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are only going to increase
   (we introduced a new call site) and thus all we need to do is insert the
   newly created edges into the heap.  */

update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
  struct cgraph_edge *e = node->callees;

      if (!e->inline_failed && e->callee->callees)
	e = e->callee->callees;
	  enum availability avail;
	  struct cgraph_node *callee;
	  /* We do not reset the callee growth cache here.  Since we added a
	     new call, the growth could only have increased and consequently
	     the badness metric does not need updating.  */
	  if (e->inline_failed
	      && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
	      && inline_summary (callee)->inlinable
	      && cgraph_function_body_availability (callee) >= AVAIL_AVAILABLE
	      && !bitmap_bit_p (updated_nodes, callee->uid))
	      if (can_inline_edge_p (e, false)
		  && want_inline_small_function_p (e, false))
		update_edge_key (heap, e);
		  report_inline_failed_reason (e);
		  fibheap_delete_node (heap, (fibnode_t) e->aux);
	      if (e->caller == node)
	      e = e->caller->callers;
	    while (!e->next_callee);
/* Recompute heap nodes for each of the caller edges of each of the callees.
   Walk recursively into all inline clones.  */

update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
			bitmap updated_nodes)
  struct cgraph_edge *e = node->callees;

      if (!e->inline_failed && e->callee->callees)
	e = e->callee->callees;
	  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,

	  /* We inlined, and thus the callees might have a different number
	     of calls.  Reset their caches.  */
	  reset_node_growth_cache (callee);
	  if (e->inline_failed)
	    update_caller_keys (heap, callee, updated_nodes, e);
	      if (e->caller == node)
	      e = e->caller->callers;
	    while (!e->next_callee);
/* Enqueue all recursive calls from NODE into the priority queue, depending
   on how much we want to recursively inline the call.  */

lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (cgraph_function_or_thunk_node (e->callee, &avail) == node
	    && avail > AVAIL_OVERWRITABLE))
	/* When profile feedback is available, prioritize by the expected
	   number of calls.  */
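	/* A note on the scaling below, with illustrative numbers not taken
	   from the source: the divisor rounds max_count up to a whole
	   number of 2^24 slices so that the resulting key always fits in
	   an int; e.g. with max_count == 2^30 the counts are divided by
	   64, giving keys of magnitude at most 2^24.  */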
	fibheap_insert (heap,
			!max_count ? -e->frequency
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given limit.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

recursive_inlining (struct cgraph_edge *edge,
		    VEC (cgraph_edge_p, heap) **new_edges)
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
      fibheap_delete (heap);
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));
  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap))
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      if (estimate_size_after_inlining (node, curr) > limit)

      if (!can_inline_edge_p (curr, true))

      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl == curr->callee->decl)

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))

		   "   Inlining call of depth %i", depth);
	    fprintf (dump_file, " called approx. %.2f times per call",
		     (double)curr->count / node->count);
	  fprintf (dump_file, "\n");
	  /* We need the original clone to copy around.  */
	  master_clone = cgraph_clone_node (node, node->decl,
					    node->count, CGRAPH_FREQ_BASE,
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);

      cgraph_redirect_edge_callee (curr, master_clone);
      inline_call (curr, false, new_edges, &overall_size);
      lookup_recursive_calls (node, curr->callee, heap);

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);
	     "\n   Inlined %i times, "
	     "body grown from size %i to %i, time %i to %i\n", n,
	     inline_summary (master_clone)->size, inline_summary (node)->size,
	     inline_summary (master_clone)->time, inline_summary (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
  cgraph_remove_node (master_clone);
/* Given a whole compilation unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */

compute_max_insns (int insns)
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
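/* A worked example with illustrative numbers, not taken from the source:
   with insns == 100000 and assuming the default --param inline-unit-growth
   of 30, the unit may grow to 100000 * 130 / 100 == 130000 insns.  Units
   smaller than --param large-unit-insns are first rounded up to that
   value, so tiny programs can inline comparatively more.  */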
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
	edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by the unit growth
   parameters.  */
inline_small_functions (void)
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
  int initial_size = 0;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",

  /* Compute the overall unit size and other global parameters used by the
     badness metrics.  */
  initialize_growth_caches ();

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
	if (cgraph_function_with_gimple_body_p (node)
	    || node->thunk.thunk_p)
	    struct inline_summary *info = inline_summary (node);

	    if (!DECL_EXTERNAL (node->decl))
	      initial_size += info->size;

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;
  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
	  fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
		   cgraph_node_name (node), node->uid);

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (edge->inline_failed
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true))
	      gcc_assert (!edge->aux);
	      update_edge_key (heap, edge);

  gcc_assert (in_lto_p
	      || (profile_info && flag_branch_probabilities));
  while (!fibheap_empty (heap))
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      int current_badness;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);

      if (!edge->inline_failed)

      /* Be sure that the caches are maintained consistently.  */
#ifdef ENABLE_CHECKING
      reset_edge_growth_cache (edge);
      reset_node_growth_cache (edge->callee);
#endif

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out of date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	  edge->aux = fibheap_insert (heap, current_badness, edge);

      if (!can_inline_edge_p (edge, true))

      callee = cgraph_function_or_thunk_node (edge->callee, NULL);
      growth = estimate_edge_growth (edge);
		   "\nConsidering %s with %i size\n",
		   cgraph_node_name (callee),
		   inline_summary (callee)->size);
		   " to be inlined into %s in %s:%i\n"
		   " Estimated growth after inlined into all is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   : gimple_lineno ((const_gimple) edge->call_stmt),
		   estimate_growth (callee),
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
      if (dump_flags & TDF_DETAILS)
	edge_badness (edge, true);
      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);

      if (!want_inline_small_function_p (edge, true))

      /* The heuristics for inlining small functions work poorly for
	 recursive calls, where we achieve an effect similar to loop
	 unrolling.  When inlining such an edge seems profitable, leave
	 the decision to the special purpose recursive inliner.  */
      if (cgraph_edge_recursive_p (edge))
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   flag_indirect_inlining
				   ? &new_indirect_edges : NULL))
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	  reset_edge_caches (where);
	  /* The recursive inliner inlines all recursive calls of the
	     function at once.  Consequently we need to update all callee
	     keys.  */
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);
	  update_all_callee_keys (heap, where, updated_nodes);
	  struct cgraph_node *outer_node = NULL;

	  /* Consider the case where the self recursive function A is
	     inlined into B.  This is a desired optimization in some cases,
	     since it leads to an effect similar to loop peeling and we
	     might completely optimize out the recursive call.  However we
	     must be extra selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	      if (where->decl == callee->decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	      && !want_inline_self_recursive_call_p (edge, outer_node,
	      = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
		 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);

	  reset_edge_caches (edge->callee);
	  reset_node_growth_cache (callee);

	  /* We inlined the last offline copy into the body.  This might
	     lead to callees of the function having fewer call sites and
	     thus they may need updating.  */
	  if (callee->global.inlined_to)
	    update_all_callee_keys (heap, callee, updated_nodes);
	  else
	    update_callee_keys (heap, edge->callee, updated_nodes);

      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the function
	 body.  After inlining these properties might change for the
	 function we inlined into (since its body size changed) and for the
	 functions called by the function we inlined (since the number of
	 their inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes, NULL);

      /* We removed one call of the function we just inlined.  If the
	 offline copy is still needed, be sure to update the keys.  */
      if (callee != where && !callee->global.inlined_to)
	update_caller_keys (heap, callee, updated_nodes, NULL);
      bitmap_clear (updated_nodes);

		 "   Inlined into %s which now has time %i and size %i, "
		 "net change of %+i.\n",
		 cgraph_node_name (edge->caller),
		 inline_summary (edge->caller)->time,
		 inline_summary (edge->caller)->size,
		 overall_size - old_size);
      if (min_size > overall_size)
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
  free_growth_caches ();
  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / (initial_size) - 100 : 0);
  BITMAP_FREE (updated_nodes);
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

flatten_function (struct cgraph_node *node, bool early)
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

      /* Have we hit a cycle?  Then it is time to give up.  */
		     "Not inlining %s into %s to avoid cycle.\n",
		     cgraph_node_name (callee),
		     cgraph_node_name (e->caller));
	  e->inline_failed = CIF_RECURSIVE_INLINING;

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	  flatten_function (callee, early);

      /* The flatten attribute needs to be processed during late inlining.
	 For extra code quality we however do flattening during early
	 optimization, too.  */
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))

      if (cgraph_edge_recursive_p (e))
	    fprintf (dump_file, "Not inlining: recursive call.\n");

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
	fprintf (dump_file, " Inlining %s into %s.\n",
		 cgraph_node_name (callee),
		 cgraph_node_name (e->caller));
      orig_callee = callee;
      inline_call (e, true, NULL, NULL);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.  */

  struct cgraph_node *node;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);

  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();
  if (flag_indirect_inlining)
    ipa_create_all_structures_for_iinln ();

    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  for (node = cgraph_nodes; node; node = node->next)

    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so that none of our later choices will make this
     impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
      /* Handle nodes to be flattened.
	 Ideally when processing callees we stop inlining at the
	 entry of cycles, possibly cloning that entry point and
	 trying to flatten it, turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
		     "Flattening %s\n", cgraph_node_name (node));
	  flatten_function (node, false);

  inline_small_functions ();
  cgraph_remove_unreachable_nodes (true, dump_file);
  /* We already perform some inlining of functions called once during
     inlining small functions above.  After unreachable nodes are removed,
     we still might do a quick check that nothing new is found.  */
  if (flag_inline_functions_called_once)
	fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* Inlining one function called once has a good chance of preventing
	 inlining other functions into the same callee.  Ideally we should
	 work in priority order, but probably inlining hot functions first
	 is a good cut without the extra pain of maintaining the queue.

	 ??? this is not really fitting the bill perfectly: inlining function
	 into callee often leads to better optimization of callee due to
	 increased context for optimization.
	 For example if main() function calls a function that outputs help
	 and then a function that does the main optimization, we should
	 inline the second with priority even if both calls are cold by
	 themselves.

	 We probably want to implement a new predicate replacing our use of
	 maybe_hot_edge interpreted as maybe_hot_edge || callee is known
	 to be hot.  */
      for (cold = 0; cold <= 1; cold++)
	  for (node = cgraph_nodes; node; node = node->next)
	      if (want_inline_function_called_once_p (node)
		      || cgraph_maybe_hot_edge_p (node->callers)))
		  struct cgraph_node *caller = node->callers->caller;

			     "\nInlining %s size %i.\n",
			     cgraph_node_name (node),
			     inline_summary (node)->size);
			     " Called once from %s %i insns.\n",
			     cgraph_node_name (node->callers->caller),
			     inline_summary (node->callers->caller)->size);

		  inline_call (node->callers, true, NULL, NULL);
			     " Inlined into %s which now has %i size\n",
			     cgraph_node_name (caller),
			     inline_summary (caller)->size);
  /* Free the ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    ipa_free_all_structures_after_iinln ();

	     "\nInlined %i calls, eliminated %i functions\n\n",
	     ncalls_inlined, nfunctions_inlined);

    dump_inline_summaries (dump_file);
  /* In WPA we use inline summaries for the partitioning process.  */
    inline_free_summary ();
/* Inline always-inline function calls in NODE.  */

inline_always_inline_functions (struct cgraph_node *node)
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))

      if (cgraph_edge_recursive_p (e))
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     cgraph_node_name (e->callee));
	  e->inline_failed = CIF_RECURSIVE_INLINING;

      if (!can_early_inline_edge_p (e))

	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      inline_call (e, true, NULL, NULL);
/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.  */

early_inline_small_functions (struct cgraph_node *node)
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!inline_summary (callee)->inlinable
	  || !e->inline_failed)

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->decl)
	  && !flag_inline_small_functions
	  && !flag_inline_functions)

	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (callee));

      if (!can_early_inline_edge_p (e))

      if (cgraph_edge_recursive_p (e))
	    fprintf (dump_file, "  Not inlining: recursive call.\n");

      if (!want_early_inline_function_p (e))

	fprintf (dump_file, " Inlining %s into %s.\n",
		 cgraph_node_name (callee),
		 cgraph_node_name (e->caller));
      inline_call (e, true, NULL, NULL);
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function
   calls.  */

early_inliner (void)
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  bool inlined = false;

  /* Do nothing if the datastructures for the ipa-inliner are already
     computed.  This happens when some pass decides to construct a new
     function and cgraph_add_new_function calls the lowering passes and
     early optimization on it.  This may confuse us when the early inliner
     decides to inline a call to a function clone, because function clones
     don't have parameter lists in ipa-prop matching their signatures.  */
  if (ipa_node_params_vector)

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always inline functions will get less optimized, but at the
	 same time inlining of functions calling always inline
	 functions into an always inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of
	 inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->decl))
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
		 "Flattening %s\n", cgraph_node_name (node));
      flatten_function (node, true);
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of the early inliner works as expected.  We however
	     have the values approximately right and thus we only need to
	     update edge info that might be cleared out for newly discovered
	     edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	      struct inline_edge_summary *es = inline_edge_summary (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	  timevar_pop (TV_INTEGRATION);
	fprintf (dump_file, "Iterations: %i\n", iterations);

      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);

  cfun->always_inline_functions_inlined = true;
struct gimple_opt_pass pass_early_inline =
  "einline",				/* name */
  early_inliner,			/* execute */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.

   Enable inlining unconditionally at -flto.  We need size estimates to
   drive partitioning.  */

gate_ipa_inline (void)
  return optimize || flag_lto || flag_wpa;
struct ipa_opt_pass_d pass_ipa_inline =
  "inline",				/* name */
  gate_ipa_inline,			/* gate */
  ipa_inline,				/* execute */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_cgraph
  | TODO_remove_functions | TODO_ggc_collect	/* todo_flags_finish */
  inline_generate_summary,		/* generate_summary */
  inline_write_summary,			/* write_summary */
  inline_read_summary,			/* read_summary */
  NULL,					/* write_optimization_summary */
  NULL,					/* read_optimization_summary */
  NULL,					/* stmt_fixup */
  inline_transform,			/* function_transform */
  NULL,					/* variable_transform */