/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.

   There are three major parts of this file:

   cgraph_mark_inline_edge implementation

     This function marks a given call as inlined and performs the
     necessary modifications of the cgraph (production of the clones and
     updating of overall statistics).

   inlining heuristics limits

     These functions allow checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, overall
     unit growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass that aims to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from the
     rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed at modifying the callgraph and verifying
     that the constraints on code size growth are satisfied.

     To mark a given call inline, use the cgraph_mark_inline_edge
     function; the verification is performed by cgraph_default_inline_p
     and cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm, ordering
     all functions by their "profitability" (estimated by code size
     growth) and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early
     inliner.

   The inliner itself is split into two passes:

   pass_early_inline

     A simple local inlining pass that inlines callees into the current
     function.  This pass makes no global whole-compilation-unit analysis,
     so when it is allowed to do inlining that expands code size, it might
     result in unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (which is maintained by
     the pass manager).  The functions are early optimized after inlining,
     so the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold the
     abstraction penalty on C++ effectively and cheaply.

   pass_ipa_inline

     This is the main pass, implementing a simple greedy algorithm for
     inlining of small functions that results in overall growth of the
     compilation unit, and for inlining of functions called once.  The
     pass computes just the so-called inline plan (the representation of
     the inlining to be done in the callgraph); unlike early inlining, it
     does not perform the inlining itself.  */
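
/* A minimal standalone sketch (not GCC code; the names and numbers here
   are illustrative assumptions) of the knapsack-style ordering described
   above: candidates are kept in a priority queue keyed by "badness"
   (lower is better), extracted in priority order, and accepted only
   while the unit stays within its allowed growth.  The real pass uses a
   fibonacci heap and the badness metric from cgraph_edge_badness.  */
#if 0
#include <limits.h>
#include <stdio.h>

struct toy_candidate { const char *name; int badness; int size_growth; };

static void
toy_inline_loop (struct toy_candidate *cands, int n,
                 int unit_size, int max_size)
{
  while (unit_size <= max_size)
    {
      /* Extract the minimum-badness candidate (a fibheap in the real
         pass; a linear scan here for brevity).  */
      int best = -1, i;
      for (i = 0; i < n; i++)
        if (cands[i].badness != INT_MAX
            && (best < 0 || cands[i].badness < cands[best].badness))
          best = i;
      if (best < 0)
        break;
      unit_size += cands[best].size_growth;
      printf ("inlining %s, unit size now %i\n",
              cands[best].name, unit_size);
      cands[best].badness = INT_MAX;    /* Mark as consumed.  */
    }
}
#endif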

#include "coretypes.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "coverage.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "ipa-inline.h"

#define MAX_TIME 1000000000

/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_size;
static gcov_type max_count, max_benefit;

/* Scale the frequency of NODE's edges by FREQ_SCALE and increase the
   loop nest of all edges by NEST.  */
update_noncloned_frequencies (struct cgraph_node *node,
                              int freq_scale, int nest)
  struct cgraph_edge *e;

  /* We do not want to ignore a high loop nest after the frequency drops
     to 0.  */
  if (!freq_scale)
    freq_scale = 1;
  for (e = node->callees; e; e = e->next_callee)
      e->loop_nest += nest;
      e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
      if (e->frequency > CGRAPH_FREQ_MAX)
        e->frequency = CGRAPH_FREQ_MAX;
      if (!e->inline_failed)
        update_noncloned_frequencies (e->callee, freq_scale, nest);
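
/* A standalone sketch (assumptions only, not GCC code) of the scaling
   performed above.  Edge frequencies are fixed-point values with
   CGRAPH_FREQ_BASE standing for 1.0, so inlining through an edge of
   frequency FREQ_SCALE multiplies the callee's edge frequencies by
   FREQ_SCALE / CGRAPH_FREQ_BASE and clamps the result.  For example,
   with an assumed base of 1000, an edge at 500 (0.5x) scaled by 2000
   (2.0x) becomes 1000 (1.0x).  */
#if 0
#define TOY_FREQ_BASE 1000
#define TOY_FREQ_MAX  100000

static int
toy_scale_frequency (int frequency, int freq_scale)
{
  long long scaled = (long long) frequency * freq_scale / TOY_FREQ_BASE;
  return scaled > TOY_FREQ_MAX ? TOY_FREQ_MAX : (int) scaled;
}
#endif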

/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the
   out-of-line function call.  */
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
      /* We may eliminate the need for the out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          /* Recursive inlining never wants the master clone to be
             overwritten.  */
          /* FIXME: When the address of a DECL_EXTERNAL function is taken,
             we can still remove its offline copy, but we would need to
             keep an unanalyzed node in the callgraph so references can
             point to it.  */
          && !e->callee->address_taken
          && cgraph_can_remove_if_no_direct_calls_p (e->callee)
          /* Inlining might enable more devirtualization, so we want to
             remove those only after all devirtualizable virtual calls are
             processed.  Lacking may-edges in the callgraph, we just
             preserve them past inlining.  */
          && (!DECL_VIRTUAL_P (e->callee->decl)
              || (!DECL_COMDAT (e->callee->decl)
                  && !DECL_EXTERNAL (e->callee->decl)))
          /* Don't reuse if more than one function shares a comdat group.
             If the other function(s) are needed, we need to emit even
             this function out of line.  */
          && !e->callee->same_comdat_group
          && !cgraph_new_nodes)
          gcc_assert (!e->callee->global.inlined_to);
          if (e->callee->analyzed && !DECL_EXTERNAL (e->callee->decl))
              overall_size -= e->callee->global.size;
              nfunctions_inlined++;
          e->callee->local.externally_visible = false;
          update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->callee->decl,
                                 e->count, e->frequency, e->loop_nest,
                                 update_original, NULL);
          cgraph_redirect_edge_callee (e, n);

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
         + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;
  cgraph_propagate_frequency (e->callee);

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff any
   new callgraph edges were discovered as a result of inlining.  */
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
                         VEC (cgraph_edge_p, heap) **new_edges)
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL;
  struct cgraph_edge *curr = e;

  /* Don't inline inlined edges.  */
  gcc_assert (e->inline_failed);
  /* Don't even think of inlining an inline clone.  */
  gcc_assert (!e->callee->global.inlined_to);

  e->inline_failed = CIF_OK;
  DECL_POSSIBLY_INLINED (e->callee->decl) = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  /* Now update the size of the caller and of all functions the caller is
     inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
      to = e->caller;
      old_size = e->caller->global.size;
      new_size = estimate_size_after_inlining (to, curr);
      to->global.size = new_size;
      to->global.time = estimate_time_after_inlining (to, curr);
  gcc_assert (curr->callee->global.inlined_to == to);
  if (new_size > old_size)
    overall_size += new_size - old_size;

  /* FIXME: We should remove the optimize check after we ensure we never
     run IPA passes when not optimizing.  */
  if (flag_indirect_inlining && optimize)
    return ipa_propagate_indirect_call_infos (curr, new_edges);

/* Return false when inlining edge E is not a good idea, as it would
   cause too large growth of the caller's function body or stack frame
   size.  *REASON, if non-NULL, is updated if the inlining is not a good
   idea.  */
cgraph_check_inline_limits (struct cgraph_edge *e,
                            cgraph_inline_failed_t *reason)
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small
     function, take the inlined function as the base for limiting the
     growth.  */
  if (inline_summary (to)->self_size > inline_summary (what)->self_size)
    limit = inline_summary (to)->self_size;
  else
    limit = inline_summary (what)->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But
     allow the function to shrink if it went over the limits by forced
     inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= to->global.size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += (stack_size_limit
                       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (to->global.stack_frame_offset
                   + inline_summary (to)->estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
      *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
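
/* A standalone sketch (with assumed parameter values, not GCC code) of
   the body-growth check above: the larger of the caller and callee
   self-sizes is taken as the base and grown by
   PARAM_LARGE_FUNCTION_GROWTH percent.  Assuming
   --param large-function-growth=100 and --param large-function-insns=2700,
   a 3000-insn caller may reach 6000 insns before
   CIF_LARGE_FUNCTION_GROWTH_LIMIT triggers; shrinking, or staying under
   the absolute threshold, is always allowed.  */
#if 0
static int
toy_growth_ok (int caller_size, int callee_size, int new_size,
               int growth_percent, int large_function_insns)
{
  int limit = caller_size > callee_size ? caller_size : callee_size;
  limit += limit * growth_percent / 100;
  /* Shrinking is fine, and so is staying under the absolute limit.  */
  if (new_size < caller_size || new_size <= large_function_insns)
    return 1;
  return new_size <= limit;
}
#endif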

/* Return true when function N is small enough to be inlined.  */
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
  if (n->local.disregard_inline_limits)

  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
      *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      *reason = CIF_BODY_NOT_AVAILABLE;
  if (cgraph_function_body_availability (n) <= AVAIL_OVERWRITABLE)
      *reason = CIF_OVERWRITABLE;
  if (DECL_DECLARED_INLINE_P (decl))
      if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
          *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
      if (n->global.size >= MAX_INLINE_INSNS_AUTO)
          *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;

/* A cost model driving the inlining heuristics in such a way that edges
   with the smallest badness are inlined first.  After each inlining is
   performed, the costs of all caller edges of the affected nodes are
   recomputed, so the metrics may accurately depend on values such as the
   number of inlinable callers of the function or the function body
   size.  */
cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
  if (edge->callee->local.disregard_inline_limits)

  growth = estimate_edge_growth (edge);

      fprintf (dump_file, " Badness calculation for %s -> %s\n",
               cgraph_node_name (edge->caller),
               cgraph_node_name (edge->callee));
      fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
               edge->callee->global.time,
               inline_summary (edge->callee)->time_inlining_benefit
               + edge->call_stmt_time,
               edge->callee->global.size,
               inline_summary (edge->callee)->size_inlining_benefit
               + edge->call_stmt_size);

  /* Always prefer inlining that saves code size.  */
      badness = INT_MIN - growth;
        fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,

  /* When profiling is available, base the priorities on
     -(#calls / growth), so we optimize for the overall number of
     "executed" inlined calls.  */
        ((double) edge->count * INT_MIN / max_count / (max_benefit + 1))
        * (inline_summary (edge->callee)->time_inlining_benefit
           + edge->call_stmt_time + 1)) / growth;
           " %i (relative %f): profile info. Relative count %f"
           " * Relative benefit %f\n",
           (int) badness, (double) badness / INT_MIN,
           (double) edge->count / max_count,
           (double) (inline_summary (edge->callee)->time_inlining_benefit
                     + edge->call_stmt_time + 1) / (max_benefit + 1));

  /* When a function-local profile is available, base the priorities on
     growth / frequency, so we optimize for the overall frequency of
     inlined calls.  This is not too accurate since, while the call might
     be frequent within the function, the function itself may be
     infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions
     to bias the priorities slightly in this direction (so that, of
     functions of the same size, those called fewer times get
     priority).  */
  else if (flag_guess_branch_prob)
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      badness = growth * 10000;
        100 * (inline_summary (edge->callee)->time_inlining_benefit
               + edge->call_stmt_time)
        / (edge->callee->global.time + 1) + 1;
      benefitperc = MIN (benefitperc, 100);

      /* Decrease badness if the call is nested.  */
      /* Compress the range so we don't overflow.  */
        div = 10000 + ceil_log2 (div) - 8;
      growth_for_all = estimate_growth (edge->callee);
      badness += growth_for_all;
      if (badness > INT_MAX)
           " %i: guessed profile. frequency %i, overall growth %i,"
           " benefit %i%%, divisor %i\n",
           (int) badness, edge->frequency, growth_for_all, benefitperc, div);

  /* When a function-local profile is not available or does not give
     useful information (i.e. the frequency is zero), base the cost on
     the loop nest and overall size growth, so we optimize for the
     overall number of functions fully inlined in the program.  */
      int nest = MIN (edge->loop_nest, 8);
      badness = estimate_growth (edge->callee) * 256;

      /* Decrease badness if the call is nested.  */
        fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,

  /* Ensure that we did not overflow in all the fixed-point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);

  /* Make recursive inlining always happen after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
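
/* A simplified standalone sketch (assumptions only, not GCC code) of the
   guessed-profile branch above: badness rises with size growth and is
   divided by the call frequency and the estimated time benefit, and the
   callee's overall growth estimate is added so that, at equal size,
   rarely called functions lose.  CGRAPH_FREQ_BASE is assumed to be 1000
   here, and the range compression of the divisor is omitted.  */
#if 0
static int
toy_guessed_badness (int growth, int frequency, int benefit_percent,
                     int growth_for_all)
{
  int div = frequency * 100 / 1000 + 1;
  long long badness = (long long) growth * 10000;

  if (benefit_percent > 100)
    benefit_percent = 100;
  if (benefit_percent < 1)
    benefit_percent = 1;
  div *= benefit_percent;
  if (badness > 0)
    badness /= div;
  badness += growth_for_all;
  return (int) badness;
}
#endif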

/* Recompute the badness of EDGE and update its key in HEAP if needed.  */
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
  int badness = cgraph_edge_badness (edge, false);
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key can only decrease keys.  When we would
         increase the key, we do not update the heap and instead
         re-insert the element once it becomes the heap minimum.  */
      if (badness < n->key)
          fibheap_replace_key (heap, n, badness);
          gcc_checking_assert (n->key == badness);
    edge->aux = fibheap_insert (heap, badness, edge);
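
/* A standalone sketch (with an assumed toy heap API, not GCC code) of
   the lazy-update pattern used here and in the main inlining loop: keys
   may only be decreased in place, so an edge whose badness increased
   keeps its stale (too small) key until it is extracted, at which point
   it is re-inserted with the fresh value.  */
#if 0
struct toy_edge { int badness; };

static struct toy_edge *
toy_extract_valid_min (struct toy_heap *heap)
{
  for (;;)
    {
      int key;
      struct toy_edge *edge = toy_heap_extract_min (heap, &key);
      if (!edge || key == edge->badness)
        return edge;            /* Empty heap, or the key is fresh.  */
      /* Stale key: re-insert with the current badness and retry.  */
      toy_heap_insert (heap, edge->badness, edge);
    }
}
#endif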

/* Recompute heap nodes for each caller edge of NODE.  */
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
  if (!bitmap_set_bit (updated_nodes, node->uid))
  node->global.estimated_growth = INT_MIN;

  /* See if there is something to do.  */
  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)

  /* Prune out edges we will no longer inline into.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
      for (; edge; edge = edge->next_caller)
          fibheap_delete_node (heap, (fibnode_t) edge->aux);
          if (edge->inline_failed)
            edge->inline_failed = failed_reason;

  for (; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      update_edge_key (heap, edge);

/* Recompute heap nodes for each uninlined call.
   This is used when we know that edge badnesses are only going to
   increase (we introduced a new call site), and thus all we need to do
   is insert the newly created edges into the heap.  */
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
  struct cgraph_edge *e = node->callees;
  node->global.estimated_growth = INT_MIN;
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
        && e->callee->local.inlinable
        && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
        && !bitmap_bit_p (updated_nodes, e->callee->uid))
        node->global.estimated_growth = INT_MIN;
        /* If the function becomes uninlinable, we need to remove it from
           the heap.  */
        if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
          update_caller_keys (heap, e->callee, updated_nodes);
        else
          /* Otherwise update just the edge E.  */
          update_edge_key (heap, e);
        if (e->caller == node)
          e = e->caller->callers;
      while (!e->next_callee);

/* Recompute heap nodes for the caller edges of each callee.
   Walk recursively into all inline clones.  */
update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
                        bitmap updated_nodes)
  struct cgraph_edge *e = node->callees;
  node->global.estimated_growth = INT_MIN;
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
        if (e->inline_failed)
          update_caller_keys (heap, e->callee, updated_nodes);
        if (e->caller == node)
          e = e->caller->callers;
      while (!e->next_callee);

/* Enqueue all recursive calls from NODE into the priority queue, ordered
   by how much we want to recursively inline each call.  */
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
  struct cgraph_edge *e;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
        /* When profile feedback is available, prioritize by the expected
           number of calls.  Without profile feedback we maintain a
           simple queue ordering candidates by recursion depth.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1 << 24) - 1) / (1 << 24))),
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
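
/* A standalone sketch (assumptions only, not GCC code) of the heap key
   used above when profile feedback is available: 64-bit execution
   counts are divided by a scale chosen so that max_count maps to at
   most 2^24, keeping the negated key comfortably inside the int range
   used by the fibheap.  */
#if 0
static int
toy_recursive_call_key (long long count, long long max_count,
                        int *fallback_priority)
{
  long long scale;

  if (!max_count)
    return (*fallback_priority)++;  /* Simple queue by recursion depth.  */
  scale = (max_count + (1 << 24) - 1) / (1 << 24);  /* Rounds up.  */
  return (int) -(count / scale);
}
#endif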

/* Decide on recursive inlining: in case the function has recursive
   calls, inline until the body size reaches the given limit.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */
cgraph_decide_recursive_inlining (struct cgraph_edge *edge,
                                  VEC (cgraph_edge_p, heap) **new_edges)
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;

  if (node->global.inlined_to)
    node = node->global.inlined_to;

  /* It does not make sense to recursively inline always-inline
     functions, as we are going to sorry() on the remaining calls
     anyway.  */
  if (node->local.disregard_inline_limits
      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))

  if (DECL_DECLARED_INLINE_P (node->decl))
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
      || estimate_size_after_inlining (node, edge) >= limit)
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
      fibheap_delete (heap);
             " Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->decl,
                                    node->count, CGRAPH_FREQ_BASE, 1,
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap))
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      if (estimate_size_after_inlining (node, curr) > limit)

      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
      if (depth > max_depth)
                     " maximal depth reached\n");

      if (!cgraph_maybe_hot_edge_p (curr))
            fprintf (dump_file, " Not inlining cold call\n");
      if (curr->count * 100 / node->count < probability)
                     " Probability of edge is too small\n");
                 " Inlining call of depth %i", depth);
            fprintf (dump_file, " called approx. %.2f times per call",
                     (double) curr->count / node->count);
        fprintf (dump_file, "\n");
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, " Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
             "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
             master_clone->global.size, node->global.size,
             master_clone->global.time, node->global.time);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone, so we don't need to search for them.  */
  for (node = cgraph_nodes; node != master_clone;
    if (node->global.inlined_to == master_clone)
      cgraph_remove_node (node);
  cgraph_remove_node (master_clone);

  /* FIXME: Recursive inlining actually reduces the number of calls of
     the function.  At this point we should probably walk the function
     and its inline clones and compensate the counts accordingly.  This
     probably doesn't matter much in practice.  */
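
/* A standalone sketch (assumptions only, not GCC code) of the
   probability cut-off applied in the loop above: a recursive edge is
   inlined only when it accounts for at least
   PARAM_MIN_INLINE_RECURSIVE_PROBABILITY percent of the executions of
   the function.  E.g. with the parameter at 10, an edge executed 5000
   times in a function entered 100000 times (5%) is rejected.  */
#if 0
static int
toy_recursive_edge_frequent_enough (long long edge_count,
                                    long long node_count,
                                    int min_probability_percent)
{
  return edge_count * 100 / node_count >= min_probability_percent;
}
#endif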

/* Set inline_failed for all callers of a given function to REASON.  */
cgraph_set_inline_failed (struct cgraph_node *node,
                          cgraph_inline_failed_t reason)
  struct cgraph_edge *e;
    fprintf (dump_file, "Inlining failed: %s\n",
             cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;

/* Given a whole-compilation-unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */
compute_max_insns (int insns)
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
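
/* A standalone sketch (with assumed default parameter values, not GCC
   code) of the cap computed above.  Assuming --param large-unit-insns=10000
   and --param inline-unit-growth=30, a 4000-insn unit is first rounded
   up to the 10000-insn "large unit" floor and may then grow by 30%, to
   13000 insns; a 50000-insn unit may grow to 65000.  */
#if 0
static long long
toy_compute_max_insns (int insns, int large_unit_insns, int growth_percent)
{
  int base = insns < large_unit_insns ? large_unit_insns : insns;
  return (long long) base * (100 + growth_percent) / 100;
}
#endif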

/* Compute the badness of all edges in NEW_EDGES and add them to HEAP.  */
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (edge->callee->local.inlinable
          && edge->inline_failed
          && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false),
                                    edge);

/* We use a greedy algorithm for inlining small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and the
   estimates are then updated as inlining proceeds.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */
cgraph_decide_inlining_of_small_functions (void)
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */
  for (node = cgraph_nodes; node; node = node->next)
      if (!node->local.inlinable || !node->callers)
        fprintf (dump_file, "Considering inline candidate %s.\n",
                 cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
          cgraph_set_inline_failed (node, failed_reason);

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap,
                                        cgraph_edge_badness (edge, false),
                                        edge);

  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  while (overall_size <= max_size
         && !fibheap_empty (heap))
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      cgraph_inline_failed_t not_good = CIF_OK;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);

      if (!edge->inline_failed)

      /* When updating the edge costs, we only decrease badness in the
         keys.  When the badness increases, we keep the heap as it is and
         re-insert the edge with its updated key.  */
      current_badness = cgraph_edge_badness (edge, false);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
          edge->aux = fibheap_insert (heap, current_badness, edge);

      callee = edge->callee;
      growth = estimate_edge_growth (edge);
                 "\nConsidering %s with %i size\n",
                 cgraph_node_name (edge->callee),
                 edge->callee->global.size);
                 " to be inlined into %s in %s:%i\n"
                 " Estimated growth after inlined into all callees is %+i insns.\n"
                 " Estimated badness is %i, frequency %.2f.\n",
                 cgraph_node_name (edge->caller),
                 flag_wpa ? "unknown"
                 : gimple_filename ((const_gimple) edge->call_stmt),
                 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
                 estimate_growth (edge->callee),
                 edge->frequency / (double) CGRAPH_FREQ_BASE);
        fprintf (dump_file, " Called " HOST_WIDEST_INT_PRINT_DEC "x\n",
                 edge->count);
      if (dump_flags & TDF_DETAILS)
        cgraph_edge_badness (edge, true);
      /* When we do not have profile info ready, we do not weight the
         position of the call within the procedure in any way.  This
         means that if a call of function A from function B seems
         profitable to inline, the recursive call of function A in the
         inline copy of A in B will look profitable too, and we end up
         inlining until we reach the maximal function growth.  This is
         not a good idea, so prohibit the recursive inlining.

         ??? When the frequencies are taken into account we might not
         need this restriction.

         We need to be careful here: in some testcases, e.g. directives.c
         in libcpp, we can estimate a self-recursive function to have
         negative growth when inlined completely.  */
          where = edge->caller;
          while (where->global.inlined_to)
              if (where->decl == edge->callee->decl)
              where = where->callers->caller;
          if (where->global.inlined_to)
              = (edge->callee->local.disregard_inline_limits
                 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
                fprintf (dump_file,
                         " inline_failed:Recursive inlining performed only "
                         "for function itself.\n");
      if (edge->callee->local.disregard_inline_limits)
        ;
      else if (!cgraph_maybe_hot_edge_p (edge))
        not_good = CIF_UNLIKELY_CALL;
      else if (!flag_inline_functions
               && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = CIF_NOT_DECLARED_INLINED;
      else if (optimize_function_for_size_p
                 (DECL_STRUCT_FUNCTION (edge->caller->decl)))
        not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && estimate_growth (edge->callee) > 0)
          edge->inline_failed = not_good;
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
      if (!tree_can_inline_p (edge)
          || edge->call_stmt_cannot_inline_p)
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
      if (cgraph_edge_recursive_p (edge))
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (edge,
                                                 flag_indirect_inlining
                                                 ? &new_indirect_edges : NULL))
              edge->inline_failed = CIF_RECURSIVE_INLINING;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_all_callee_keys (heap, where, updated_nodes);
          struct cgraph_node *callee;
          if (!cgraph_check_inline_limits (edge, &edge->inline_failed))
              fprintf (dump_file, " Not inlining into %s:%s.\n",
                       cgraph_node_name (edge->caller),
                       cgraph_inline_failed_string (edge->inline_failed));
          callee = edge->callee;
          gcc_checking_assert (!callee->global.inlined_to);
          cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);

          /* We inlined the last offline copy into the body.  This might
             cause callees of the function to have fewer call sites, so
             they may need updating.  */
          if (callee->global.inlined_to)
            update_all_callee_keys (heap, callee, updated_nodes);
          else
            update_callee_keys (heap, edge->callee, updated_nodes);

          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;

          /* Our profitability metric can depend on local properties such
             as the number of inlinable calls and the size of the
             function body.  After inlining, these properties might
             change for the function we inlined into (since its body size
             changed) and for the functions it calls (since the number of
             their inlinable callers might have changed).  */
          update_caller_keys (heap, where, updated_nodes);

          /* We removed one call of the function we just inlined.  If the
             offline copy is still needed, be sure to update the keys.  */
          if (callee != where && !callee->global.inlined_to)
            update_caller_keys (heap, callee, updated_nodes);
          bitmap_clear (updated_nodes);

                 " Inlined into %s which now has time %i and size %i, "
                 "net change of %+i.\n",
                 cgraph_node_name (edge->caller),
                 edge->caller->global.time,
                 edge->caller->global.size,
                 overall_size - old_size);

      if (min_size > overall_size)
          min_size = overall_size;
          max_size = compute_max_insns (min_size);

            fprintf (dump_file, "New minimal size reached: %i\n", min_size);

  while (!fibheap_empty (heap))
      int badness = fibheap_min_key (heap);

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);

      if (!edge->inline_failed)
#ifdef ENABLE_CHECKING
      gcc_assert (cgraph_edge_badness (edge, false) >= badness);
                 "\nSkipping %s with %i size\n",
                 cgraph_node_name (edge->callee),
                 edge->callee->global.size);
                 " called by %s in %s:%i\n"
                 " Estimated growth after inlined into all callees is %+i insns.\n"
                 " Estimated badness is %i, frequency %.2f.\n",
                 cgraph_node_name (edge->caller),
                 flag_wpa ? "unknown"
                 : gimple_filename ((const_gimple) edge->call_stmt),
                 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
                 estimate_growth (edge->callee),
                 edge->frequency / (double) CGRAPH_FREQ_BASE);
        fprintf (dump_file, " Called " HOST_WIDEST_INT_PRINT_DEC "x\n",
                 edge->count);
      if (dump_flags & TDF_DETAILS)
        cgraph_edge_badness (edge, true);
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed)
        edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);

/* Flatten NODE from the IPA inliner.  */
cgraph_flatten (struct cgraph_node *node)
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
      struct cgraph_node *orig_callee;

      if (e->call_stmt_cannot_inline_p)
            fprintf (dump_file, "Not inlining: %s",
                     cgraph_inline_failed_string (e->inline_failed));
      if (!e->callee->analyzed)
                     "Not inlining: Function body not available.\n");

      /* We've hit a cycle?  It is time to give up.  */
                     "Not inlining %s into %s to avoid cycle.\n",
                     cgraph_node_name (e->callee),
                     cgraph_node_name (e->caller));
          e->inline_failed = CIF_RECURSIVE_INLINING;

      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
          cgraph_flatten (e->callee);
      if (cgraph_edge_recursive_p (e))
            fprintf (dump_file, "Not inlining: recursive call.\n");
      if (!tree_can_inline_p (e))
            fprintf (dump_file, "Not inlining: %s",
                     cgraph_inline_failed_string (e->inline_failed));
      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
            fprintf (dump_file, "Not inlining: SSA form does not match.\n");

      /* Inline the edge and flatten the inline clone.  Avoid recursing
         through the original node if the node was cloned.  */
        fprintf (dump_file, " Inlining %s into %s.\n",
                 cgraph_node_name (e->callee),
                 cgraph_node_name (e->caller));
      orig_callee = e->callee;
      cgraph_mark_inline_edge (e, true, NULL);
      if (e->callee != orig_callee)
        orig_callee->aux = (void *) node;
      cgraph_flatten (e->callee);
      if (e->callee != orig_callee)
        orig_callee->aux = NULL;

/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.  */
cgraph_decide_inlining (void)
  struct cgraph_node *node;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int initial_size = 0;

  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();
  if (flag_indirect_inlining)
    ipa_create_all_structures_for_iinln ();

  for (node = cgraph_nodes; node; node = node->next)
      struct cgraph_edge *e;

      gcc_assert (inline_summary (node)->self_size == node->global.size);
      if (!DECL_EXTERNAL (node->decl))
        initial_size += node->global.size;
      for (e = node->callees; e; e = e->next_callee)
          int benefit = (inline_summary (node)->time_inlining_benefit
                         + e->call_stmt_time);
          if (max_count < e->count)
            max_count = e->count;
          if (max_benefit < benefit)
            max_benefit = benefit;
  gcc_assert (in_lto_p
              || (profile_info && flag_branch_probabilities));
  overall_size = initial_size;

  nnodes = cgraph_postorder (order);

             "\nDeciding on inlining.  Starting with size %i.\n",

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
      /* Handle nodes to be flattened, but don't update the overall unit
         size.  Calling the incremental inliner here is lame, a simple
         worklist should be enough.  What should be left here from the
         early inliner (if it runs) is the cyclic cases.  Ideally, when
         processing callees we would stop inlining at the entry of
         cycles, possibly cloning that entry point and trying to flatten
         itself, turning it into a self-recursive function.  */
      if (lookup_attribute ("flatten",
                            DECL_ATTRIBUTES (node->decl)) != NULL)
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_flatten (node);

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
              && !node->callers->next_caller
              && !node->global.inlined_to
              && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
              && node->local.inlinable
              && cgraph_function_body_availability (node) >= AVAIL_AVAILABLE
              && node->callers->inline_failed
              && node->callers->caller != node
              && node->callers->caller->global.inlined_to != node
              && !node->callers->call_stmt_cannot_inline_p
              && tree_can_inline_p (node->callers)
              && !DECL_EXTERNAL (node->decl))
              cgraph_inline_failed_t reason;
              old_size = overall_size;
                         "\nConsidering %s size %i.\n",
                         cgraph_node_name (node), node->global.size);
                         " Called once from %s %i insns.\n",
                         cgraph_node_name (node->callers->caller),
                         node->callers->caller->global.size);

              if (cgraph_check_inline_limits (node->callers, &reason))
                  struct cgraph_node *caller = node->callers->caller;
                  cgraph_mark_inline_edge (node->callers, true, NULL);
                             " Inlined into %s which now has %i size"
                             " for a net change of %+i size.\n",
                             cgraph_node_name (caller),
                             caller->global.size,
                             overall_size - old_size);
                             " Not inlining: %s.\n",
                             cgraph_inline_failed_string (reason));

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    ipa_free_all_structures_after_iinln ();

             "\nInlined %i calls, eliminated %i functions, "
             "size %i turned to %i size.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_size,
  inline_free_summary ();

/* Return true when N is a leaf function.  Accept cheap builtins in leaf
   functions.  */
leaf_node_p (struct cgraph_node *n)
  struct cgraph_edge *e;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))

/* Return true if the edge E is inlinable during early inlining.  */
cgraph_edge_early_inlinable_p (struct cgraph_edge *e, FILE *file)
  if (!e->callee->local.inlinable)
        fprintf (file, "Not inlining: Function not inlinable.\n");
  if (!e->callee->analyzed)
        fprintf (file, "Not inlining: Function body not available.\n");
  if (!tree_can_inline_p (e)
      || e->call_stmt_cannot_inline_p)
        fprintf (file, "Not inlining: %s.\n",
                 cgraph_inline_failed_string (e->inline_failed));
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
        fprintf (file, "Not inlining: not in SSA form.\n");

/* Inline always-inline function calls in NODE.  */
cgraph_perform_always_inlining (struct cgraph_node *node)
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
      if (!e->callee->local.disregard_inline_limits)
                 "Considering always-inline candidate %s.\n",
                 cgraph_node_name (e->callee));

      if (cgraph_edge_recursive_p (e))
            fprintf (dump_file, "Not inlining: recursive call.\n");
          e->inline_failed = CIF_RECURSIVE_INLINING;
      if (!cgraph_edge_early_inlinable_p (e, dump_file))
        fprintf (dump_file, " Inlining %s into %s.\n",
                 cgraph_node_name (e->callee),
                 cgraph_node_name (e->caller));
      cgraph_mark_inline_edge (e, true, NULL);

/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.  */
cgraph_decide_inlining_incrementally (struct cgraph_node *node)
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;

  /* Never inline regular functions into always-inline functions during
     incremental inlining.  */
  if (node->local.disregard_inline_limits)

  for (e = node->callees; e; e = e->next_callee)
      int allowed_growth = 0;

      if (!e->callee->local.inlinable
          || !e->inline_failed
          || e->callee->local.disregard_inline_limits)

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (e->callee->decl)
          && !flag_inline_small_functions
          && !flag_inline_functions)

        fprintf (dump_file, "Considering inline candidate %s.\n",
                 cgraph_node_name (e->callee));

      if (cgraph_edge_recursive_p (e))
            fprintf (dump_file, "Not inlining: recursive call.\n");
      if (!cgraph_edge_early_inlinable_p (e, dump_file))

      if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
          && optimize_function_for_speed_p (cfun))
        allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);

      /* When the function body would grow and inlining the function
         won't eliminate the need for an offline copy of the function,
         don't inline.  */
      if (estimate_edge_growth (e) > allowed_growth
          && estimate_growth (e->callee) > allowed_growth)
                     "Not inlining: code size would grow by %i.\n",
                     estimate_edge_growth (e));
      if (!cgraph_check_inline_limits (e, &e->inline_failed))
            fprintf (dump_file, "Not inlining: %s.\n",
                     cgraph_inline_failed_string (e->inline_failed));
      if (cgraph_default_inline_p (e->callee, &failed_reason))
            fprintf (dump_file, " Inlining %s into %s.\n",
                     cgraph_node_name (e->callee),
                     cgraph_node_name (e->caller));
          cgraph_mark_inline_edge (e, true, NULL);

/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading from
   collected-out nodes.  */
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */
cgraph_early_inlining (void)
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  unsigned int todo = 0;
  bool inlined = false;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = cgraph_perform_always_inlining (node);
      || !flag_early_inlining)
  else if (lookup_attribute ("flatten",
                             DECL_ATTRIBUTES (node->decl)) != NULL)
      /* When the function is marked to be flattened, recursively inline
         all calls in it.  */
                 "Flattening %s\n", cgraph_node_name (node));
      cgraph_flatten (node);
      /* We iterate incremental inlining to get trivial cases of indirect
         inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
             && cgraph_decide_inlining_incrementally (node))
          timevar_push (TV_INTEGRATION);
          todo |= optimize_inline_calls (current_function_decl);
          timevar_pop (TV_INTEGRATION);
        fprintf (dump_file, "Iterations: %i\n", iterations);
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);

  cfun->always_inline_functions_inlined = true;

struct gimple_opt_pass pass_early_inline =
  "einline",                            /* name */
  cgraph_early_inlining,                /* execute */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  PROP_ssa,                             /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */

/* Apply the inline plan to the function.  */
inline_transform (struct cgraph_node *node)
  unsigned int todo = 0;
  struct cgraph_edge *e;
  bool inline_p = false;

  /* FIXME: Currently the pass manager is adding the inline transform
     more than once to some clones.  This needs revisiting after WPA
     cleanups.  */
  if (cfun->after_inlining)

  /* We might need the body of this function so that we can expand it
     inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
      cgraph_redirect_edge_call_stmt_to_callee (e);
      if (!e->inline_failed || warn_inline)

      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);

  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();

/* When to run IPA inlining.  Inlining of always-inline functions happens
   during early inlining.  */
gate_cgraph_decide_inlining (void)
  /* ??? We'd like to skip this if not optimizing or not inlining, as all
     always-inline functions have been processed by early inlining
     already.  But this at least breaks EH with C++, as we need to
     unconditionally run fixup_cfg even at -O0.  So leave it on
     unconditionally for now.  */

struct ipa_opt_pass_d pass_ipa_inline =
  "inline",                             /* name */
  gate_cgraph_decide_inlining,          /* gate */
  cgraph_decide_inlining,               /* execute */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions | TODO_ggc_collect    /* todo_flags_finish */
 inline_generate_summary,               /* generate_summary */
 inline_write_summary,                  /* write_summary */
 inline_read_summary,                   /* read_summary */
 NULL,                                  /* write_optimization_summary */
 NULL,                                  /* read_optimization_summary */
 NULL,                                  /* stmt_fixup */
 inline_transform,                      /* function_transform */
 NULL,                                  /* variable_transform */

#include "gt-ipa-inline.h"