/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call as inlined and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass that aims to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

   The implementation of the particular heuristics is separated from the
   rest of the code to make it easier to replace it with a more complicated
   implementation in the future.  The rest of the inlining code acts as a
   library aimed at modifying the callgraph and verifying that the limits
   on code size growth are respected.

   To mark a given call inline, use the cgraph_mark_inline function; the
   verification is performed by cgraph_default_inline_p and
   cgraph_check_inline_limits.

   The heuristic implements a simple knapsack-style algorithm, ordering
   all functions by their "profitability" (estimated by code size growth)
   and inlining them in priority order.
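
   As a purely illustrative example (the numbers here are invented for
   this comment): if inlining callee A is estimated to shrink each of its
   call sites by 2 insns while inlining callee B grows each call site by
   30 insns, every call to A gets a better (smaller) key than any call to
   B, so all calls to A are inlined first and calls to B only for as long
   as the unit growth budget lasts.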

   cgraph_decide_inlining implements the heuristics taking the whole
   callgraph into account, while cgraph_decide_inlining_incrementally
   considers only one function at a time and is used by the early inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes the local properties of functions that are used by
     the inliner: the estimated function body size, whether the function is
     inlinable at all, and the stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.

   pass_early_inlining

     A simple local inlining pass inlining callees into the current
     function.  This pass makes no whole-compilation-unit analysis, and
     thus, when allowed to do inlining that expands code size, it might
     result in unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (which is maintained by
     the pass manager).  The functions after inlining are early optimized,
     so the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold the
     abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, early inlining is also necessary to reduce the
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures the
     first inlining.

   pass_ipa_inline

     This is the main pass implementing the simple greedy algorithm for
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph); unlike early inlining, it does
     not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline on
     a given function.  Possibly the function body before inlining is saved
     when it is needed for further inlining later.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"

/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after a cycle is detected during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE,
  INLINE_ALL
};
static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
                                      int);

/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_insns;
static gcov_type max_count;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}

/* Estimate size of the function after inlining WHAT into TO.  */
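/* The estimate charges TO with WHAT's body size once per inlined call,
   minus the cost of the call itself (PARAM_INLINE_CALL_COST plus a move
   cost per argument).  Purely illustrative numbers: inlining a 20-insn
   callee whose call costs 14 insns twice into a 100-insn caller gives
   (20 - 14) * 2 + 100 = 112 insns.  */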
static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}

/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the out-of-line
   function call.  */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (gimple_body (e->callee->decl))
            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
                                 update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  /* Account the callee's frame at its offset within the final function and
     keep track of the peak stack usage seen so far.  */
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
      + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  */
void
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;

  if (e->callee->inline_decl)
    cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));

  gcc_assert (e->inline_failed);
  e->inline_failed = NULL;

  if (!e->callee->global.inlined)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update the size of the caller and of all functions the caller is
     inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
                                                       what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */
static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true);
          if (e == edge)
            edge = next;
        }
    }

  return edge;
}

/* Estimate the growth caused by inlining NODE into all callers.  */
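/* The growth is the sum, over all not-yet-inlined call sites, of the
   caller size increase, minus the body size when no offline copy of the
   function would have to survive.  The result is cached in
   node->global.estimated_growth; INT_MIN means "not computed yet".  */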
static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                 - e->caller->global.insns);

  /* ??? Wrong for self-recursive functions or cases where we decide to not
     inline for different reasons, but it is not a big deal, as in that case
     we will keep the body around, but we will also avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl))
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}

/* Return false when inlining WHAT into TO is not a good idea, as it would
   cause too large a growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going to be
   inlined; otherwise figure out how many call sites in TO call WHAT and
   verify that all of them can be inlined.  */
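/* Illustrative numbers only: with --param large-function-growth=100 the
   base limit for a 1000-insn function is 1000 + 1000 * 100 / 100 = 2000
   insns, and the limit only kicks in once the estimated result also
   exceeds --param large-function-insns.  */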
static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            const char **reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as the base for limiting the growth.  */
  if (inline_summary (to)->self_insns > inline_summary (what)->self_insns)
    limit = inline_summary (to)->self_insns;
  else
    limit = inline_summary (what)->self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = N_("--param large-function-growth limit reached");
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + inline_summary (to)->estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = N_("--param large-stack-frame-growth limit reached");
      return false;
    }
  return true;
}

/* Return true when function N is small enough to be inlined.  */
bool
cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
  tree decl = n->decl;

  if (n->inline_decl)
    decl = n->inline_decl;
  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
        *reason = N_("function not inline candidate");
      return false;
    }

  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
    {
      if (reason)
        *reason = N_("function body not available");
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-single limit reached");
          return false;
        }
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-auto limit reached");
          return false;
        }
    }

  return true;
}

/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining any case where the same function appears
   more than once on a single recursion-nest path in the inline graph.  */
static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             const char **reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics, so we should
     not warn about it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? N_("recursive inlining") : "");
  return recursive;
}

/* A cost model driving the inlining heuristics such that the edges with the
   smallest badness are inlined first.  After each inlining, the costs of all
   caller edges of the affected nodes are recomputed, so the metric may
   accurately depend on values such as the number of inlinable callers of a
   function or the function body size.  */
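/* In short: edges whose inlining shrinks code get the most negative (best)
   keys; with a whole-program profile the key scales with the edge's
   execution count; otherwise the static call frequency or the loop nest
   depth is used.  Smaller keys are extracted from the fibheap first.  */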
static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  int badness;
  int growth =
    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

  growth -= edge->caller->global.insns;

  /* Always prefer inlining that saves code size.  */
  if (growth <= 0)
    badness = INT_MIN - growth;

  /* When profiling is available, base the priorities on -(#calls / growth),
     so we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;

  /* When a function-local profile is available, base the priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since, while the call might be frequent
     within a function, the function itself may be infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so that functions of
     the same size called fewer times get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      growth -= edge->caller->global.insns;
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 256)
        div = 256 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      badness += cgraph_estimate_growth (edge->callee);
    }
  /* When a function-local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on the
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined into the program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
    }
  /* Make recursive inlining always happen after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}

/* Recompute heap nodes for each of NODE's caller edges.  */
static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  const char *failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = (fibnode_t) edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only increases the keys.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}

/* Recompute heap nodes for each of the caller edges of each callee.  */
static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else
      update_callee_keys (heap, e->callee, updated_nodes);
}

/* Enqueue all recursive calls from NODE into the priority queue, keyed by
   how likely we want to recursively inline the call.  */
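/* Note the scaling in the profile-feedback key below: execution counts are
   divided by ceil (max_count / 2^24), so the largest count maps to roughly
   24 bits and its negation safely fits in a fibheap key.  */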
static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by the expected
           number of calls.  Without profile feedback we maintain a simple
           queue to order candidates by recursion depth.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given limit.  If any new indirect
   edges are discovered in the process, add them to NEW_EDGES, unless it is
   NULL.  */
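/* The scheme below: make a master clone of NODE, redirect the most
   promising recursive call sites to it one at a time (ordered by
   lookup_recursive_calls), and stop when the size or depth limit is hit;
   the master clone and the bodies inlined into it are removed
   afterwards.  */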
static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
                                  VEC (cgraph_edge_p, heap) *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     "   maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, "   Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         "   Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            fprintf (dump_file, " called approx. %.2f times per call",
                     (double)curr->count / node->count);
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false);
      if (flag_indirect_inlining)
        ipa_propagate_indirect_call_infos (curr, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, body grown from %i to %i insns\n", n,
             master_clone->global.insns, node->global.insns);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls of the
     function.  At this point we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

/* Set inline_failed for all callers of a given function to REASON.  */
static void
cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n", reason);
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given a whole-compilation-unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */
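/* Purely illustrative numbers: a 20000-insn unit compiled with
   --param inline-unit-growth=30 may grow to 20000 * 130 / 100 = 26000
   insns; units smaller than --param large-unit-insns are first rounded up
   to that size, so small programs get proportionally more slack.  */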
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}

/* Compute the badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
    }
}

/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and the
   estimates are updated after each inlining is performed.  */
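/* The loop below repeatedly extracts the least-bad edge, re-validates it
   (the limits may have tightened since its key was computed), inlines it,
   and then recomputes the keys of all edges whose badness may have
   changed.  It stops once the unit grows past the cap computed by
   compute_max_insns.  */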
static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  const char *failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_insns, max_insns;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
          }
    }

  max_insns = compute_max_insns (overall_insns);
  min_insns = overall_insns;

  while (overall_insns <= max_insns
         && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      const char *not_good = NULL;

      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge),
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* Without profile information we do not weight the position of a call
         within the calling procedure in any way.  This means that if a call
         to function A from function B seems profitable to inline, the
         recursive call to A in the inline copy of A in B will look
         profitable too, and we would end up inlining until we reach the
         maximal function growth.  This is not a good idea, so prohibit
         recursive inlining here.

         ??? When the frequencies are taken into account we might not need
         this restriction.  */
      if (!edge->count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge))
        not_good = N_("call is unlikely and code size would grow");
      if (!flag_inline_functions
          && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = N_("function not declared inline and code size would grow");
      if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (edge->caller->decl)))
        not_good = N_("optimizing for size and code size would grow");
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = not_good;
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
            }
          continue;
        }
      if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
        {
          gimple_call_set_cannot_inline (edge->call_stmt, true);
          edge->inline_failed = N_("target specific option mismatch");
          if (dump_file)
            fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where, new_indirect_edges))
            continue;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (gimple_call_cannot_inline_p (edge->call_stmt)
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller), edge->inline_failed);
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true);
          if (flag_indirect_inlining)
            {
              ipa_propagate_indirect_call_infos (edge, new_indirect_edges);
              add_new_edges_to_heap (heap, new_indirect_edges);
            }
          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties such as
         the number of inlinable calls and the size of the function body.
         After inlining, these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has %i insns,"
                   "net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.insns,
                   overall_insns - old_insns);
        }
      if (min_insns > overall_insns)
        {
          min_insns = overall_insns;
          max_insns = compute_max_insns (min_insns);

          if (dump_file)
            fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
        }
    }
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = N_("--param inline-unit-growth limit reached");
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Decide on the inlining.  We do so in topological order to avoid expenses
   on updating data structures.  */
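/* Order of work: flatten requests and always_inline edges first (so no
   earlier growth can block them), then the greedy pass over small
   functions, and finally functions called exactly once, whose offline
   copy can then be removed, so unit size barely changes.  */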
static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;
  int initial_insns = 0;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);

  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && (node->needed || node->reachable))
      {
        struct cgraph_edge *e;

        initial_insns += inline_summary (node)->self_insns;
        gcc_assert (inline_summary (node)->self_insns == node->global.insns);
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
      }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with %i insns.\n",
             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a
     priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      struct cgraph_edge *e, *next;

      node = order[i];

      /* Handle nodes to be flattened, but don't update overall unit
         size.  */
      if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
        }

      if (!node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file,
                 "\nConsidering %s %i insns (always inline)\n",
                 cgraph_node_name (node), node->global.insns);
      old_insns = overall_insns;
      for (e = node->callers; e; e = next)
        {
          next = e->next_caller;
          if (!e->inline_failed || gimple_call_cannot_inline_p (e->call_stmt))
            continue;
          if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                           &e->inline_failed))
            continue;
          if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
            {
              gimple_call_set_cannot_inline (e->call_stmt, true);
              continue;
            }
          cgraph_mark_inline_edge (e, true);
          if (flag_indirect_inlining)
            ipa_propagate_indirect_call_infos (e, NULL);
          if (dump_file)
            fprintf (dump_file,
                     " Inlined into %s which now has %i insns.\n",
                     cgraph_node_name (e->caller),
                     e->caller->global.insns);
        }
      /* Inlining a self-recursive function might introduce new calls to
         itself that we did not see in the loop above.  Fill in the proper
         reason why the inlining failed.  */
      for (e = node->callers; e; e = e->next_caller)
        if (e->inline_failed)
          e->inline_failed = N_("recursive inlining");
      if (dump_file)
        fprintf (dump_file,
                 " Inlined for a net change of %+i insns.\n",
                 overall_insns - old_insns);
    }

  cgraph_decide_inlining_of_small_functions ();

  /* After this point, any edge discovery performed by indirect inlining is
     no good, so let's give up.  */
  if (flag_indirect_inlining)
    free_all_ipa_structures_after_iinln ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers
              && !node->callers->next_caller
              && !node->needed
              && node->local.inlinable
              && node->callers->inline_failed
              && !gimple_call_cannot_inline_p (node->callers->call_stmt)
              && !DECL_EXTERNAL (node->decl)
              && !DECL_COMDAT (node->decl))
            {
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s %i insns.\n",
                           cgraph_node_name (node), node->global.insns);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.insns);
                }

              old_insns = overall_insns;

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              NULL, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i insns"
                             " for a net change of %+i insns.\n",
                             cgraph_node_name (node->callers->caller),
                             node->callers->caller->global.insns,
                             overall_insns - old_insns);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Inline limit reached, not inlined.\n");
                }
            }
        }
    }

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "%i insns turned to %i insns.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_insns,
             overall_insns);
  free (order);
  return 0;
}

/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We detect cycles by storing the mode of the inliner into the cgraph_node
   the last time we visited it in the recursion.  In general, when the mode
   is set, we have recursive inlining; but as a special case, we want to
   try harder to inline ALWAYS_INLINE functions: consider the callgraph
   a->b->c->b, with a being flatten and b being always inline.  Flattening
   'a' will collapse a->b->c before hitting the cycle.  To accommodate
   always inline, we however need to inline a->b->c->b.

   So after hitting a cycle for the first time, we switch into
   ALWAYS_INLINE mode and stop inlining only after hitting ALWAYS_INLINE in
   ALWAYS_INLINE mode.  */
static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;

  /* We've hit a cycle?  */
  if (callee_mode)
    {
      /* We are not in ALWAYS_INLINE-only mode yet, and the function in
         question is always_inline: try harder.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Hit cycle in %s, switching to always inline only.\n",
                       cgraph_node_name (callee));
            }
          mode = INLINE_ALWAYS_INLINE;
        }
      /* Otherwise it is time to give up.  */
      else
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining %s into %s to avoid cycle.\n",
                       cgraph_node_name (callee),
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
                              ? N_("recursive inlining") : "");
          return false;
        }
    }

  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
               cgraph_node_name (e->callee),
               cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    cgraph_mark_inline (e);

  /* In order to fully inline always_inline functions, we need to recurse
     here, since the inlined functions might not be processed by
     incremental inlining at all yet.

     Also flattening needs to be done recursively.  */
  if (mode == INLINE_ALL || always_inline)
    cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
  callee->aux = (void *)(size_t) callee_mode;
  return true;
}

/* Decide on the inlining.  We do so in topological order to avoid expenses
   on updating data structures.
   DEPTH is the depth of recursion, used only for debug output.  */
static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode,
                                      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  const char *failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (enum inlining_mode) (size_t)node->aux;

  if (mode != INLINE_ALWAYS_INLINE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
        }
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;

  /* First of all look for always inline functions.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->callee->local.disregard_inline_limits
          && (mode != INLINE_ALL || !e->callee->local.inlinable))
        continue;
      if (gimple_call_cannot_inline_p (e->call_stmt))
        continue;
      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed && mode == INLINE_ALL)
        {
          inlined |= try_inline (e, mode, depth);
          continue;
        }
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file,
                   "Considering to always inline inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        }
      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: recursive call.\n");
            }
          continue;
        }
      if (!tree_can_inline_p (node->decl, e->callee->decl))
        {
          gimple_call_set_cannot_inline (e->call_stmt, true);
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining: Target specific option mismatch.\n");
            }
          continue;
        }
      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: SSA form does not match.\n");
            }
          continue;
        }
      if (!gimple_body (e->callee->decl) && !e->callee->inline_decl)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining: Function body no longer available.\n");
            }
          continue;
        }
      inlined |= try_inline (e, mode, depth);
    }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
          continue;
        if (dump_file)
          fprintf (dump_file, "Considering inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }
        /* When the function body would grow and inlining the function
           won't eliminate the need for an offline copy of the function,
           don't inline.  */
        if ((mode == INLINE_SIZE
             || (!flag_inline_functions
                 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                > e->caller->global.insns)
            && cgraph_estimate_growth (e->callee) > 0)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i insns.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.insns);
              }
            continue;
          }
        if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false)
            || gimple_call_cannot_inline_p (e->call_stmt))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
              }
            continue;
          }
        if (!gimple_body (e->callee->decl) && !e->callee->inline_decl)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        if (!tree_can_inline_p (node->decl, e->callee->decl))
          {
            gimple_call_set_cannot_inline (e->call_stmt, true);
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Target specific option mismatch.\n");
              }
            continue;
          }
        if (cgraph_default_inline_p (e->callee, &failed_reason))
          inlined |= try_inline (e, mode, depth);
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}

/* Because inlining might remove no-longer-reachable nodes, we need to keep
   the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very many
   function calls.  */
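/* Because this runs in INLINE_SIZE mode, only inlining that is not
   expected to enlarge the caller (or that eliminates the offline body) is
   performed here, so the pass cannot cause the unbounded unit growth an
   unrestricted local inliner could.  */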
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;

  if (sorrycount || errorcount)
    return 0;
  if (cgraph_decide_inlining_incrementally (node, INLINE_SIZE, 0))
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  return todo;
}

/* When inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_early_inlining;
}

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",                            /* name */
  cgraph_gate_early_inlining,           /* gate */
  cgraph_early_inlining,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};

/* When inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_early_inlining
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
}

/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a stand-alone IPA pass for
   doing so.  */
struct simple_ipa_opt_pass pass_ipa_early_inline =
{
 {
  SIMPLE_IPA_PASS,
  "einline_ipa",                        /* name */
  cgraph_gate_ipa_early_inlining,       /* gate */
  NULL,                                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph                      /* todo_flags_finish */
 }
};

/* Compute parameters of functions used by the inliner.  */
unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  gcc_assert (!node->global.inlined_to);
  inline_summary (node)->estimated_self_stack_size
    = estimated_stack_frame_size ();
  node->global.estimated_stack_size
    = inline_summary (node)->estimated_self_stack_size;
  node->global.stack_frame_offset = 0;
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);
  inline_summary (node)->self_insns
    = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
  node->global.insns = inline_summary (node)->self_insns;
  return 0;
}

/* Compute parameters of functions used by the inliner using
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}

struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  NULL,                                 /* name */
  NULL,                                 /* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0                                     /* todo_flags_finish */
 }
};

/* This function performs intraprocedural analysis in NODE that is required
   to inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  struct cgraph_edge *cs;

  if (!flag_ipa_cp)
    {
      ipa_count_formal_params (node);
      ipa_create_param_decls_array (node);
      ipa_detect_param_modifications (node);
    }
  ipa_analyze_params_uses (node);

  if (!flag_ipa_cp)
    for (cs = node->callees; cs; cs = cs->next_callee)
      {
        ipa_count_arguments (cs);
        ipa_compute_jump_functions (cs);
      }

  if (dump_file)
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}

/* Note function body size.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  if (flag_indirect_inlining)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}

/* Called when a new function is inserted into the callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}

/* Note function body sizes.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);
}

/* Apply the inline plan to the function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (current_function_decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;

  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  return todo | execute_fixup_cfg ();
}

struct ipa_opt_pass pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",                             /* name */
  NULL,                                 /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions               /* todo_flags_finish */
 },
 inline_generate_summary,               /* generate_summary */
 NULL,                                  /* write_summary */
 NULL,                                  /* read_summary */
 NULL,                                  /* function_read_summary */
 0,                                     /* function_transform_todo_flags_start */
 inline_transform,                      /* function_transform */
 NULL,                                  /* variable_transform */
};

#include "gt-ipa-inline.h"