clones or re-using node originally representing out-of-line function call.
*/
void
-cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate)
+cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate, bool update_original)
{
- struct cgraph_node *n;
-
- /* We may eliminate the need for out-of-line copy to be output. In that
- case just go ahead and re-use it. */
- if (!e->callee->callers->next_caller
- && (!e->callee->needed || DECL_EXTERNAL (e->callee->decl))
- && duplicate
- && flag_unit_at_a_time)
+ if (duplicate)
{
- gcc_assert (!e->callee->global.inlined_to);
- if (!DECL_EXTERNAL (e->callee->decl))
- overall_insns -= e->callee->global.insns, nfunctions_inlined++;
- duplicate = 0;
- }
- else if (duplicate)
- {
- n = cgraph_clone_node (e->callee, e->count, e->loop_nest, true);
- cgraph_redirect_edge_callee (e, n);
+ /* We may eliminate the need for the out-of-line copy to be output.
+ In that case just go ahead and re-use it. */
+ if (!e->callee->callers->next_caller
+ && !e->callee->needed
+ && flag_unit_at_a_time)
+ {
+ gcc_assert (!e->callee->global.inlined_to);
+ if (DECL_SAVED_TREE (e->callee->decl))
+ overall_insns -= e->callee->global.insns, nfunctions_inlined++;
+ duplicate = false;
+ }
+ else
+ {
+ struct cgraph_node *n;
+ n = cgraph_clone_node (e->callee, e->count, e->loop_nest,
+ update_original);
+ cgraph_redirect_edge_callee (e, n);
+ }
}
if (e->caller->global.inlined_to)
/* Recursively clone all bodies. */
for (e = e->callee->callees; e; e = e->next_callee)
if (!e->inline_failed)
- cgraph_clone_inlined_nodes (e, duplicate);
+ cgraph_clone_inlined_nodes (e, duplicate, update_original);
}
-/* Mark edge E as inlined and update callgraph accordingly. */
+/* Mark edge E as inlined and update callgraph accordingly.
+ UPDATE_ORIGINAL specifies whether the profile of the original function
+ should be updated. */
void
-cgraph_mark_inline_edge (struct cgraph_edge *e)
+cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
{
int old_insns = 0, new_insns = 0;
struct cgraph_node *to = NULL, *what;
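+ /* When a separate declaration has been recorded for inlining this
+    callee, redirect the edge to the node for that declaration so its
+    body is the one that gets cloned. */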
+ if (e->callee->inline_decl)
+ cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
+
gcc_assert (e->inline_failed);
e->inline_failed = NULL;
DECL_POSSIBLY_INLINED (e->callee->decl) = true;
e->callee->global.inlined = true;
- cgraph_clone_inlined_nodes (e, true);
+ cgraph_clone_inlined_nodes (e, true, update_original);
what = e->callee;
next = e->next_caller;
if (e->caller == to && e->inline_failed)
{
- cgraph_mark_inline_edge (e);
+ cgraph_mark_inline_edge (e, true);
if (e == edge)
edge = next;
times++;
}
/* Return false when inlining WHAT into TO is not good idea
- as it would cause too large growth of function bodies. */
+ as it would cause too large growth of function bodies.
+ When ONE_ONLY is true, assume that only one call site is going
+ to be inlined, otherwise figure out how many call sites in
+ TO call WHAT and verify that all can be inlined. */
static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
- const char **reason)
+ const char **reason, bool one_only)
{
int times = 0;
struct cgraph_edge *e;
int newsize;
int limit;
+ if (one_only)
+ times = 1;
+ else
+ for (e = to->callees; e; e = e->next_callee)
+ if (e->callee == what)
+ times++;
+
if (to->global.inlined_to)
to = to->global.inlined_to;
- for (e = to->callees; e; e = e->next_callee)
- if (e->callee == what)
- times++;
-
/* When inlining large function body called once into small function,
take the inlined function as base for limiting the growth. */
if (to->local.self_insns > what->local.self_insns)
limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
+ /* Check the size after inlining against the function limits. But allow
+ the function to shrink if it went over the limits by forced inlining. */
newsize = cgraph_estimate_size_after_inlining (times, to, what);
- if (newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
+ if (newsize >= to->global.insns
+ && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
&& newsize > limit)
{
if (reason)
bool
cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
- if (!DECL_INLINE (n->decl))
+ tree decl = n->decl;
+
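+ /* When an alternative declaration has been recorded for inlining,
+    judge inlinability on that declaration rather than the original. */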
+ if (n->inline_decl)
+ decl = n->inline_decl;
+ if (!DECL_INLINE (decl))
{
if (reason)
*reason = N_("function not inlinable");
return false;
}
- if (!DECL_SAVED_TREE (n->decl))
+ if (!DECL_STRUCT_FUNCTION (decl)->cfg)
{
if (reason)
*reason = N_("function body not available");
return false;
}
- if (DECL_DECLARED_INLINE_P (n->decl))
+ if (DECL_DECLARED_INLINE_P (decl))
{
if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
{
bitmap updated_nodes)
{
struct cgraph_edge *edge;
+ const char *failed_reason;
if (!node->local.inlinable || node->local.disregard_inline_limits
|| node->global.inlined_to)
bitmap_set_bit (updated_nodes, node->uid);
node->global.estimated_growth = INT_MIN;
+ if (!node->local.inlinable)
+ return;
+ /* Prune out edges we won't inline into anymore. */
+ if (!cgraph_default_inline_p (node, &failed_reason))
+ {
+ for (edge = node->callers; edge; edge = edge->next_caller)
+ if (edge->aux)
+ {
+ fibheap_delete_node (heap, edge->aux);
+ edge->aux = NULL;
+ if (edge->inline_failed)
+ edge->inline_failed = failed_reason;
+ }
+ return;
+ }
+
for (edge = node->callers; edge; edge = edge->next_caller)
if (edge->inline_failed)
{
{
if (dump_file)
fprintf (dump_file, " inlining %s", cgraph_node_name (e->callee));
- cgraph_mark_inline_edge (e);
+ cgraph_mark_inline_edge (e, true);
cgraph_flatten_node (e->callee, cycles);
}
else if (dump_file)
int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
fibheap_t heap;
struct cgraph_edge *e;
- struct cgraph_node *master_clone;
+ struct cgraph_node *master_clone, *next;
int depth = 0;
int n = 0;
master_clone->needed = true;
for (e = master_clone->callees; e; e = e->next_callee)
if (!e->inline_failed)
- cgraph_clone_inlined_nodes (e, true);
+ cgraph_clone_inlined_nodes (e, true, false);
/* Do the inlining and update list of recursive call during process. */
while (!fibheap_empty (heap)
fprintf (dump_file, "\n");
}
cgraph_redirect_edge_callee (curr, master_clone);
- cgraph_mark_inline_edge (curr);
+ cgraph_mark_inline_edge (curr, false);
lookup_recursive_calls (node, curr->callee, heap);
n++;
}
into master clone gets queued just before master clone so we don't
need recursion. */
for (node = cgraph_nodes; node != master_clone;
- node = node->next)
- if (node->global.inlined_to == master_clone)
- cgraph_remove_node (node);
+ node = next)
+ {
+ next = node->next;
+ if (node->global.inlined_to == master_clone)
+ cgraph_remove_node (node);
+ }
cgraph_remove_node (master_clone);
/* FIXME: Recursive inlining actually reduces number of calls of the
function. At this place we should probably walk the function and
inline clones and compensate the counts accordingly. This probably
doesn't matter much in practice. */
- return true;
+ return n > 0;
}
/* Set inline_failed for all callers of given function to REASON. */
if (dump_file)
{
fprintf (dump_file,
- "\nConsidering %s with %i insns to be inlined into %s\n"
+ "\nConsidering %s with %i insns\n",
+ cgraph_node_name (edge->callee),
+ edge->callee->global.insns);
+ fprintf (dump_file,
+ " to be inlined into %s\n"
" Estimated growth after inlined into all callees is %+i insns.\n"
" Estimated badness is %i.\n",
- cgraph_node_name (edge->callee),
- edge->callee->global.insns,
cgraph_node_name (edge->caller),
cgraph_estimate_growth (edge->callee),
cgraph_edge_badness (edge));
{
struct cgraph_node *callee;
if (!cgraph_check_inline_limits (edge->caller, edge->callee,
- &edge->inline_failed))
+ &edge->inline_failed, true))
{
if (dump_file)
fprintf (dump_file, " Not inlining into %s:%s.\n",
continue;
}
callee = edge->callee;
- cgraph_mark_inline_edge (edge);
+ cgraph_mark_inline_edge (edge, true);
update_callee_keys (heap, callee, updated_nodes);
}
where = edge->caller;
bitmap_clear (updated_nodes);
if (dump_file)
- fprintf (dump_file,
- " Inlined into %s which now has %i insns.\n",
- cgraph_node_name (edge->caller),
- edge->caller->global.insns);
- if (dump_file)
- fprintf (dump_file,
- " Inlined for a net change of %+i insns.\n",
- overall_insns - old_insns);
+ {
+ fprintf (dump_file,
+ " Inlined into %s which now has %i insns,"
+ " net change of %+i insns.\n",
+ cgraph_node_name (edge->caller),
+ edge->caller->global.insns,
+ overall_insns - old_insns);
+ }
}
while ((edge = fibheap_extract_min (heap)) != NULL)
{
/* Decide on the inlining. We do so in the topological order to avoid
expenses on updating data structures. */
-static void
+static unsigned int
cgraph_decide_inlining (void)
{
struct cgraph_node *node;
int nnodes;
struct cgraph_node **order =
- xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
+ XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
int old_insns = 0;
int i;
timevar_push (TV_INLINE_HEURISTICS);
max_count = 0;
for (node = cgraph_nodes; node; node = node->next)
- {
- struct cgraph_edge *e;
- initial_insns += node->local.self_insns;
- for (e = node->callees; e; e = e->next_callee)
- if (max_count < e->count)
- max_count = e->count;
- }
+ if (node->analyzed && (node->needed || node->reachable))
+ {
+ struct cgraph_edge *e;
+
+ /* At the moment, no IPA passes change function bodies before inlining.
+ Save some time by not recomputing function body sizes if early inlining
+ already did so. */
+ if (!flag_early_inlining)
+ node->local.self_insns = node->global.insns
+ = estimate_num_insns (node->decl);
+
+ initial_insns += node->local.self_insns;
+ gcc_assert (node->local.self_insns == node->global.insns);
+ for (e = node->callees; e; e = e->next_callee)
+ if (max_count < e->count)
+ max_count = e->count;
+ }
overall_insns = initial_insns;
gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
- max_insns = ((HOST_WIDEST_INT) overall_insns
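+ /* Base the allowed unit growth on the larger of the current unit size
+    and PARAM_LARGE_UNIT_INSNS, so that very small units can still grow
+    by a useful absolute amount. */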
+ max_insns = overall_insns;
+ if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
+ max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
+
+ max_insns = ((HOST_WIDEST_INT) max_insns
* (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
nnodes = cgraph_postorder (order);
if (cgraph_recursive_inlining_p (e->caller, e->callee,
&e->inline_failed))
continue;
- cgraph_mark_inline_edge (e);
+ cgraph_mark_inline_edge (e, true);
if (dump_file)
fprintf (dump_file,
" Inlined into %s which now has %i insns.\n",
}
if (!flag_really_no_inline)
- {
- cgraph_decide_inlining_of_small_functions ();
+ cgraph_decide_inlining_of_small_functions ();
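+ /* Next, inline functions that are called exactly once, since their
+    offline copy can then usually be eliminated. */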
+ if (!flag_really_no_inline
+ && flag_inline_functions_called_once)
+ {
if (dump_file)
fprintf (dump_file, "\nDeciding on functions called once:\n");
if (ok)
{
if (dump_file)
- fprintf (dump_file,
- "\nConsidering %s %i insns.\n"
- " Called once from %s %i insns.\n",
- cgraph_node_name (node), node->global.insns,
- cgraph_node_name (node->callers->caller),
- node->callers->caller->global.insns);
+ {
+ fprintf (dump_file,
+ "\nConsidering %s %i insns.\n",
+ cgraph_node_name (node), node->global.insns);
+ fprintf (dump_file,
+ " Called once from %s %i insns.\n",
+ cgraph_node_name (node->callers->caller),
+ node->callers->caller->global.insns);
+ }
old_insns = overall_insns;
if (cgraph_check_inline_limits (node->callers->caller, node,
- NULL))
+ NULL, false))
{
cgraph_mark_inline (node->callers);
if (dump_file)
overall_insns);
free (order);
timevar_pop (TV_INLINE_HEURISTICS);
+ return 0;
}
/* Decide on the inlining. We do so in the topological order to avoid
&& !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
/* ??? It is possible that renaming variable removed the function body
in duplicate_decls. See gcc.c-torture/compile/20011119-2.c */
- && DECL_SAVED_TREE (e->callee->decl))
+ && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
{
if (dump_file && early)
- fprintf (dump_file, " Early inlining %s into %s\n",
- cgraph_node_name (e->callee), cgraph_node_name (node));
+ {
+ fprintf (dump_file, " Early inlining %s",
+ cgraph_node_name (e->callee));
+ fprintf (dump_file, " into %s\n", cgraph_node_name (node));
+ }
cgraph_mark_inline (e);
inlined = true;
}
&& !e->callee->local.disregard_inline_limits
&& !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
&& (!early
- || (cgraph_estimate_size_after_inlining (1, e->caller, node)
+ || (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
<= e->caller->global.insns))
- && cgraph_check_inline_limits (node, e->callee, &e->inline_failed)
- && DECL_SAVED_TREE (e->callee->decl))
+ && cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
+ false)
+ && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
{
if (cgraph_default_inline_p (e->callee, &failed_reason))
{
if (dump_file && early)
- fprintf (dump_file, " Early inlining %s into %s\n",
- cgraph_node_name (e->callee), cgraph_node_name (node));
+ {
+ fprintf (dump_file, " Early inlining %s",
+ cgraph_node_name (e->callee));
+ fprintf (dump_file, " into %s\n", cgraph_node_name (node));
+ }
cgraph_mark_inline (e);
inlined = true;
}
node->local.self_insns = node->global.insns;
current_function_decl = NULL;
pop_cfun ();
- ggc_collect ();
}
return inlined;
}
0 /* letter */
};
+/* Because inlining might remove no-longer reachable nodes, we need to
+ keep the array visible to the garbage collector so that we do not
+ read nodes that have already been collected. */
+static int nnodes;
+static GTY ((length ("nnodes"))) struct cgraph_node **order;
+
/* Do inlining of small functions. Doing so early helps profiling and other
passes to be somewhat more effective and avoids some code duplication in
later real inlining pass for testcases with very many function calls. */
-static void
+static unsigned int
cgraph_early_inlining (void)
{
struct cgraph_node *node;
- int nnodes;
- struct cgraph_node **order =
- xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
int i;
if (sorrycount || errorcount)
- return;
+ return 0;
#ifdef ENABLE_CHECKING
for (node = cgraph_nodes; node; node = node->next)
gcc_assert (!node->aux);
#endif
+ order = ggc_alloc (sizeof (*order) * cgraph_n_nodes);
nnodes = cgraph_postorder (order);
for (i = nnodes - 1; i >= 0; i--)
{
node = order[i];
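+ /* Compute the body size estimate for each reachable function before
+    any early inlining decisions are made. */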
+ if (node->analyzed && (node->needed || node->reachable))
+ node->local.self_insns = node->global.insns
+ = estimate_num_insns (node->decl);
+ }
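+ /* Now walk the functions again and decide on inlining; collect garbage
+    after each function where something was inlined to keep memory usage
+    in check. */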
+ for (i = nnodes - 1; i >= 0; i--)
+ {
+ node = order[i];
if (node->analyzed && node->local.inlinable
&& (node->needed || node->reachable)
&& node->callers)
- cgraph_decide_inlining_incrementally (node, true);
+ {
+ if (cgraph_decide_inlining_incrementally (node, true))
+ ggc_collect ();
+ }
}
cgraph_remove_unreachable_nodes (true, dump_file);
#ifdef ENABLE_CHECKING
for (node = cgraph_nodes; node; node = node->next)
gcc_assert (!node->global.inlined_to);
#endif
- free (order);
+ ggc_free (order);
+ order = NULL;
+ nnodes = 0;
+ return 0;
}
/* When inlining shall be performed. */
TODO_dump_cgraph | TODO_dump_func, /* todo_flags_finish */
0 /* letter */
};
+
+#include "gt-ipa-inline.h"