/* Inlining decision heuristics.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
Contributed by Jan Hubicka
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
/* Inlining decision heuristics
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
+#include "rtl.h"
/* Modes the incremental inliner operates on:
In that case just go ahead and re-use it. */
if (!e->callee->callers->next_caller
&& !e->callee->needed
+ && !cgraph_new_nodes
&& flag_unit_at_a_time)
{
gcc_assert (!e->callee->global.inlined_to);
else
{
struct cgraph_node *n;
- n = cgraph_clone_node (e->callee, e->count, e->loop_nest,
+ n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
update_original);
cgraph_redirect_edge_callee (e, n);
}
struct cgraph_node *to = edge->caller;
struct cgraph_node *what = edge->callee;
struct cgraph_edge *e, *next;
- int times = 0;
+ gcc_assert (!CALL_CANNOT_INLINE_P (edge->call_stmt));
/* Look for all calls, mark them inline, and recursively clone
all inlined functions. */
for (e = what->callers; e; e = next)
cgraph_mark_inline_edge (e, true);
if (e == edge)
edge = next;
- times++;
}
}
- gcc_assert (times);
+
return edge;
}
&& (edge->count
<= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
return false;
+ if (lookup_attribute ("cold", DECL_ATTRIBUTES (edge->callee->decl))
+ || lookup_attribute ("cold", DECL_ATTRIBUTES (edge->caller->decl)))
+ return false;
+ if (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl)))
+ return true;
+ if (flag_guess_branch_prob
+ && edge->frequency < (CGRAPH_FREQ_MAX
+ / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
+ return false;
return true;
}
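The hotness test above short-circuits in a fixed order: profile counts first, then explicit attributes, then guessed frequencies. A minimal standalone sketch of that decision ladder follows; the types and parameter values are simplified stand-ins, not GCC's own.

#include <stdbool.h>

#define FREQ_MAX 1000              /* stand-in for CGRAPH_FREQ_MAX */
#define HOT_COUNT_FRACTION 10000   /* stand-in for HOT_BB_COUNT_FRACTION */
#define HOT_FREQ_FRACTION 1000     /* stand-in for HOT_BB_FREQUENCY_FRACTION */

struct edge_sketch
{
  long long count;   /* profile-feedback execution count */
  int frequency;     /* guessed relative frequency, 0..FREQ_MAX */
  bool callee_cold, caller_cold, caller_hot;
};

static bool
maybe_hot_edge_sketch (const struct edge_sketch *e, long long sum_max,
		       bool have_profile, bool guess_branch_prob)
{
  /* Profile feedback: cold if executed rarely relative to the
     program-wide maximum.  */
  if (have_profile && e->count <= sum_max / HOT_COUNT_FRACTION)
    return false;
  /* Explicit attributes win over guessed frequencies.  */
  if (e->callee_cold || e->caller_cold)
    return false;
  if (e->caller_hot)
    return true;
  /* Guessed profile: cold if the call sits in an infrequent block.  */
  if (guess_branch_prob && e->frequency < FREQ_MAX / HOT_FREQ_FRACTION)
    return false;
  return true;
}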
smallest badness are inlined first. After each inlining is performed,
the costs of all caller edges of the affected nodes are recomputed so the
metrics may accurately depend on values such as the number of inlinable callers
- of the function or function body size.
-
- With profiling we use number of executions of each edge to drive the cost.
- We also should distinguish hot and cold calls where the cold calls are
- inlined into only when code size is overall improved.
- */
+ of the function or function body size. */
static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
- if (max_count)
+ int badness;
+ int growth =
+ cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+
+ growth -= edge->caller->global.insns;
+
+ /* Always prefer inlining that saves code size. */
+ if (growth <= 0)
+ badness = INT_MIN - growth;
+
+ /* When profiling is available, base priorities on -(#calls / growth),
+ so we optimize for the overall number of "executed" inlined calls. */
+ else if (max_count)
+ badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;
+
+ /* When a function-local profile is available, base priorities on
+ growth / frequency, so we optimize for the overall frequency of inlined
+ calls. This is not too accurate, since the call might be frequent
+ within the function while the function itself is infrequent.
+
+ Another objective to optimize for is the number of different calls inlined.
+ We add the estimated growth after inlining all functions to bias the
+ priorities slightly in this direction (so functions of the same size
+ that are called fewer times get priority). */
+ else if (flag_guess_branch_prob)
{
+ int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
int growth =
cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
growth -= edge->caller->global.insns;
+ badness = growth * 256;
+
+ /* Compress the divisor's range so we don't overflow. */
+ if (div > 256)
+ div = 256 + ceil_log2 (div) - 8;
+ if (div < 1)
+ div = 1;
+ if (badness > 0)
+ badness /= div;
+ badness += cgraph_estimate_growth (edge->callee);
+ }
+ /* When a function-local profile is not available or does not give
+ useful information (i.e. frequency is zero), base the cost on
+ loop nest and overall size growth, so we optimize for the overall
+ number of functions fully inlined in the program. */
+ else
+ {
+ int nest = MIN (edge->loop_nest, 8);
+ badness = cgraph_estimate_growth (edge->callee) * 256;
- /* Always prefer inlining saving code size. */
- if (growth <= 0)
- return INT_MIN - growth;
- return ((int)((double)edge->count * INT_MIN / max_count)) / growth;
+ /* Decrease badness if call is nested. */
+ if (badness > 0)
+ badness >>= nest;
+ else
+ {
+ badness <<= nest;
+ }
}
+ /* Make recursive inlining always happen after other inlining is done. */
+ if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
+ return badness + 1;
else
- {
- int nest = MIN (edge->loop_nest, 8);
- int badness = cgraph_estimate_growth (edge->callee) * 256;
-
- /* Decrease badness if call is nested. */
- if (badness > 0)
- badness >>= nest;
- else
- badness <<= nest;
-
- /* Make recursive inlining happen always after other inlining is done. */
- if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
- return badness + 1;
- else
- return badness;
- }
+ return badness;
}
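To make the three priority regimes above concrete, here is a self-contained sketch of the same ladder. Names, constants, and the estimator inputs are illustrative stand-ins, not the cgraph API; GCC compresses the frequency divisor logarithmically where this sketch merely clamps it.

#include <limits.h>

#define FREQ_BASE 1000   /* stand-in for CGRAPH_FREQ_BASE */

/* Sketch of the priority ladder above; smaller badness is inlined first.
   GROWTH is the caller's size change from inlining this one call,
   OVERALL_GROWTH the estimated growth after inlining into all callers.  */
static int
edge_badness_sketch (long long count, long long max_count, int frequency,
		     int growth, int overall_growth, int loop_nest,
		     int guess_branch_prob, int recursive_p)
{
  int badness;

  if (growth <= 0)
    /* Shrinking the caller is always best; the more shrinkage, the
       better the rank.  */
    badness = INT_MIN - growth;
  else if (max_count)
    /* Profile feedback: rank by -(#executions / growth).  */
    badness = (int) ((double) count * INT_MIN / max_count) / growth;
  else if (guess_branch_prob)
    {
      /* Guessed profile: rank by growth / frequency, clamping the
	 divisor so very frequent calls cannot dominate the scale.  */
      int div = frequency * 100 / FREQ_BASE;
      badness = growth * 256;
      if (div > 256)
	div = 256;
      if (div < 1)
	div = 1;
      if (badness > 0)
	badness /= div;
      badness += overall_growth;
    }
  else
    {
      /* No profile at all: rank by overall growth, boosted by the
	 loop nesting depth of the call site.  */
      int nest = loop_nest < 8 ? loop_nest : 8;
      badness = overall_growth * 256;
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;
    }
  /* Recursive edges sort just after other edges of equal badness.  */
  return recursive_p ? badness + 1 : badness;
}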
/* Recompute heap nodes for each caller edge. */
for (edge = node->callers; edge; edge = edge->next_caller)
if (edge->aux)
{
- fibheap_delete_node (heap, edge->aux);
+ fibheap_delete_node (heap, (fibnode_t) edge->aux);
edge->aux = NULL;
if (edge->inline_failed)
edge->inline_failed = failed_reason;
int badness = cgraph_edge_badness (edge);
if (edge->aux)
{
- fibnode_t n = edge->aux;
+ fibnode_t n = (fibnode_t) edge->aux;
gcc_assert (n->data == edge);
if (n->key == badness)
continue;
/* fibheap_replace_key can only decrease keys; when the key would
increase, it fails and we fall back to delete + re-insert. */
if (fibheap_replace_key (heap, n, badness))
continue;
- fibheap_delete_node (heap, edge->aux);
+ fibheap_delete_node (heap, (fibnode_t) edge->aux);
}
edge->aux = fibheap_insert (heap, badness, edge);
}
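The delete/re-insert dance above follows from Fibonacci heaps supporting cheap decrease-key only. A minimal sketch of the pattern, assuming libiberty's fibheap.h is on the include path and using a hypothetical edge type for the heap payload:

#include "fibheap.h"

struct edge_stub { void *aux; };   /* hypothetical stand-in for cgraph_edge */

static void
update_key_sketch (fibheap_t heap, struct edge_stub *edge, int badness)
{
  fibnode_t n = (fibnode_t) edge->aux;

  if (n->key == badness)
    return;
  if (badness < n->key)
    {
      /* Cheap in-place decrease-key.  */
      fibheap_replace_key (heap, n, badness);
      return;
    }
  /* The key would grow; libiberty refuses increases, so fall back to
     deleting the node and inserting it afresh.  */
  fibheap_delete_node (heap, n);
  edge->aux = fibheap_insert (heap, badness, edge);
}

The explicit badness < n->key test keeps the sketch independent of fibheap_replace_key's return value, which reports the old key rather than success.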
int depth = 0;
int n = 0;
+ if (optimize_size)
+ return false;
+
if (DECL_DECLARED_INLINE_P (node->decl))
{
limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
cgraph_node_name (node));
/* We need the original clone to copy around. */
- master_clone = cgraph_clone_node (node, node->count, 1, false);
+ master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
master_clone->needed = true;
for (e = master_clone->callees; e; e = e->next_callee)
if (!e->inline_failed)
&& (cgraph_estimate_size_after_inlining (1, node, master_clone)
<= limit))
{
- struct cgraph_edge *curr = fibheap_extract_min (heap);
+ struct cgraph_edge *curr
+ = (struct cgraph_edge *) fibheap_extract_min (heap);
struct cgraph_node *cnode;
depth = 1;
{
if (dump_file)
fprintf (dump_file,
- " maxmal depth reached\n");
+ " maximal depth reached\n");
continue;
}
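In rough outline, the recursive inliner above clones the node once (the master clone) and keeps splicing copies of the body while the cumulative size and depth stay under the limits; the real driver pops one call site at a time from the heap. A deliberately simplified sketch of the bound checks only, with constants standing in for PARAM_MAX_INLINE_INSNS_RECURSIVE, its _AUTO variant, and the depth parameter:

#define MAX_RECURSIVE_INSNS 450        /* stand-in, declared-inline case */
#define MAX_RECURSIVE_INSNS_AUTO 45    /* stand-in, auto-inline case */
#define MAX_RECURSIVE_DEPTH 8          /* stand-in depth bound */

/* Estimated size after bounded recursive expansion: splice in copies
   of the body while both the size and depth limits hold.  */
static int
recursive_expansion_sketch (int self_insns, int declared_inline)
{
  int limit = declared_inline ? MAX_RECURSIVE_INSNS
			      : MAX_RECURSIVE_INSNS_AUTO;
  int total = self_insns, depth = 1;

  while (total + self_insns <= limit && depth < MAX_RECURSIVE_DEPTH)
    {
      total += self_insns;
      depth++;
    }
  return total;
}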
max_insns = compute_max_insns (overall_insns);
min_insns = overall_insns;
- while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
+ while (overall_insns <= max_insns
+ && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
{
int old_insns = overall_insns;
struct cgraph_node *where;
fprintf (dump_file,
" to be inlined into %s\n"
" Estimated growth after inlined into all callees is %+i insns.\n"
- " Estimated badness is %i.\n",
+ " Estimated badness is %i, frequency %.2f.\n",
cgraph_node_name (edge->caller),
cgraph_estimate_growth (edge->callee),
- cgraph_edge_badness (edge));
+ cgraph_edge_badness (edge),
+ edge->frequency / (double)CGRAPH_FREQ_BASE);
if (edge->count)
fprintf (dump_file, " Called " HOST_WIDEST_INT_PRINT_DEC "x\n", edge->count);
}
}
}
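The loop above drains the priority queue while the unit stays inside a size budget. The budget is a fixed percentage over the smallest unit size seen so far, and is recomputed whenever a new minimum is reached, so inlines that shrink the unit extend the remaining allowance. A sketch of that budget computation, with the percentage and floor as stand-ins for PARAM_INLINE_UNIT_GROWTH and PARAM_LARGE_UNIT_INSNS:

#define LARGE_UNIT_INSNS 10000	/* stand-in floor for small units */
#define UNIT_GROWTH_PCT 30	/* stand-in growth percentage */

/* Budget = max (insns, floor) scaled up by the growth percentage;
   the widening multiply avoids overflow for large units.  */
static int
compute_budget_sketch (int insns)
{
  int base = insns < LARGE_UNIT_INSNS ? LARGE_UNIT_INSNS : insns;
  return (int) ((long long) base * (100 + UNIT_GROWTH_PCT) / 100);
}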
- if (!cgraph_maybe_hot_edge_p (edge) && growth > 0)
+ if ((!cgraph_maybe_hot_edge_p (edge) || optimize_size) && growth > 0)
{
if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
&edge->inline_failed))
else
{
struct cgraph_node *callee;
- if (!cgraph_check_inline_limits (edge->caller, edge->callee,
- &edge->inline_failed, true))
+ if (CALL_CANNOT_INLINE_P (edge->call_stmt)
+ || !cgraph_check_inline_limits (edge->caller, edge->callee,
+ &edge->inline_failed, true))
{
if (dump_file)
fprintf (dump_file, " Not inlining into %s:%s.\n",
fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
}
}
- while ((edge = fibheap_extract_min (heap)) != NULL)
+ while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
{
gcc_assert (edge->aux);
edge->aux = NULL;
for (e = node->callers; e; e = next)
{
next = e->next_caller;
- if (!e->inline_failed)
+ if (!e->inline_failed || CALL_CANNOT_INLINE_P (e->call_stmt))
continue;
if (cgraph_recursive_inlining_p (e->caller, e->callee,
&e->inline_failed))
if (node->callers && !node->callers->next_caller && !node->needed
&& node->local.inlinable && node->callers->inline_failed
+ && !CALL_CANNOT_INLINE_P (node->callers->call_stmt)
&& !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
{
if (dump_file)
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
struct cgraph_node *callee = e->callee;
- enum inlining_mode callee_mode = (size_t) callee->aux;
+ enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
bool always_inline = e->callee->local.disregard_inline_limits;
/* Have we hit a cycle? */
verify_cgraph_node (node);
#endif
- old_mode = (size_t)node->aux;
+ old_mode = (enum inlining_mode) (size_t)node->aux;
if (mode != INLINE_ALWAYS_INLINE
&& lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
if (!e->callee->local.disregard_inline_limits
&& (mode != INLINE_ALL || !e->callee->local.inlinable))
continue;
+ if (CALL_CANNOT_INLINE_P (e->call_stmt))
+ continue;
/* When the edge is already inlined, we just need to recurse into
it in order to fully flatten the leaves. */
if (!e->inline_failed && mode == INLINE_ALL)
continue;
}
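The (enum inlining_mode) (size_t) casts above stash the active mode in the node's spare aux pointer rather than in a side table. A minimal sketch of that pointer-smuggling idiom, with hypothetical names:

#include <stddef.h>

enum mode_sketch { MODE_NONE, MODE_ALWAYS_INLINE, MODE_SIZE, MODE_SPEED, MODE_ALL };

struct node_sketch { void *aux; };

/* Store the enum in the otherwise unused aux pointer; reading it back
   goes through size_t to avoid pointer/integer width warnings.  */
static void
set_mode (struct node_sketch *n, enum mode_sketch m)
{
  n->aux = (void *) (size_t) m;
}

static enum mode_sketch
get_mode (const struct node_sketch *n)
{
  return (enum mode_sketch) (size_t) n->aux;
}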
/* When the function body would grow and inlining the function won't
- elliminate the need for offline copy of the function, don't inline.
+ eliminate the need for an offline copy of the function, don't inline.
*/
if (mode == INLINE_SIZE
&& (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
continue;
}
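The INLINE_SIZE guard above only accepts an edge when the caller does not grow, or when inlining everywhere lets the out-of-line copy disappear. A one-function sketch of that test; the names are stand-ins for the cgraph size estimators:

#include <stdbool.h>

/* True when inlining in size mode is acceptable: either the caller
   shrinks (or stays equal), or the callee's estimated overall growth
   is non-positive, meaning the offline copy can go away.  */
static bool
size_mode_inline_ok (int caller_insns, int insns_after_inlining,
		     int callee_overall_growth)
{
  return insns_after_inlining <= caller_insns
	 || callee_overall_growth <= 0;
}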
if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
- false))
+ false)
+ || CALL_CANNOT_INLINE_P (e->call_stmt))
{
if (dump_file)
{
if (sorrycount || errorcount)
return 0;
if (cgraph_decide_inlining_incrementally (node,
- flag_unit_at_a_time
+ flag_unit_at_a_time || optimize_size
? INLINE_SIZE : INLINE_SPEED, 0))
{
timevar_push (TV_INTEGRATION);
node->local.inlinable = tree_inlinable_function_p (current_function_decl);
node->local.self_insns = estimate_num_insns (current_function_decl,
&eni_inlining_weights);
- if (node->local.inlinable)
+ if (node->local.inlinable && !node->local.disregard_inline_limits)
node->local.disregard_inline_limits
- = lang_hooks.tree_inlining.disregard_inline_limits (current_function_decl);
+ = disregard_inline_limits_p (current_function_decl);
if (flag_really_no_inline && !node->local.disregard_inline_limits)
node->local.inlinable = 0;
/* Inlining characteristics are maintained by the cgraph_mark_inline. */