/* Inlining decision heuristics.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
Contributed by Jan Hubicka
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
/* Inlining decision heuristics
struct cgraph_node *to = edge->caller;
struct cgraph_node *what = edge->callee;
struct cgraph_edge *e, *next;
- int times = 0;
+ gcc_assert (!CALL_CANNOT_INLINE_P (edge->call_stmt));
/* Look for all calls, mark them inline and recursively clone
all inlined functions. */
for (e = what->callers; e; e = next)
cgraph_mark_inline_edge (e, true);
if (e == edge)
edge = next;
- times++;
}
}
- gcc_assert (times);
+
return edge;
}
if (n->inline_decl)
decl = n->inline_decl;
- if (!DECL_INLINE (decl))
+ if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
{
if (reason)
- *reason = N_("function not inlinable");
+ *reason = N_("function not inline candidate");
return false;
}
for (edge = node->callers; edge; edge = edge->next_caller)
if (edge->aux)
{
- fibheap_delete_node (heap, edge->aux);
+ fibheap_delete_node (heap, (fibnode_t) edge->aux);
edge->aux = NULL;
if (edge->inline_failed)
edge->inline_failed = failed_reason;
int badness = cgraph_edge_badness (edge);
if (edge->aux)
{
- fibnode_t n = edge->aux;
+ fibnode_t n = (fibnode_t) edge->aux;
gcc_assert (n->data == edge);
if (n->key == badness)
continue;
/* fibheap_replace_key only increases the keys. */
if (fibheap_replace_key (heap, n, badness))
continue;
- fibheap_delete_node (heap, edge->aux);
+ fibheap_delete_node (heap, (fibnode_t) edge->aux);
}
edge->aux = fibheap_insert (heap, badness, edge);
}
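
The casts added at the fibheap call sites are there because libiberty's
fibonacci heap traffics in void *: fibheap_extract_min hands back the stored
data untyped, and edge->aux holds the heap node as a bare pointer. Below is a
minimal, self-contained sketch of the priority-queue pattern the inliner
builds on, keying entries by badness and extracting the smallest key first.
It uses only the public libiberty fibheap.h interface; the item type, names
and key values are invented for illustration.

#include <stdio.h>
#include "fibheap.h"

struct item { const char *name; };

int
main (void)
{
  fibheap_t heap = fibheap_new ();
  struct item hot = { "hot call" };
  struct item cold = { "cold call" };
  struct item *best;

  /* A lower key means a better inline candidate; the key plays the role
     of the badness computed by cgraph_edge_badness.  */
  fibheap_insert (heap, 10, &hot);
  fibheap_insert (heap, 500, &cold);

  /* fibheap_extract_min returns void *, hence the explicit casts back to
     struct cgraph_edge * at the call sites in this file.  */
  best = (struct item *) fibheap_extract_min (heap);
  printf ("best candidate: %s\n", best->name);

  fibheap_delete (heap);
  return 0;
}

Build it against libiberty, e.g. cc sketch.c -I<gcc-src>/include -liberty.
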
int depth = 0;
int n = 0;
- if (optimize_size)
+ if (optimize_size
+ || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
return false;
if (DECL_DECLARED_INLINE_P (node->decl))
&& (cgraph_estimate_size_after_inlining (1, node, master_clone)
<= limit))
{
- struct cgraph_edge *curr = fibheap_extract_min (heap);
+ struct cgraph_edge *curr
+ = (struct cgraph_edge *) fibheap_extract_min (heap);
struct cgraph_node *cnode;
depth = 1;
{
if (dump_file)
fprintf (dump_file,
- " maxmal depth reached\n");
+ " maximal depth reached\n");
continue;
}
max_insns = compute_max_insns (overall_insns);
min_insns = overall_insns;
- while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
+ while (overall_insns <= max_insns
+ && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
{
int old_insns = overall_insns;
struct cgraph_node *where;
int growth =
cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+ const char *not_good = NULL;
growth -= edge->caller->global.insns;
}
}
- if ((!cgraph_maybe_hot_edge_p (edge) || optimize_size) && growth > 0)
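+ /* The checks below assign NOT_GOOD in sequence, so when several
+ conditions hold the last matching reason wins and optimize_size takes
+ precedence; the edge is rejected further down only when inlining would
+ grow the caller and the callee's estimated overall growth is positive. */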
+ if (!cgraph_maybe_hot_edge_p (edge))
+ not_good = N_("call is unlikely and code size would grow");
+ if (!flag_inline_functions
+ && !DECL_DECLARED_INLINE_P (edge->callee->decl))
+ not_good = N_("function not declared inline and code size would grow");
+ if (optimize_size)
+ not_good = N_("optimizing for size and code size would grow");
+ if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
{
if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
&edge->inline_failed))
{
- edge->inline_failed =
- N_("call is unlikely");
+ edge->inline_failed = not_good;
if (dump_file)
fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
}
else
{
struct cgraph_node *callee;
- if (!cgraph_check_inline_limits (edge->caller, edge->callee,
- &edge->inline_failed, true))
+ if (CALL_CANNOT_INLINE_P (edge->call_stmt)
+ || !cgraph_check_inline_limits (edge->caller, edge->callee,
+ &edge->inline_failed, true))
{
if (dump_file)
fprintf (dump_file, " Not inlining into %s:%s.\n",
fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
}
}
- while ((edge = fibheap_extract_min (heap)) != NULL)
+ while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
{
gcc_assert (edge->aux);
edge->aux = NULL;
for (e = node->callers; e; e = next)
{
next = e->next_caller;
- if (!e->inline_failed)
+ if (!e->inline_failed || CALL_CANNOT_INLINE_P (e->call_stmt))
continue;
if (cgraph_recursive_inlining_p (e->caller, e->callee,
&e->inline_failed))
if (node->callers && !node->callers->next_caller && !node->needed
&& node->local.inlinable && node->callers->inline_failed
+ && !CALL_CANNOT_INLINE_P (node->callers->call_stmt)
&& !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
{
if (dump_file)
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
struct cgraph_node *callee = e->callee;
- enum inlining_mode callee_mode = (size_t) callee->aux;
+ enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
bool always_inline = e->callee->local.disregard_inline_limits;
/* We've hit a cycle? */
verify_cgraph_node (node);
#endif
- old_mode = (size_t)node->aux;
+ old_mode = (enum inlining_mode) (size_t)node->aux;
if (mode != INLINE_ALWAYS_INLINE
&& lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
if (!e->callee->local.disregard_inline_limits
&& (mode != INLINE_ALL || !e->callee->local.inlinable))
continue;
+ if (CALL_CANNOT_INLINE_P (e->call_stmt))
+ continue;
/* When the edge is already inlined, we just need to recurse into
it in order to fully flatten the leaves. */
if (!e->inline_failed && mode == INLINE_ALL)
/* When the function body would grow and inlining the function won't
eliminate the need for an offline copy of the function, don't inline.
*/
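/* Illustrative numbers only: inlining a 30-insn callee into a 100-insn
caller leaves the caller at roughly 130 insns, and if other callers keep
the offline copy alive the unit as a whole still grows, so the edge is
skipped. */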
- if (mode == INLINE_SIZE
+ if ((mode == INLINE_SIZE
+ || (!flag_inline_functions
+ && !DECL_DECLARED_INLINE_P (e->callee->decl)))
&& (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
> e->caller->global.insns)
&& cgraph_estimate_growth (e->callee) > 0)
continue;
}
if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
- false))
+ false)
+ || CALL_CANNOT_INLINE_P (e->call_stmt))
{
if (dump_file)
{
return flag_inline_trees;
}
-struct tree_opt_pass pass_ipa_inline =
+struct simple_ipa_opt_pass pass_ipa_inline =
{
+ {
+ SIMPLE_IPA_PASS,
"inline", /* name */
cgraph_gate_inlining, /* gate */
cgraph_decide_inlining, /* execute */
0, /* properties_destroyed */
TODO_remove_functions, /* todo_flags_finish */
TODO_dump_cgraph | TODO_dump_func
- | TODO_remove_functions, /* todo_flags_finish */
- 0 /* letter */
+ | TODO_remove_functions /* todo_flags_finish */
+ }
};
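
The extra level of braces and the SIMPLE_IPA_PASS / GIMPLE_PASS tags reflect
that the pass descriptors now embed the generic pass fields as their first
member. A toy illustration of that initializer shape, using invented names
rather than the real pass-manager types:

#include <stdbool.h>

enum toy_pass_type { TOY_GIMPLE_PASS, TOY_SIMPLE_IPA_PASS };

struct toy_pass
{
  enum toy_pass_type type;
  const char *name;
  bool (*gate) (void);
  unsigned int (*execute) (void);
};

struct toy_simple_ipa_pass
{
  struct toy_pass pass;  /* Generic part comes first, hence the nested brace.  */
};

static unsigned int
toy_execute (void)
{
  return 0;
}

struct toy_simple_ipa_pass toy_inline_pass =
{
 {
  TOY_SIMPLE_IPA_PASS,
  "toy_inline",                 /* name */
  NULL,                         /* gate */
  toy_execute                   /* execute */
 }
};
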
/* Because inlining might remove no-longer reachable nodes, we need to
return flag_inline_trees && flag_early_inlining;
}
-struct tree_opt_pass pass_early_inline =
+struct gimple_opt_pass pass_early_inline =
{
+ {
+ GIMPLE_PASS,
"einline", /* name */
cgraph_gate_early_inlining, /* gate */
cgraph_early_inlining, /* execute */
PROP_cfg, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func, /* todo_flags_finish */
- 0 /* letter */
+ TODO_dump_func /* todo_flags_finish */
+ }
};
/* When inlining shall be performed. */
/* IPA pass wrapper for the early inlining pass. We need to run early inlining
before tree profiling, so we have a stand-alone IPA pass for doing so. */
-struct tree_opt_pass pass_ipa_early_inline =
+struct simple_ipa_opt_pass pass_ipa_early_inline =
{
+ {
+ SIMPLE_IPA_PASS,
"einline_ipa", /* name */
cgraph_gate_ipa_early_inlining, /* gate */
NULL, /* execute */
PROP_cfg, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_cgraph, /* todo_flags_finish */
- 0 /* letter */
+ TODO_dump_cgraph /* todo_flags_finish */
+ }
};
/* Compute parameters of functions used by inliner. */
node->local.inlinable = tree_inlinable_function_p (current_function_decl);
node->local.self_insns = estimate_num_insns (current_function_decl,
&eni_inlining_weights);
- if (node->local.inlinable)
+ if (node->local.inlinable && !node->local.disregard_inline_limits)
node->local.disregard_inline_limits
- = lang_hooks.tree_inlining.disregard_inline_limits (current_function_decl);
+ = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
if (flag_really_no_inline && !node->local.disregard_inline_limits)
node->local.inlinable = 0;
/* Inlining characteristics are maintained by the cgraph_mark_inline. */
return flag_inline_trees;
}
-struct tree_opt_pass pass_inline_parameters =
+struct gimple_opt_pass pass_inline_parameters =
{
+ {
+ GIMPLE_PASS,
NULL, /* name */
gate_inline_passes, /* gate */
compute_inline_parameters, /* execute */
PROP_cfg, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- 0, /* todo_flags_finish */
- 0 /* letter */
+ 0 /* todo_flags_finish */
+ }
};
/* Apply inline plan to the function. */
return todo | execute_fixup_cfg ();
}
-struct tree_opt_pass pass_apply_inline =
+struct gimple_opt_pass pass_apply_inline =
{
+ {
+ GIMPLE_PASS,
"apply_inline", /* name */
NULL, /* gate */
apply_inline, /* execute */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_dump_func | TODO_verify_flow
- | TODO_verify_stmts, /* todo_flags_finish */
- 0 /* letter */
+ | TODO_verify_stmts /* todo_flags_finish */
+ }
};
#include "gt-ipa-inline.h"