You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
/* Inlining decision heuristics
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
+#include "hashtab.h"
+#include "coverage.h"
+#include "ggc.h"
/* Statistics we collect about inlining algorithm. */
static int ncalls_inlined;
static int nfunctions_inlined;
static int initial_insns;
static int overall_insns;
+static int max_insns;
+static gcov_type max_count;
/* Estimate size of the function after inlining WHAT into TO. */
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
struct cgraph_node *what)
{
- tree fndecl = what->decl;
- tree arg;
+ int size;
+ tree fndecl = what->decl, arg;
int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);
+
for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
call_insns += estimate_move_cost (TREE_TYPE (arg));
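+
+  /* Illustrative example (assumed numbers): inlining one copy (TIMES == 1)
+     of a 100-insn callee whose call costs 16 insns into a 50-insn caller
+     yields (100 - 16) * 1 + 50 == 134 insns.  */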
- return (what->global.insns - call_insns) * times + to->global.insns;
+ size = (what->global.insns - call_insns) * times + to->global.insns;
+ gcc_assert (size >= 0);
+ return size;
}
/* E is expected to be an edge being inlined. Clone destination node of
}
else if (duplicate)
{
- n = cgraph_clone_node (e->callee, e->count, e->loop_nest);
+ n = cgraph_clone_node (e->callee, e->count, e->loop_nest, true);
cgraph_redirect_edge_callee (e, n);
}
{
int growth = 0;
struct cgraph_edge *e;
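+ /* INT_MIN serves as the "not yet computed" sentinel for the cached
+ growth; update_caller_keys and update_callee_keys reset the field to
+ INT_MIN whenever inlining may have invalidated the cached value.  */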
+ if (node->global.estimated_growth != INT_MIN)
+ return node->global.estimated_growth;
for (e = node->callers; e; e = e->next_caller)
if (e->inline_failed)
if (!node->needed && !DECL_EXTERNAL (node->decl))
growth -= node->global.insns;
+ node->global.estimated_growth = growth;
return growth;
}
/* Return true when function N is small enough to be inlined. */
bool
-cgraph_default_inline_p (struct cgraph_node *n)
+cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
- if (!DECL_INLINE (n->decl) || !DECL_SAVED_TREE (n->decl))
- return false;
+ if (!DECL_INLINE (n->decl))
+ {
+ if (reason)
+ *reason = N_("function not inlinable");
+ return false;
+ }
+
+ if (!DECL_SAVED_TREE (n->decl))
+ {
+ if (reason)
+ *reason = N_("function body not available");
+ return false;
+ }
+
if (DECL_DECLARED_INLINE_P (n->decl))
- return n->global.insns < MAX_INLINE_INSNS_SINGLE;
+ {
+ if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
+ {
+ if (reason)
+ *reason = N_("--param max-inline-insns-single limit reached");
+ return false;
+ }
+ }
else
- return n->global.insns < MAX_INLINE_INSNS_AUTO;
+ {
+ if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
+ {
+ if (reason)
+ *reason = N_("--param max-inline-insns-auto limit reached");
+ return false;
+ }
+ }
+
+ return true;
}
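+
+/* Roughly speaking, a function explicitly declared inline is rejected only
+   once it reaches --param max-inline-insns-single, while a function that is
+   merely an automatic inline candidate (e.g. via -finline-functions) is
+   held to the typically smaller --param max-inline-insns-auto limit.  */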
/* Return true when inlining WHAT would create recursive inlining.
return recursive;
}
-/* Recompute heap nodes for each of callees. */
+/* Return true if the call can be hot, i.e. profile feedback is missing
+   or does not show the call to be executed rarely.  */
+static bool
+cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
+{
+ if (profile_info && flag_branch_probabilities
+ && (edge->count
+ <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
+ return false;
+ return true;
+}
+
+/* A cost model driving the inlining heuristics so that edges with the
+   smallest badness are inlined first.  After each inlining the costs of
+   all caller edges of the affected nodes are recomputed, so the metric
+   may accurately depend on properties such as the number of inlinable
+   callers of a function or its body size.
+
+   With profiling we use the number of executions of each edge to drive
+   the cost.  We should also distinguish hot and cold calls, where cold
+   calls are inlined only when the overall code size improves.  */
+
+static int
+cgraph_edge_badness (struct cgraph_edge *edge)
+{
+ if (max_count)
+ {
+ int growth =
+ cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+ growth -= edge->caller->global.insns;
+
+ /* Always prefer inlining that saves code size. */
+ if (growth <= 0)
+ return INT_MIN - growth;
+ return ((int)((double)edge->count * INT_MIN / max_count)) / growth;
+ }
+ else
+ {
+ int nest = MIN (edge->loop_nest, 8);
+ int badness = cgraph_estimate_growth (edge->callee) * 256;
+
+ /* Decrease badness if call is nested. */
+ if (badness > 0)
+ badness >>= nest;
+ else
+ badness <<= nest;
+
+ /* Always make recursive inlining happen after other inlining is done. */
+ if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
+ return badness + 1;
+ else
+ return badness;
+ }
+}
+
+/* Recompute heap nodes for each caller edge of NODE.  */
+
static void
-update_callee_keys (fibheap_t heap, struct fibnode **heap_node,
- struct cgraph_node *node)
+update_caller_keys (fibheap_t heap, struct cgraph_node *node,
+ bitmap updated_nodes)
+{
+ struct cgraph_edge *edge;
+
+ if (!node->local.inlinable || node->local.disregard_inline_limits
+ || node->global.inlined_to)
+ return;
+ if (bitmap_bit_p (updated_nodes, node->uid))
+ return;
+ bitmap_set_bit (updated_nodes, node->uid);
+ node->global.estimated_growth = INT_MIN;
+
+ for (edge = node->callers; edge; edge = edge->next_caller)
+ if (edge->inline_failed)
+ {
+ int badness = cgraph_edge_badness (edge);
+ if (edge->aux)
+ {
+ fibnode_t n = edge->aux;
+ gcc_assert (n->data == edge);
+ if (n->key == badness)
+ continue;
+
+ /* fibheap_replace_key can only decrease a key; if the badness
+ needs to increase, delete the node and re-insert it below.  */
+ if (fibheap_replace_key (heap, n, badness))
+ continue;
+ fibheap_delete_node (heap, edge->aux);
+ }
+ edge->aux = fibheap_insert (heap, badness, edge);
+ }
+}
+
+/* Recompute heap nodes for the caller edges of each of NODE's callees.  */
+
+static void
+update_callee_keys (fibheap_t heap, struct cgraph_node *node,
+ bitmap updated_nodes)
{
struct cgraph_edge *e;
+ node->global.estimated_growth = INT_MIN;
for (e = node->callees; e; e = e->next_callee)
- if (e->inline_failed && heap_node[e->callee->uid])
- fibheap_replace_key (heap, heap_node[e->callee->uid],
- cgraph_estimate_growth (e->callee));
+ if (e->inline_failed)
+ update_caller_keys (heap, e->callee, updated_nodes);
else if (!e->inline_failed)
- update_callee_keys (heap, heap_node, e->callee);
+ update_callee_keys (heap, e->callee, updated_nodes);
}
-/* Enqueue all recursive calls from NODE into queue linked via aux pointers
- in between FIRST and LAST. WHERE is used for bookkeeping while looking
- int calls inlined within NODE. */
+/* Enqueue all recursive calls from NODE into the priority queue, ordered by
+   how much we want to recursively inline each call.  */
+
static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
- struct cgraph_edge **first, struct cgraph_edge **last)
+ fibheap_t heap)
{
+ static int priority;
struct cgraph_edge *e;
for (e = where->callees; e; e = e->next_callee)
if (e->callee == node)
{
- if (!*first)
- *first = e;
- else
- (*last)->aux = e;
- *last = e;
+ /* When profile feedback is available, prioritize by the expected
+ number of executions of the call. Without profile feedback we maintain
+ a simple FIFO queue that orders candidates by recursion depth. */
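+ /* The divisor is max_count / (1<<24), rounded up, so the scaled count
+ never exceeds about 1<<24; the negated key then fits the fibheap key
+ range even with 64-bit gcov counters, and hotter edges get smaller
+ (more negative) keys.  */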
+ fibheap_insert (heap,
+ !max_count ? priority++
+ : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
+ e);
}
for (e = where->callees; e; e = e->next_callee)
if (!e->inline_failed)
- lookup_recursive_calls (node, e->callee, first, last);
+ lookup_recursive_calls (node, e->callee, heap);
+}
+
+/* Find callgraph nodes that close a cycle in the graph.  The
+   resulting hashtab can be used to avoid walking into the cycles.
+   Uses the cgraph nodes' ->aux field, which must be zero before the
+   operation and is zero again afterwards.  */
+
+static void
+cgraph_find_cycles (struct cgraph_node *node, htab_t cycles)
+{
+ struct cgraph_edge *e;
+
+ if (node->aux)
+ {
+ void **slot;
+ slot = htab_find_slot (cycles, node, INSERT);
+ if (!*slot)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Cycle contains %s\n", cgraph_node_name (node));
+ *slot = node;
+ }
+ return;
+ }
+
+ node->aux = node;
+ for (e = node->callees; e; e = e->next_callee)
+ cgraph_find_cycles (e->callee, cycles);
+ node->aux = 0;
+}
+
+/* Leafify the cgraph node.  We have to be careful when recursing so as
+   not to run endlessly around cycles in the callgraph.  We do so by
+   using a hashtab of cycle-entering nodes, as generated by
+   cgraph_find_cycles.  */
+
+static void
+cgraph_flatten_node (struct cgraph_node *node, htab_t cycles)
+{
+ struct cgraph_edge *e;
+
+ for (e = node->callees; e; e = e->next_callee)
+ {
+ /* Inline the call, if possible, and recurse. Be sure we are not
+ entering callgraph cycles here. */
+ if (e->inline_failed
+ && e->callee->local.inlinable
+ && !cgraph_recursive_inlining_p (node, e->callee,
+ &e->inline_failed)
+ && !htab_find (cycles, e->callee))
+ {
+ if (dump_file)
+ fprintf (dump_file, " inlining %s", cgraph_node_name (e->callee));
+ cgraph_mark_inline_edge (e);
+ cgraph_flatten_node (e->callee, cycles);
+ }
+ else if (dump_file)
+ fprintf (dump_file, " !inlining %s", cgraph_node_name (e->callee));
+ }
}
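+
+/* Illustrative example of flattening (assumed source):
+
+     static int add (int a, int b) { return a + b; }
+     static int twice (int x) { return add (x, x); }
+     int __attribute__ ((flatten)) quad (int x) { return twice (twice (x)); }
+
+   Flattening quad inlines twice and, transitively, add, so quad ends up
+   containing no calls where inlining is possible.  */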
/* Decide on recursive inlining: in the case function has recursive calls,
inline until body size reaches given argument. */
-static void
+
+static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node)
{
int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
- struct cgraph_edge *first_call = NULL, *last_call = NULL;
- struct cgraph_edge *last_in_current_depth;
+ int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
+ fibheap_t heap;
struct cgraph_edge *e;
struct cgraph_node *master_clone;
int depth = 0;
/* Make sure that function is small enough to be considered for inlining. */
if (!max_depth
|| cgraph_estimate_size_after_inlining (1, node, node) >= limit)
- return;
- lookup_recursive_calls (node, node, &first_call, &last_call);
- if (!first_call)
- return;
+ return false;
+ heap = fibheap_new ();
+ lookup_recursive_calls (node, node, heap);
+ if (fibheap_empty (heap))
+ {
+ fibheap_delete (heap);
+ return false;
+ }
if (dump_file)
fprintf (dump_file,
- "\nPerforming recursive inlining on %s\n",
+ " Performing recursive inlining on %s\n",
cgraph_node_name (node));
/* We need original clone to copy around. */
- master_clone = cgraph_clone_node (node, 0, 1);
+ master_clone = cgraph_clone_node (node, node->count, 1, false);
master_clone->needed = true;
for (e = master_clone->callees; e; e = e->next_callee)
if (!e->inline_failed)
cgraph_clone_inlined_nodes (e, true);
/* Do the inlining and update list of recursive call during process. */
- last_in_current_depth = last_call;
- while (first_call
- && cgraph_estimate_size_after_inlining (1, node, master_clone) <= limit)
+ while (!fibheap_empty (heap)
+ && (cgraph_estimate_size_after_inlining (1, node, master_clone)
+ <= limit))
{
- struct cgraph_edge *curr = first_call;
+ struct cgraph_edge *curr = fibheap_extract_min (heap);
+ struct cgraph_node *cnode;
+
+ depth = 1;
+ for (cnode = curr->caller;
+ cnode->global.inlined_to; cnode = cnode->callers->caller)
+ if (node->decl == curr->callee->decl)
+ depth++;
+ if (depth > max_depth)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ " maxmal depth reached\n");
+ continue;
+ }
- first_call = first_call->aux;
- curr->aux = NULL;
+ if (max_count)
+ {
+ if (!cgraph_maybe_hot_edge_p (curr))
+ {
+ if (dump_file)
+ fprintf (dump_file, " Not inlining cold call\n");
+ continue;
+ }
+ if (curr->count * 100 / node->count < probability)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ " Probability of edge is too small\n");
+ continue;
+ }
+ }
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ " Inlining call of depth %i", depth);
+ if (node->count)
+ {
+ fprintf (dump_file, " called approx. %.2f times per call",
+ (double)curr->count / node->count);
+ }
+ fprintf (dump_file, "\n");
+ }
cgraph_redirect_edge_callee (curr, master_clone);
cgraph_mark_inline_edge (curr);
- lookup_recursive_calls (node, curr->callee, &first_call, &last_call);
-
- if (last_in_current_depth
- && ++depth >= max_depth)
- break;
+ lookup_recursive_calls (node, curr->callee, heap);
n++;
}
+ if (!fibheap_empty (heap) && dump_file)
+ fprintf (dump_file, " Recursive inlining growth limit met.\n");
- /* Cleanup queue pointers. */
- while (first_call)
- {
- struct cgraph_edge *next = first_call->aux;
- first_call->aux = NULL;
- first_call = next;
- }
+ fibheap_delete (heap);
if (dump_file)
fprintf (dump_file,
"\n Inlined %i times, body grown from %i to %i insns\n", n,
if (node->global.inlined_to == master_clone)
cgraph_remove_node (node);
cgraph_remove_node (master_clone);
+ /* FIXME: Recursive inlining actually reduces the number of calls to
+ the function. At this point we should probably walk the function and
+ its inline clones and compensate the counts accordingly. In practice
+ this probably does not matter much. */
+ return true;
}
/* Set inline_failed for all callers of given function to REASON. */
cgraph_decide_inlining_of_small_functions (void)
{
struct cgraph_node *node;
+ struct cgraph_edge *edge;
+ const char *failed_reason;
fibheap_t heap = fibheap_new ();
- struct fibnode **heap_node =
- xcalloc (cgraph_max_uid, sizeof (struct fibnode *));
- int max_insns = ((HOST_WIDEST_INT) initial_insns
- * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
+ bitmap updated_nodes = BITMAP_ALLOC (NULL);
+
+ if (dump_file)
+ fprintf (dump_file, "\nDeciding on smaller functions:\n");
/* Put all inline candidates into the heap. */
if (!node->local.inlinable || !node->callers
|| node->local.disregard_inline_limits)
continue;
+ if (dump_file)
+ fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
- if (!cgraph_default_inline_p (node))
+ node->global.estimated_growth = INT_MIN;
+ if (!cgraph_default_inline_p (node, &failed_reason))
{
- cgraph_set_inline_failed (node,
- N_("--param max-inline-insns-single limit reached"));
+ cgraph_set_inline_failed (node, failed_reason);
continue;
}
- heap_node[node->uid] =
- fibheap_insert (heap, cgraph_estimate_growth (node), node);
- }
- if (dump_file)
- fprintf (dump_file, "\nDeciding on smaller functions:\n");
- while (overall_insns <= max_insns && (node = fibheap_extract_min (heap)))
+ for (edge = node->callers; edge; edge = edge->next_caller)
+ if (edge->inline_failed)
+ {
+ gcc_assert (!edge->aux);
+ edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
+ }
+ }
+ while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
{
- struct cgraph_edge *e, *next;
int old_insns = overall_insns;
+ struct cgraph_node *where;
+ int growth =
+ cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+
+ growth -= edge->caller->global.insns;
- heap_node[node->uid] = NULL;
if (dump_file)
- fprintf (dump_file,
- "\nConsidering %s with %i insns\n"
- " Estimated growth is %+i insns.\n",
- cgraph_node_name (node), node->global.insns,
- cgraph_estimate_growth (node));
- if (!cgraph_default_inline_p (node))
{
- cgraph_set_inline_failed (node,
- N_("--param max-inline-insns-single limit reached after inlining into the callee"));
- continue;
+ fprintf (dump_file,
+ "\nConsidering %s with %i insns to be inlined into %s\n"
+ " Estimated growth after inlined into all callees is %+i insns.\n"
+ " Estimated badness is %i.\n",
+ cgraph_node_name (edge->callee),
+ edge->callee->global.insns,
+ cgraph_node_name (edge->caller),
+ cgraph_estimate_growth (edge->callee),
+ cgraph_edge_badness (edge));
+ if (edge->count)
+ fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
}
- for (e = node->callers; e; e = next)
- {
- next = e->next_caller;
- if (e->inline_failed)
- {
- struct cgraph_node *where;
-
- if (cgraph_recursive_inlining_p (e->caller, e->callee,
- &e->inline_failed)
- || !cgraph_check_inline_limits (e->caller, e->callee,
- &e->inline_failed))
- {
- if (dump_file)
- fprintf (dump_file, " Not inlining into %s:%s.\n",
- cgraph_node_name (e->caller), e->inline_failed);
- continue;
- }
- next = cgraph_mark_inline (e);
- where = e->caller;
- if (where->global.inlined_to)
- where = where->global.inlined_to;
+ gcc_assert (edge->aux);
+ edge->aux = NULL;
+ if (!edge->inline_failed)
+ continue;
- if (heap_node[where->uid])
- fibheap_replace_key (heap, heap_node[where->uid],
- cgraph_estimate_growth (where));
+ /* When we do not have profile information we have no way to weight
+ the position of a call within the calling procedure. This means that
+ if a call to function A from function B looks profitable to inline,
+ the recursive call to A inside the inlined copy of A in B will look
+ profitable too, and we would keep inlining until we reach the maximal
+ function growth. This is not a good idea, so prohibit such recursive
+ inlining.
+ ??? Once call frequencies are taken into account we might not need
+ this restriction. */
+ if (!max_count)
+ {
+ where = edge->caller;
+ while (where->global.inlined_to)
+ {
+ if (where->decl == edge->callee->decl)
+ break;
+ where = where->callers->caller;
+ }
+ if (where->global.inlined_to)
+ {
+ edge->inline_failed
+ = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
if (dump_file)
- fprintf (dump_file,
- " Inlined into %s which now has %i insns.\n",
- cgraph_node_name (e->caller),
- e->caller->global.insns);
+ fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
+ continue;
}
}
- cgraph_decide_recursive_inlining (node);
-
- /* Similarly all functions called by the function we just inlined
- are now called more times; update keys. */
- update_callee_keys (heap, heap_node, node);
+ if (!cgraph_maybe_hot_edge_p (edge) && growth > 0)
+ {
+ if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
+ &edge->inline_failed))
+ {
+ edge->inline_failed =
+ N_("call is unlikely");
+ if (dump_file)
+ fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
+ }
+ continue;
+ }
+ if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
+ {
+ if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
+ &edge->inline_failed))
+ {
+ if (dump_file)
+ fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
+ }
+ continue;
+ }
+ if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
+ &edge->inline_failed))
+ {
+ where = edge->caller;
+ if (where->global.inlined_to)
+ where = where->global.inlined_to;
+ if (!cgraph_decide_recursive_inlining (where))
+ continue;
+ update_callee_keys (heap, where, updated_nodes);
+ }
+ else
+ {
+ struct cgraph_node *callee;
+ if (!cgraph_check_inline_limits (edge->caller, edge->callee,
+ &edge->inline_failed))
+ {
+ if (dump_file)
+ fprintf (dump_file, " Not inlining into %s:%s.\n",
+ cgraph_node_name (edge->caller), edge->inline_failed);
+ continue;
+ }
+ callee = edge->callee;
+ cgraph_mark_inline_edge (edge);
+ update_callee_keys (heap, callee, updated_nodes);
+ }
+ where = edge->caller;
+ if (where->global.inlined_to)
+ where = where->global.inlined_to;
+
+ /* Our profitability metric can depend on local properties such as the
+ number of inlinable calls and the size of the function body. After
+ inlining, these properties might change for the function we inlined
+ into (since its body size changed) and for the functions called by the
+ function we inlined (since the number of their inlinable callers might
+ change). */
+ update_caller_keys (heap, where, updated_nodes);
+ bitmap_clear (updated_nodes);
if (dump_file)
fprintf (dump_file,
+ " Inlined into %s which now has %i insns.\n",
+ cgraph_node_name (edge->caller),
+ edge->caller->global.insns);
+ if (dump_file)
+ fprintf (dump_file,
" Inlined for a net change of %+i insns.\n",
overall_insns - old_insns);
}
- while ((node = fibheap_extract_min (heap)) != NULL)
- if (!node->local.disregard_inline_limits)
- cgraph_set_inline_failed (node, N_("--param inline-unit-growth limit reached"));
+ while ((edge = fibheap_extract_min (heap)) != NULL)
+ {
+ gcc_assert (edge->aux);
+ edge->aux = NULL;
+ if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
+ && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
+ &edge->inline_failed))
+ edge->inline_failed = N_("--param inline-unit-growth limit reached");
+ }
fibheap_delete (heap);
- free (heap_node);
+ BITMAP_FREE (updated_nodes);
}
/* Decide on the inlining. We do so in the topological order to avoid
int old_insns = 0;
int i;
+ timevar_push (TV_INLINE_HEURISTICS);
+ max_count = 0;
for (node = cgraph_nodes; node; node = node->next)
- initial_insns += node->local.self_insns;
+ {
+ struct cgraph_edge *e;
+ initial_insns += node->local.self_insns;
+ for (e = node->callees; e; e = e->next_callee)
+ if (max_count < e->count)
+ max_count = e->count;
+ }
overall_insns = initial_insns;
+ gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
+
+ max_insns = ((HOST_WIDEST_INT) overall_insns
+ * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
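+
+ /* For instance, with --param inline-unit-growth set to 50 (an assumed,
+ illustrative value), a unit of 10000 insns may grow to at most
+ 15000 insns via inlining of small functions.  */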
nnodes = cgraph_postorder (order);
node = order[i];
+ /* Handle nodes to be flattened, but don't update overall unit size. */
+ if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
+ {
+ int old_overall_insns = overall_insns;
+ htab_t cycles;
+ if (dump_file)
+ fprintf (dump_file,
+ "Leafifying %s\n", cgraph_node_name (node));
+ cycles = htab_create (7, htab_hash_pointer, htab_eq_pointer, NULL);
+ cgraph_find_cycles (node, cycles);
+ cgraph_flatten_node (node, cycles);
+ htab_delete (cycles);
+ overall_insns = old_overall_insns;
+ /* We don't need to consider always_inline functions inside the flattened
+ function anymore. */
+ continue;
+ }
+
if (!node->local.disregard_inline_limits)
continue;
if (dump_file)
}
if (!flag_really_no_inline)
- {
- cgraph_decide_inlining_of_small_functions ();
+ cgraph_decide_inlining_of_small_functions ();
+ if (!flag_really_no_inline
+ && flag_inline_functions_called_once)
+ {
if (dump_file)
fprintf (dump_file, "\nDeciding on functions called once:\n");
}
}
- /* We will never output extern functions we didn't inline.
- ??? Perhaps we can prevent accounting of growth of external
- inline functions. */
- cgraph_remove_unreachable_nodes (false, dump_file);
-
if (dump_file)
fprintf (dump_file,
"\nInlined %i calls, eliminated %i functions, "
ncalls_inlined, nfunctions_inlined, initial_insns,
overall_insns);
free (order);
+ timevar_pop (TV_INLINE_HEURISTICS);
}
/* Decide on the inlining. We do so in the topological order to avoid
expenses on updating data structures. */
-void
-cgraph_decide_inlining_incrementally (struct cgraph_node *node)
+bool
+cgraph_decide_inlining_incrementally (struct cgraph_node *node, bool early)
{
struct cgraph_edge *e;
+ bool inlined = false;
+ const char *failed_reason;
/* First of all look for always inline functions. */
for (e = node->callees; e; e = e->next_callee)
/* ??? It is possible that renaming variable removed the function body
in duplicate_decls. See gcc.c-torture/compile/20011119-2.c */
&& DECL_SAVED_TREE (e->callee->decl))
- cgraph_mark_inline (e);
+ {
+ if (dump_file && early)
+ fprintf (dump_file, " Early inlining %s into %s\n",
+ cgraph_node_name (e->callee), cgraph_node_name (node));
+ cgraph_mark_inline (e);
+ inlined = true;
+ }
/* Now do the automatic inlining. */
if (!flag_really_no_inline)
&& e->inline_failed
&& !e->callee->local.disregard_inline_limits
&& !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
+ && (!early
+ || (cgraph_estimate_size_after_inlining (1, e->caller, node)
+ <= e->caller->global.insns))
&& cgraph_check_inline_limits (node, e->callee, &e->inline_failed)
&& DECL_SAVED_TREE (e->callee->decl))
{
- if (cgraph_default_inline_p (e->callee))
- cgraph_mark_inline (e);
- else
- e->inline_failed
- = N_("--param max-inline-insns-single limit reached");
+ if (cgraph_default_inline_p (e->callee, &failed_reason))
+ {
+ if (dump_file && early)
+ fprintf (dump_file, " Early inlining %s into %s\n",
+ cgraph_node_name (e->callee), cgraph_node_name (node));
+ cgraph_mark_inline (e);
+ inlined = true;
+ }
+ else if (!early)
+ e->inline_failed = failed_reason;
}
+ if (early && inlined)
+ {
+ push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+ tree_register_cfg_hooks ();
+ current_function_decl = node->decl;
+ optimize_inline_calls (current_function_decl);
+ node->local.self_insns = node->global.insns;
+ current_function_decl = NULL;
+ pop_cfun ();
+ }
+ return inlined;
}
/* When inlining shall be performed. */
0, /* static_pass_number */
TV_INTEGRATION, /* tv_id */
0, /* properties_required */
- PROP_trees, /* properties_provided */
+ PROP_cfg, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_cgraph | TODO_dump_func, /* todo_flags_finish */
+ 0 /* letter */
+};
+
+/* Do inlining of small functions. Doing so early helps profiling and other
+ passes to be somewhat more effective and avoids some code duplication in
+ the later, real inlining pass for testcases with very many function
+ calls. */
+static void
+cgraph_early_inlining (void)
+{
+ struct cgraph_node *node;
+ int nnodes;
+ struct cgraph_node **order =
+ xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
+ int i;
+
+ if (sorrycount || errorcount)
+ return;
+#ifdef ENABLE_CHECKING
+ for (node = cgraph_nodes; node; node = node->next)
+ gcc_assert (!node->aux);
+#endif
+
+ nnodes = cgraph_postorder (order);
+ for (i = nnodes - 1; i >= 0; i--)
+ {
+ node = order[i];
+ if (node->analyzed && node->local.inlinable
+ && (node->needed || node->reachable)
+ && node->callers)
+ cgraph_decide_inlining_incrementally (node, true);
+ }
+ cgraph_remove_unreachable_nodes (true, dump_file);
+#ifdef ENABLE_CHECKING
+ for (node = cgraph_nodes; node; node = node->next)
+ gcc_assert (!node->global.inlined_to);
+#endif
+ free (order);
+}
+
+/* When inlining shall be performed. */
+static bool
+cgraph_gate_early_inlining (void)
+{
+ return flag_inline_trees && flag_early_inlining;
+}
+
+struct tree_opt_pass pass_early_ipa_inline =
+{
+ "einline", /* name */
+ cgraph_gate_early_inlining, /* gate */
+ cgraph_early_inlining, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_INTEGRATION, /* tv_id */
+ 0, /* properties_required */
+ PROP_cfg, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_dump_cgraph | TODO_dump_func, /* todo_flags_finish */