/* Return true when function N is small enough to be inlined. */
bool
-cgraph_default_inline_p (struct cgraph_node *n)
+cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
- if (!DECL_INLINE (n->decl) || !DECL_SAVED_TREE (n->decl))
- return false;
+ /* NOTE(review): REASON, when non-NULL, receives a static translatable
+    string describing why N was rejected; it is only written on the
+    false paths and left untouched on success.  */
+ if (!DECL_INLINE (n->decl))
+ {
+ if (reason)
+ *reason = N_("function not inlinable");
+ return false;
+ }
+
+ /* No saved body means there is nothing to inline from.  */
+ if (!DECL_SAVED_TREE (n->decl))
+ {
+ if (reason)
+ *reason = N_("function body not available");
+ return false;
+ }
+
 if (DECL_DECLARED_INLINE_P (n->decl))
- return n->global.insns < MAX_INLINE_INSNS_SINGLE;
+ {
+ /* Explicitly declared inline: compare against the
+    max-inline-insns-single parameter.  */
+ if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
+ {
+ if (reason)
+ *reason = N_("--param max-inline-insns-single limit reached");
+ return false;
+ }
+ }
 else
- return n->global.insns < MAX_INLINE_INSNS_AUTO;
+ {
+ /* Auto-inline candidate: compare against the
+    max-inline-insns-auto parameter instead.  */
+ if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
+ {
+ if (reason)
+ *reason = N_("--param max-inline-insns-auto limit reached");
+ return false;
+ }
+ }
+
+ /* All checks passed; N is an acceptable inline candidate.  */
+ return true;
}
/* Return true when inlining WHAT would create recursive inlining.
metrics may accurately depend on values such as number of inlinable callers
of the function or function body size.
- For the moment we use estimated growth caused by inlining callee into all
- it's callers for driving the inlining but once we have loop depth or
- frequency information readily available we should do better.
-
With profiling we use number of executions of each edge to drive the cost.
We also should distinguish hot and cold calls where the cold calls are
inlined into only when code size is overall improved.
-
- Value INT_MAX can be returned to prevent function from being inlined.
*/
static int
{
int nest = MIN (edge->loop_nest, 8);
int badness = cgraph_estimate_growth (edge->callee) * 256;
-
- badness >>= nest;
+
+ /* Decrease badness if call is nested. */
+ if (badness > 0)
+ badness >>= nest;
+ else
+ badness <<= nest;
/* Make recursive inlining happen always after other inlining is done. */
if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
{
struct cgraph_node *node;
struct cgraph_edge *edge;
+ const char *failed_reason;
fibheap_t heap = fibheap_new ();
bitmap updated_nodes = BITMAP_ALLOC (NULL);
fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
node->global.estimated_growth = INT_MIN;
- if (!cgraph_default_inline_p (node))
+ if (!cgraph_default_inline_p (node, &failed_reason))
{
- cgraph_set_inline_failed (node,
- N_("--param max-inline-insns-single limit reached"));
+ cgraph_set_inline_failed (node, failed_reason);
continue;
}
}
continue;
}
- if (!cgraph_default_inline_p (edge->callee))
+ if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
{
if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
&edge->inline_failed))
{
- edge->inline_failed =
- N_("--param max-inline-insns-single limit reached after inlining into the callee");
if (dump_file)
fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
}
{
struct cgraph_edge *e;
bool inlined = false;
+ const char *failed_reason;
/* First of all look for always inline functions. */
for (e = node->callees; e; e = e->next_callee)
&& cgraph_check_inline_limits (node, e->callee, &e->inline_failed)
&& DECL_SAVED_TREE (e->callee->decl))
{
- if (cgraph_default_inline_p (e->callee))
+ if (cgraph_default_inline_p (e->callee, &failed_reason))
{
if (dump_file && early)
fprintf (dump_file, " Early inlining %s into %s\n",
inlined = true;
}
else if (!early)
- e->inline_failed
- = N_("--param max-inline-insns-single limit reached");
+ e->inline_failed = failed_reason;
}
if (early && inlined)
{