In that case just go ahead and re-use it. */
if (!e->callee->callers->next_caller
&& cgraph_can_remove_if_no_direct_calls_p (e->callee)
+ /* Inlining might enable more devirtualization, so we want to remove
+ such functions only after all devirtualizable virtual calls have been
+ processed. Lacking "may" edges in the callgraph, we just preserve
+ them past inlining. */
+ && (!DECL_VIRTUAL_P (e->callee->decl)
+ || (!DECL_COMDAT (e->callee->decl) && !DECL_EXTERNAL (e->callee->decl)))
/* Don't reuse if more than one function shares a comdat group.
If the other function(s) are needed, we need to emit even
this function out of line. */
overall_size += new_size - old_size;
ncalls_inlined++;
- if (flag_indirect_inlining)
+ /* FIXME: We should remove the optimize check after we ensure we never run
+ IPA passes when not optimizing. */
+ if (flag_indirect_inlining && optimize)
return ipa_propagate_indirect_call_infos (curr, new_edges);
else
return false;
we decide not to inline for different reasons, but it is not a big deal,
as in that case we will keep the body around but also avoid
some inlining. */
- if (cgraph_only_called_directly_p (node)
+ if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
&& !DECL_EXTERNAL (node->decl) && !self_recursive)
growth -= node->global.size;
*reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
return false;
}
-
if (!n->analyzed)
{
if (reason)
*reason = CIF_BODY_NOT_AVAILABLE;
return false;
}
+ if (cgraph_function_body_availability (n) <= AVAIL_OVERWRITABLE)
+ {
+ if (reason)
+ *reason = CIF_OVERWRITABLE;
+ return false;
+ }
+
if (DECL_DECLARED_INLINE_P (decl))
{
return badness;
}
+/* Recompute badness of EDGE and update its key in HEAP if needed. */
+static void
+update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
+{
+ int badness = cgraph_edge_badness (edge, false);
+ if (edge->aux)
+ {
+ fibnode_t n = (fibnode_t) edge->aux;
+ gcc_checking_assert (n->data == edge);
+
+ /* fibheap_replace_key can only decrease keys.
+ When we increase a key, we do not update the heap
+ and instead re-insert the element once it becomes
+ the minimum of the heap. */
+ if (badness < n->key)
+ {
+ fibheap_replace_key (heap, n, badness);
+ gcc_checking_assert (n->key == badness);
+ }
+ }
+ else
+ edge->aux = fibheap_insert (heap, badness, edge);
+}
+
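For context, the lazy-update trick in update_edge_key above is worth spelling out: fibheap_replace_key can only decrease a key, so key increases are deferred, and a stale entry is simply re-inserted with its fresh badness once it surfaces at the top of the heap. Below is a minimal, self-contained sketch of the same pattern, using a toy binary min-heap in place of GCC's fibheap; every name in it is illustrative, not GCC API.

#include <stdio.h>

#define MAXN 64

struct entry { int key; int id; };      /* key = badness snapshot */
static struct entry heap[MAXN];
static int heap_len;
static int badness[MAXN];               /* current (true) badness */

static void
swap_entries (int i, int j)
{
  struct entry t = heap[i];
  heap[i] = heap[j];
  heap[j] = t;
}

static void
sift_up (int i)
{
  while (i > 0 && heap[(i - 1) / 2].key > heap[i].key)
    {
      swap_entries (i, (i - 1) / 2);
      i = (i - 1) / 2;
    }
}

static void
sift_down (int i)
{
  for (;;)
    {
      int l = 2 * i + 1, r = l + 1, m = i;
      if (l < heap_len && heap[l].key < heap[m].key)
        m = l;
      if (r < heap_len && heap[r].key < heap[m].key)
        m = r;
      if (m == i)
        return;
      swap_entries (i, m);
      i = m;
    }
}

static void
push (int id, int key)
{
  heap[heap_len].key = key;
  heap[heap_len].id = id;
  sift_up (heap_len++);
}

/* Like the consumer loop in the inliner: pop entries until one whose
   snapshot matches the current badness appears; a stale entry is
   re-inserted with its fresh key instead of being updated in place.  */
static int
pop_min (void)
{
  while (heap_len)
    {
      struct entry e = heap[0];
      heap[0] = heap[--heap_len];
      sift_down (0);
      if (e.key == badness[e.id])
        return e.id;
      push (e.id, badness[e.id]);
    }
  return -1;
}

int
main (void)
{
  badness[0] = 5; push (0, 5);
  badness[1] = 1; push (1, 1);
  badness[1] = 9;               /* key increased; heap left stale on purpose */
  printf ("%d\n", pop_min ());  /* 0: entry 1 was re-queued at key 9 */
  printf ("%d\n", pop_min ());  /* 1 */
  return 0;
}

The payoff of the deferral is that only decrease-key (cheap in a fibonacci heap) is ever performed eagerly; an increase costs one extra extract/insert, and only when the stale entry actually reaches the top.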
/* Recompute heap nodes for each caller edge. */
static void
cgraph_inline_failed_t failed_reason;
if (!node->local.inlinable
+ || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
|| node->global.inlined_to)
return;
- if (bitmap_bit_p (updated_nodes, node->uid))
+ if (!bitmap_set_bit (updated_nodes, node->uid))
return;
- bitmap_set_bit (updated_nodes, node->uid);
node->global.estimated_growth = INT_MIN;
- if (!node->local.inlinable)
- return;
/* See if there is something to do. */
for (edge = node->callers; edge; edge = edge->next_caller)
if (edge->inline_failed)
for (; edge; edge = edge->next_caller)
if (edge->inline_failed)
+ update_edge_key (heap, edge);
+}
+
+/* Recompute heap nodes for each uninlined call.
+ This is used when we know that edge badnesses are only going to increase
+ (we introduced a new call site) and thus all we need to do is insert the
+ newly created edges into the heap. */
+
+static void
+update_callee_keys (fibheap_t heap, struct cgraph_node *node,
+ bitmap updated_nodes)
+{
+ struct cgraph_edge *e = node->callees;
+ node->global.estimated_growth = INT_MIN;
+
+ if (!e)
+ return;
+ while (true)
+ if (!e->inline_failed && e->callee->callees)
+ e = e->callee->callees;
+ else
{
- int badness = cgraph_edge_badness (edge, false);
- if (edge->aux)
+ if (e->inline_failed
+ && e->callee->local.inlinable
+ && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
+ && !bitmap_bit_p (updated_nodes, e->callee->uid))
{
- fibnode_t n = (fibnode_t) edge->aux;
- gcc_assert (n->data == edge);
- if (n->key == badness)
- continue;
-
- /* fibheap_replace_key only decrease the keys.
- When we increase the key we do not update heap
- and instead re-insert the element once it becomes
- a minium of heap. */
- if (badness < n->key)
+ node->global.estimated_growth = INT_MIN;
+ /* If the function becomes uninlinable, we need to remove it
+ from the heap. */
+ if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
+ update_caller_keys (heap, e->callee, updated_nodes);
+ else
+ /* Otherwise update just edge E. */
+ update_edge_key (heap, e);
+ }
+ if (e->next_callee)
+ e = e->next_callee;
+ else
+ {
+ do
{
- fibheap_replace_key (heap, n, badness);
- gcc_assert (n->key == badness);
- continue;
+ if (e->caller == node)
+ return;
+ e = e->caller->callers;
}
+ while (!e->next_callee);
+ e = e->next_callee;
}
- else
- edge->aux = fibheap_insert (heap, badness, edge);
}
}
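The new update_callee_keys walks the tree of inline clones without recursion or an explicit stack: it descends through e->callee->callees for inlined edges and backtracks through e->caller->callers (an inlined node has exactly one caller edge) until a sibling edge is found. A stripped-down sketch of that stackless pre-order walk over a first-child/next-sibling tree with parent links follows; the names are illustrative, not GCC's, and the gating on e->inline_failed is simplified away.

#include <stdio.h>

struct node
{
  const char *name;
  struct node *child;    /* like callee->callees of an inlined edge */
  struct node *sibling;  /* like e->next_callee */
  struct node *parent;   /* like e->caller; unique for inline clones */
};

/* Visit every node below ROOT in pre-order, using parent links to
   backtrack instead of recursion or an explicit stack.  */
static void
walk (struct node *root)
{
  struct node *n = root->child;
  if (!n)
    return;
  for (;;)
    {
      printf ("%s\n", n->name);   /* the "update_edge_key" step */
      if (n->child)
        n = n->child;             /* descend, like following inlined edges */
      else
        {
          while (!n->sibling)     /* no more siblings: climb back up */
            {
              n = n->parent;
              if (n == root)
                return;
            }
          n = n->sibling;
        }
    }
}

int
main (void)
{
  struct node d = { "d", 0, 0, 0 };
  struct node c = { "c", 0, 0, 0 };
  struct node b = { "b", &d, &c, 0 };
  struct node a = { "a", &b, 0, 0 };
  b.parent = &a; c.parent = &a; d.parent = &b;
  walk (&a);                      /* prints b, d, c */
  return 0;
}

In the patch itself the visit (key refresh) applies only to edges with e->inline_failed set, and descent only to edges without it, but the traversal skeleton is the same.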
Walk recursively into all inline clones. */
static void
-update_callee_keys (fibheap_t heap, struct cgraph_node *node,
- bitmap updated_nodes)
+update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
+ bitmap updated_nodes)
{
struct cgraph_edge *e = node->callees;
node->global.estimated_growth = INT_MIN;
struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
gcc_assert (!edge->aux);
- edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
+ if (edge->callee->local.inlinable
+ && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
+ edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
}
}
continue;
if (flag_indirect_inlining)
add_new_edges_to_heap (heap, new_indirect_edges);
- update_callee_keys (heap, where, updated_nodes);
+ update_all_callee_keys (heap, where, updated_nodes);
}
else
{
continue;
}
callee = edge->callee;
+ gcc_checking_assert (!callee->global.inlined_to);
cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
if (flag_indirect_inlining)
add_new_edges_to_heap (heap, new_indirect_edges);
- update_callee_keys (heap, callee, updated_nodes);
+ /* We inlined the last offline copy into the body. This might cause
+ callees of the function to have fewer call sites, so they
+ may need updating. */
+ if (callee->global.inlined_to)
+ update_all_callee_keys (heap, callee, updated_nodes);
+ else
+ update_callee_keys (heap, edge->callee, updated_nodes);
}
where = edge->caller;
if (where->global.inlined_to)
if (node->callers
&& !node->callers->next_caller
- && cgraph_only_called_directly_p (node)
+ && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
&& node->local.inlinable
+ && cgraph_function_body_availability (node) >= AVAIL_AVAILABLE
&& node->callers->inline_failed
&& node->callers->caller != node
&& node->callers->caller->global.inlined_to != node
&& !node->callers->call_stmt_cannot_inline_p
- && !DECL_EXTERNAL (node->decl)
- && !DECL_COMDAT (node->decl))
+ && !DECL_EXTERNAL (node->decl))
{
cgraph_inline_failed_t reason;
old_size = overall_size;
unsigned int todo = 0;
int iterations = 0;
- if (sorrycount || errorcount)
+ if (seen_error ())
return 0;
if (!optimize
bool rhs_free = false;
bool lhs_free = false;
- while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
+ while (handled_component_p (inner_lhs)
+ || TREE_CODE (inner_lhs) == MEM_REF)
inner_lhs = TREE_OPERAND (inner_lhs, 0);
while (handled_component_p (inner_rhs)
- || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
+ || TREE_CODE (inner_rhs) == ADDR_EXPR
+ || TREE_CODE (inner_rhs) == MEM_REF)
inner_rhs = TREE_OPERAND (inner_rhs, 0);
|| (TREE_CODE (inner_lhs) == SSA_NAME
&& TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
lhs_free = true;
- if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
+ if (lhs_free
+ && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
rhs_free = true;
if (lhs_free && rhs_free)
return true;
time_inlining_benefit += cost;
size_inlining_benefit += cost;
}
- for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
+ for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
if (!VOID_TYPE_P (TREE_TYPE (arg)))
{
int cost = estimate_move_cost (TREE_TYPE (arg));
/* Estimate the stack size for the function. But not at -O0,
because estimated_stack_frame_size is quadratic. */
- self_stack_size = optimize ? estimated_stack_frame_size () : 0;
+ self_stack_size = optimize ? estimated_stack_frame_size (node->decl) : 0;
inline_summary (node)->estimated_self_stack_size = self_stack_size;
node->global.estimated_stack_size = self_stack_size;
node->global.stack_frame_offset = 0;
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
- ipa_initialize_node_params (node);
- ipa_detect_param_modifications (node);
- ipa_analyze_params_uses (node);
- ipa_compute_jump_functions (node);
-
- if (dump_file)
+ ipa_analyze_node (node);
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
ipa_print_node_params (dump_file, node);
ipa_print_node_jump_functions (dump_file, node);
current_function_decl = node->decl;
compute_inline_parameters (node);
- if (flag_indirect_inlining)
+ /* FIXME: We should remove the optimize check after we ensure we never run
+ IPA passes when not optimizing. */
+ if (flag_indirect_inlining && optimize)
inline_indirect_intraprocedural_analysis (node);
current_function_decl = NULL;