#include "tree-flow.h"
#include "ipa-prop.h"
#include "lto-streamer.h"
+#include "data-streamer.h"
+#include "tree-streamer.h"
#include "ipa-inline.h"
#include "alloc-pool.h"
for (i = 0; VEC_iterate (condition, info->conds, i, c); i++)
{
- tree val = VEC_index (tree, known_vals, c->operand_num);
+ tree val;
tree res;
+      /* We allow the call stmt to have fewer arguments than the callee
+         function (especially for K&R style programs).  So we bound-check
+         here.  */
+ if (c->operand_num < (int)VEC_length (tree, known_vals))
+ val = VEC_index (tree, known_vals, c->operand_num);
+ else
+ val = NULL;
+
if (!val)
{
clause |= 1 << (i + predicate_first_dynamic_condition);
evaluate_conditions_for_edge (struct cgraph_edge *e, bool inline_p)
{
clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
- struct inline_summary *info = inline_summary (e->callee);
+ struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
+ struct inline_summary *info = inline_summary (callee);
int i;
if (ipa_node_params_vector && info->conds
if (cst)
VEC_replace (tree, known_vals, i, cst);
}
- clause = evaluate_conditions_for_known_args (e->callee,
+ clause = evaluate_conditions_for_known_args (callee,
inline_p, known_vals);
VEC_free (tree, heap, known_vals);
}
bool inlined_to_p = false;
struct cgraph_edge *edge;
- info->entry = false;
+ info->entry = 0;
VEC_safe_grow_cleared (tree, heap, known_vals, count);
for (i = 0; i < count; i++)
{
for (edge = node->callees; edge; edge = edge->next_callee)
{
struct inline_edge_summary *es = inline_edge_summary (edge);
+ struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee, NULL);
fprintf (f, "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i time: %2i callee size:%2i stack:%2i",
- indent, "", cgraph_node_name (edge->callee),
- edge->callee->uid,
+ indent, "", cgraph_node_name (callee),
+ callee->uid,
!edge->inline_failed ? "inlined"
: cgraph_inline_failed_string (edge->inline_failed),
indent, "",
edge->frequency,
es->call_stmt_size,
es->call_stmt_time,
- (int)inline_summary (edge->callee)->size,
- (int)inline_summary (edge->callee)->estimated_stack_size);
+ (int)inline_summary (callee)->size,
+ (int)inline_summary (callee)->estimated_stack_size);
if (es->predicate)
{
fprintf (f, " predicate: ");
{
fprintf (f, "%*sStack frame offset %i, callee self size %i, callee size %i\n",
indent+2, "",
- (int)inline_summary (edge->callee)->stack_frame_offset,
- (int)inline_summary (edge->callee)->estimated_self_stack_size,
- (int)inline_summary (edge->callee)->estimated_stack_size);
- dump_inline_edge_summary (f, indent+2, edge->callee, info);
+ (int)inline_summary (callee)->stack_frame_offset,
+ (int)inline_summary (callee)->estimated_self_stack_size,
+ (int)inline_summary (callee)->estimated_stack_size);
+ dump_inline_edge_summary (f, indent+2, callee, info);
}
}
for (edge = node->indirect_calls; edge; edge = edge->next_callee)
/* Do not inline calls where we cannot trivially work around
   mismatches in argument or return types.  */
if (edge->callee
- && !gimple_check_call_matching_types (stmt, edge->callee->decl))
+ && cgraph_function_or_thunk_node (edge->callee, NULL)
+ && !gimple_check_call_matching_types (stmt,
+ cgraph_function_or_thunk_node (edge->callee,
+ NULL)->decl))
{
edge->call_stmt_cannot_inline_p = true;
gimple_call_set_cannot_inline (stmt, true);
/* See if we can remap condition operand to caller's operand.
Otherwise give up. */
if (!operand_map
+ || (int)VEC_length (int, operand_map) <= c->operand_num
|| VEC_index (int, operand_map, c->operand_num) == -1)
cond_predicate = true_predicate ();
else
if (!e->inline_failed)
remap_edge_predicates (e->callee, info, callee_info, operand_map,
possible_truths, toplev_predicate);
+ else
+ edge_set_predicate (e, toplev_predicate);
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
e->frequency = 0;
}
}
+ else
+ edge_set_predicate (e, toplev_predicate);
}
}
struct inline_edge_summary *es = inline_edge_summary (edge);
gcc_checking_assert (edge->inline_failed);
- estimate_node_size_and_time (edge->callee,
+ estimate_node_size_and_time (cgraph_function_or_thunk_node (edge->callee, NULL),
evaluate_conditions_for_edge (edge, true),
&size, &time);
do_estimate_edge_growth (struct cgraph_edge *edge)
{
int size;
+ struct cgraph_node *callee;
/* When we do caching, use do_estimate_edge_time to populate the entry. */
gcc_checking_assert (size);
return size - (size > 0);
}
+ callee = cgraph_function_or_thunk_node (edge->callee, NULL);
/* Early inliner runs without caching, go ahead and do the dirty work. */
gcc_checking_assert (edge->inline_failed);
- estimate_node_size_and_time (edge->callee,
+ estimate_node_size_and_time (callee,
evaluate_conditions_for_edge (edge, true),
&size, NULL);
gcc_checking_assert (inline_edge_summary (edge)->call_stmt_size);
}
-/* Estimate the growth caused by inlining NODE into all callees. */
+/* Accumulator passed through cgraph_for_node_and_aliases when summing
+   the growth over all callers of a node (and of its aliases).  */
+
+struct growth_data
+{
+  bool self_recursive;
+  int growth;
+};
-int
-do_estimate_growth (struct cgraph_node *node)
+
+/* Worker for do_estimate_growth.  Collect growth for all callers.  */
+
+static bool
+do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
-  int growth = 0;
  struct cgraph_edge *e;
-  bool self_recursive = false;
-  struct inline_summary *info = inline_summary (node);
+  struct growth_data *d = (struct growth_data *) data;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node
	  || (e->caller->global.inlined_to
	      && e->caller->global.inlined_to == node))
-	self_recursive = true;
-      growth += estimate_edge_growth (e);
+	d->self_recursive = true;
+      d->growth += estimate_edge_growth (e);
    }
-
+  return false;
+}
+
+
+/* Estimate the growth caused by inlining NODE into all callees.  */
+
+int
+do_estimate_growth (struct cgraph_node *node)
+{
+  /* Initializers must follow the field order of growth_data:
+     self_recursive first, then growth.  */
+  struct growth_data d = { false, 0 };
+  struct inline_summary *info = inline_summary (node);
+
+  cgraph_for_node_and_aliases (node, do_estimate_growth_1, &d, true);
  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths. */
-  if (self_recursive)
-    growth = growth < info->size ? info->size : growth;
+  if (d.self_recursive)
+    d.growth = d.growth < info->size ? info->size : d.growth;
  else
    {
-      if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
-	  && !DECL_EXTERNAL (node->decl))
-	growth -= info->size;
+      if (!DECL_EXTERNAL (node->decl)
+	  && cgraph_will_be_removed_from_program_if_no_direct_calls (node))
+	d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple units since they
	 come from various template instantiations.  Take this into account. */
      else if (DECL_COMDAT (node->decl)
	       && cgraph_can_remove_if_no_direct_calls_p (node))
-	growth -= (info->size
-		   * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY)) + 50) / 100;
+	d.growth -= (info->size
+		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY)) + 50) / 100;
    }
  if (node_growth_cache)
    {
      if ((int)VEC_length (int, node_growth_cache) <= node->uid)
	VEC_safe_grow_cleared (int, heap, node_growth_cache, cgraph_max_uid);
-      VEC_replace (int, node_growth_cache, node->uid, growth + (growth >= 0));
+      VEC_replace (int, node_growth_cache, node->uid, d.growth + (d.growth >= 0));
    }
-  return growth;
+  return d.growth;
}
ipa_register_cgraph_hooks ();
FOR_EACH_DEFINED_FUNCTION (node)
+ if (!node->alias)
inline_analyze_function (node);
}
do
{
gcc_assert (k <= MAX_CLAUSES);
- clause = out.clause[k++] = lto_input_uleb128 (ib);
+ clause = out.clause[k++] = streamer_read_uhwi (ib);
}
while (clause);
+
+ /* Zero-initialize the remaining clauses in OUT. */
+ while (k <= MAX_CLAUSES)
+ out.clause[k++] = 0;
+
return out;
}
struct inline_edge_summary *es = inline_edge_summary (e);
struct predicate p;
- es->call_stmt_size = lto_input_uleb128 (ib);
- es->call_stmt_time = lto_input_uleb128 (ib);
- es->loop_depth = lto_input_uleb128 (ib);
+ es->call_stmt_size = streamer_read_uhwi (ib);
+ es->call_stmt_time = streamer_read_uhwi (ib);
+ es->loop_depth = streamer_read_uhwi (ib);
p = read_predicate (ib);
edge_set_predicate (e, &p);
}
data_in =
lto_data_in_create (file_data, (const char *) data + string_offset,
header->string_size, NULL);
- f_count = lto_input_uleb128 (&ib);
+ f_count = streamer_read_uhwi (&ib);
for (i = 0; i < f_count; i++)
{
unsigned int index;
struct bitpack_d bp;
struct cgraph_edge *e;
- index = lto_input_uleb128 (&ib);
+ index = streamer_read_uhwi (&ib);
encoder = file_data->cgraph_node_encoder;
node = lto_cgraph_encoder_deref (encoder, index);
info = inline_summary (node);
info->estimated_stack_size
- = info->estimated_self_stack_size = lto_input_uleb128 (&ib);
- info->size = info->self_size = lto_input_uleb128 (&ib);
- info->time = info->self_time = lto_input_uleb128 (&ib);
+ = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
+ info->size = info->self_size = streamer_read_uhwi (&ib);
+ info->time = info->self_time = streamer_read_uhwi (&ib);
- bp = lto_input_bitpack (&ib);
+ bp = streamer_read_bitpack (&ib);
info->inlinable = bp_unpack_value (&bp, 1);
info->versionable = bp_unpack_value (&bp, 1);
- count2 = lto_input_uleb128 (&ib);
+ count2 = streamer_read_uhwi (&ib);
gcc_assert (!info->conds);
for (j = 0; j < count2; j++)
{
struct condition c;
- c.operand_num = lto_input_uleb128 (&ib);
- c.code = (enum tree_code) lto_input_uleb128 (&ib);
- c.val = lto_input_tree (&ib, data_in);
+ c.operand_num = streamer_read_uhwi (&ib);
+ c.code = (enum tree_code) streamer_read_uhwi (&ib);
+ c.val = stream_read_tree (&ib, data_in);
VEC_safe_push (condition, gc, info->conds, &c);
}
- count2 = lto_input_uleb128 (&ib);
+ count2 = streamer_read_uhwi (&ib);
gcc_assert (!info->entry);
for (j = 0; j < count2; j++)
{
struct size_time_entry e;
- e.size = lto_input_uleb128 (&ib);
- e.time = lto_input_uleb128 (&ib);
+ e.size = streamer_read_uhwi (&ib);
+ e.time = streamer_read_uhwi (&ib);
e.predicate = read_predicate (&ib);
VEC_safe_push (size_time_entry, gc, info->entry, &e);
for (j = 0; p->clause[j]; j++)
{
gcc_assert (j < MAX_CLAUSES);
- lto_output_uleb128_stream (ob->main_stream,
- p->clause[j]);
+ streamer_write_uhwi (ob, p->clause[j]);
}
- lto_output_uleb128_stream (ob->main_stream, 0);
+ streamer_write_uhwi (ob, 0);
}
write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
{
struct inline_edge_summary *es = inline_edge_summary (e);
- lto_output_uleb128_stream (ob->main_stream, es->call_stmt_size);
- lto_output_uleb128_stream (ob->main_stream, es->call_stmt_time);
- lto_output_uleb128_stream (ob->main_stream, es->loop_depth);
+ streamer_write_uhwi (ob, es->call_stmt_size);
+ streamer_write_uhwi (ob, es->call_stmt_time);
+ streamer_write_uhwi (ob, es->loop_depth);
write_predicate (ob, es->predicate);
}
for (i = 0; i < lto_cgraph_encoder_size (encoder); i++)
if (lto_cgraph_encoder_deref (encoder, i)->analyzed)
count++;
- lto_output_uleb128_stream (ob->main_stream, count);
+ streamer_write_uhwi (ob, count);
for (i = 0; i < lto_cgraph_encoder_size (encoder); i++)
{
struct condition *c;
- lto_output_uleb128_stream (ob->main_stream,
- lto_cgraph_encoder_encode (encoder, node));
- lto_output_sleb128_stream (ob->main_stream,
- info->estimated_self_stack_size);
- lto_output_sleb128_stream (ob->main_stream,
- info->self_size);
- lto_output_sleb128_stream (ob->main_stream,
- info->self_time);
+ streamer_write_uhwi (ob, lto_cgraph_encoder_encode (encoder, node));
+ streamer_write_hwi (ob, info->estimated_self_stack_size);
+ streamer_write_hwi (ob, info->self_size);
+ streamer_write_hwi (ob, info->self_time);
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, info->inlinable, 1);
bp_pack_value (&bp, info->versionable, 1);
- lto_output_bitpack (&bp);
- lto_output_uleb128_stream (ob->main_stream,
- VEC_length (condition, info->conds));
+ streamer_write_bitpack (&bp);
+ streamer_write_uhwi (ob, VEC_length (condition, info->conds));
for (i = 0; VEC_iterate (condition, info->conds, i, c); i++)
{
- lto_output_uleb128_stream (ob->main_stream,
- c->operand_num);
- lto_output_uleb128_stream (ob->main_stream,
- c->code);
- lto_output_tree (ob, c->val, true);
+ streamer_write_uhwi (ob, c->operand_num);
+ streamer_write_uhwi (ob, c->code);
+ stream_write_tree (ob, c->val, true);
}
- lto_output_uleb128_stream (ob->main_stream,
- VEC_length (size_time_entry, info->entry));
+ streamer_write_uhwi (ob, VEC_length (size_time_entry, info->entry));
for (i = 0;
VEC_iterate (size_time_entry, info->entry, i, e);
i++)
{
- lto_output_uleb128_stream (ob->main_stream,
- e->size);
- lto_output_uleb128_stream (ob->main_stream,
- e->time);
+ streamer_write_uhwi (ob, e->size);
+ streamer_write_uhwi (ob, e->time);
write_predicate (ob, &e->predicate);
}
for (edge = node->callees; edge; edge = edge->next_callee)
write_inline_edge_summary (ob, edge);
}
}
- lto_output_1_stream (ob->main_stream, 0);
+ streamer_write_char_stream (ob->main_stream, 0);
produce_asm (ob, NULL);
destroy_output_block (ob);