#include "tree-flow.h"
#include "ipa-prop.h"
#include "lto-streamer.h"
+#include "data-streamer.h"
+#include "tree-streamer.h"
#include "ipa-inline.h"
#include "alloc-pool.h"
/* We need to create initial empty unconitional clause, but otherwie
we don't need to account empty times and sizes. */
- if (!size && !time && summary->conds)
+ if (!size && !time && summary->entry)
return;
/* Watch overflow that might result from insane profiles. */
}
+/* KNOWN_VALS is a partial mapping of parameters of NODE to constant values;
+   a parameter is unknown when its entry is NULL or when the vector is
+   shorter than its index.  Return clause of possible truths.  When
+   INLINE_P is true, assume that we are inlining.  */
+
+static clause_t
+evaluate_conditions_for_known_args (struct cgraph_node *node,
+ bool inline_p,
+ VEC (tree, heap) *known_vals)
+{
+ clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
+ struct inline_summary *info = inline_summary (node);
+ int i;
+ struct condition *c;
+
+ for (i = 0; VEC_iterate (condition, info->conds, i, c); i++)
+ {
+ tree val;
+ tree res;
+
+ /* We allow call stmt to have fewer arguments than the callee
+ function (especially for K&R style programs). So bound
+ check here. */
+ if (c->operand_num < (int)VEC_length (tree, known_vals))
+ val = VEC_index (tree, known_vals, c->operand_num);
+ else
+ val = NULL;
+
+ /* Unknown argument value: conservatively assume the condition
+ may be true at run time.  */
+ if (!val)
+ {
+ clause |= 1 << (i + predicate_first_dynamic_condition);
+ continue;
+ }
+ /* A known constant value trivially falsifies IS_NOT_CONSTANT.  */
+ if (c->code == IS_NOT_CONSTANT)
+ continue;
+ res = fold_binary_to_constant (c->code, boolean_type_node, val, c->val);
+ /* The condition folded to constant false: leave it out of the
+ clause of possible truths.  */
+ if (res
+ && integer_zerop (res))
+ continue;
+ clause |= 1 << (i + predicate_first_dynamic_condition);
+ }
+ return clause;
+}
+
+
/* Work out what conditions might be true at invocation of E. */
static clause_t
evaluate_conditions_for_edge (struct cgraph_edge *e, bool inline_p)
{
clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
- struct inline_summary *info = inline_summary (e->callee);
+ struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
+ struct inline_summary *info = inline_summary (callee);
int i;
if (ipa_node_params_vector && info->conds
struct ipa_node_params *parms_info;
struct ipa_edge_args *args = IPA_EDGE_REF (e);
int i, count = ipa_get_cs_argument_count (args);
- struct ipcp_lattice lat;
- struct condition *c;
VEC (tree, heap) *known_vals = NULL;
if (e->caller->global.inlined_to)
VEC_safe_grow_cleared (tree, heap, known_vals, count);
for (i = 0; i < count; i++)
{
- ipa_lattice_from_jfunc (parms_info, &lat, ipa_get_ith_jump_func (args, i));
- if (lat.type == IPA_CONST_VALUE)
- VEC_replace (tree, known_vals, i, lat.constant);
- }
- for (i = 0; VEC_iterate (condition, info->conds, i, c); i++)
- {
- tree val = VEC_index (tree, known_vals, c->operand_num);
- tree res;
-
- if (!val)
- {
- clause |= 1 << (i + predicate_first_dynamic_condition);
- continue;
- }
- if (c->code == IS_NOT_CONSTANT)
- continue;
- res = fold_binary_to_constant (c->code, boolean_type_node, val, c->val);
- if (res
- && integer_zerop (res))
- continue;
- clause |= 1 << (i + predicate_first_dynamic_condition);
+ tree cst = ipa_cst_from_jfunc (parms_info,
+ ipa_get_ith_jump_func (args, i));
+ if (cst)
+ VEC_replace (tree, known_vals, i, cst);
}
+ clause = evaluate_conditions_for_known_args (callee,
+ inline_p, known_vals);
VEC_free (tree, heap, known_vals);
}
else
info = inline_summary (dst);
memcpy (info, inline_summary (src),
sizeof (struct inline_summary));
+ /* TODO: as an optimization, we may avoid copying conditions
+ that are known to be false or true. */
info->conds = VEC_copy (condition, gc, info->conds);
- info->entry = VEC_copy (size_time_entry, gc, info->entry);
+
+ /* When there are any replacements in the function body, see if we can figure
+ out that something was optimized out. */
+ if (ipa_node_params_vector && dst->clone.tree_map)
+ {
+ VEC(size_time_entry,gc) *entry = info->entry;
+ /* Use SRC parm info since it may not be copied yet. */
+ struct ipa_node_params *parms_info = IPA_NODE_REF (src);
+ VEC (tree, heap) *known_vals = NULL;
+ int count = ipa_get_param_count (parms_info);
+ int i,j;
+ clause_t possible_truths;
+ struct predicate true_pred = true_predicate ();
+ size_time_entry *e;
+ int optimized_out_size = 0;
+ gcov_type optimized_out_time = 0;
+ bool inlined_to_p = false;
+ struct cgraph_edge *edge;
+
+ info->entry = 0;
+ VEC_safe_grow_cleared (tree, heap, known_vals, count);
+ for (i = 0; i < count; i++)
+ {
+ tree t = ipa_get_param (parms_info, i);
+ struct ipa_replace_map *r;
+
+ for (j = 0;
+ VEC_iterate (ipa_replace_map_p, dst->clone.tree_map, j, r);
+ j++)
+ {
+ if (r->old_tree == t
+ && r->replace_p
+ && !r->ref_p)
+ {
+ VEC_replace (tree, known_vals, i, r->new_tree);
+ break;
+ }
+ }
+ }
+ possible_truths = evaluate_conditions_for_known_args (dst,
+ false, known_vals);
+ VEC_free (tree, heap, known_vals);
+
+ account_size_time (info, 0, 0, &true_pred);
+
+ /* Remap size_time vectors.
+ Simplify the predicate by pruning out alternatives that are known
+ to be false.
+ TODO: as an optimization, we can also eliminate conditions known to be true. */
+ for (i = 0; VEC_iterate (size_time_entry, entry, i, e); i++)
+ {
+ struct predicate new_predicate = true_predicate ();
+ for (j = 0; e->predicate.clause[j]; j++)
+ if (!(possible_truths & e->predicate.clause[j]))
+ {
+ new_predicate = false_predicate ();
+ break;
+ }
+ else
+ add_clause (&new_predicate,
+ possible_truths & e->predicate.clause[j]);
+ if (false_predicate_p (&new_predicate))
+ {
+ optimized_out_size += e->size;
+ optimized_out_time += e->time;
+ }
+ else
+ account_size_time (info, e->size, e->time, &new_predicate);
+ }
+
+ /* Remap edge predicates with the same simplification as above. */
+ for (edge = dst->callees; edge; edge = edge->next_callee)
+ {
+ struct predicate new_predicate = true_predicate ();
+ struct inline_edge_summary *es = inline_edge_summary (edge);
+
+ if (!edge->inline_failed)
+ inlined_to_p = true;
+ if (!es->predicate)
+ continue;
+ for (j = 0; es->predicate->clause[j]; j++)
+ if (!(possible_truths & es->predicate->clause[j]))
+ {
+ new_predicate = false_predicate ();
+ break;
+ }
+ else
+ add_clause (&new_predicate,
+ possible_truths & es->predicate->clause[j]);
+ if (false_predicate_p (&new_predicate)
+ && !false_predicate_p (es->predicate))
+ {
+ optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
+ optimized_out_time += (es->call_stmt_time
+ * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)
+ * edge->frequency);
+ edge->frequency = 0;
+ }
+ *es->predicate = new_predicate;
+ }
+
+ /* Remap indirect edge predicates with the same simplification as above. */
+ for (edge = dst->indirect_calls; edge; edge = edge->next_callee)
+ {
+ struct predicate new_predicate = true_predicate ();
+ struct inline_edge_summary *es = inline_edge_summary (edge);
+
+ if (!edge->inline_failed)
+ inlined_to_p = true;
+ if (!es->predicate)
+ continue;
+ for (j = 0; es->predicate->clause[j]; j++)
+ if (!(possible_truths & es->predicate->clause[j]))
+ {
+ new_predicate = false_predicate ();
+ break;
+ }
+ else
+ add_clause (&new_predicate,
+ possible_truths & es->predicate->clause[j]);
+ if (false_predicate_p (&new_predicate)
+ && !false_predicate_p (es->predicate))
+ {
+ optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
+ optimized_out_time += (es->call_stmt_time
+ * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)
+ * edge->frequency);
+ edge->frequency = 0;
+ }
+ *es->predicate = new_predicate;
+ }
+
+ /* If the inliner or someone after the inliner will ever start producing
+ non-trivial clones, we will get trouble with lack of information
+ about updating self sizes, because size vectors already contain
+ sizes of the callees. */
+ gcc_assert (!inlined_to_p
+ || (!optimized_out_size && !optimized_out_time));
+
+ info->size -= optimized_out_size / INLINE_SIZE_SCALE;
+ info->self_size -= optimized_out_size / INLINE_SIZE_SCALE;
+ gcc_assert (info->size > 0);
+ gcc_assert (info->self_size > 0);
+
+ optimized_out_time /= INLINE_TIME_SCALE;
+ if (optimized_out_time > MAX_TIME)
+ optimized_out_time = MAX_TIME;
+ info->time -= optimized_out_time;
+ info->self_time -= optimized_out_time;
+ if (info->time < 0)
+ info->time = 0;
+ if (info->self_time < 0)
+ info->self_time = 0;
+ }
+ else
+ info->entry = VEC_copy (size_time_entry, gc, info->entry);
}
for (edge = node->callees; edge; edge = edge->next_callee)
{
struct inline_edge_summary *es = inline_edge_summary (edge);
- fprintf (f, "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i time: %2i",
- indent, "", cgraph_node_name (edge->callee),
- edge->callee->uid,
+ struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee, NULL);
+ fprintf (f, "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i time: %2i callee size:%2i stack:%2i",
+ indent, "", cgraph_node_name (callee),
+ callee->uid,
!edge->inline_failed ? "inlined"
: cgraph_inline_failed_string (edge->inline_failed),
indent, "",
es->loop_depth,
edge->frequency,
es->call_stmt_size,
- es->call_stmt_time);
+ es->call_stmt_time,
+ (int)inline_summary (callee)->size,
+ (int)inline_summary (callee)->estimated_stack_size);
if (es->predicate)
{
fprintf (f, " predicate: ");
else
fprintf (f, "\n");
if (!edge->inline_failed)
- dump_inline_edge_summary (f, indent+2, edge->callee, info);
+ {
+ fprintf (f, "%*sStack frame offset %i, callee self size %i, callee size %i\n",
+ indent+2, "",
+ (int)inline_summary (callee)->stack_frame_offset,
+ (int)inline_summary (callee)->estimated_self_stack_size,
+ (int)inline_summary (callee)->estimated_stack_size);
+ dump_inline_edge_summary (f, indent+2, callee, info);
+ }
}
for (edge = node->indirect_calls; edge; edge = edge->next_callee)
{
}
-static void
+void
dump_inline_summary (FILE * f, struct cgraph_node *node)
{
if (node->analyzed)
}
}
-void
+DEBUG_FUNCTION void
debug_inline_summary (struct cgraph_node *node)
{
dump_inline_summary (stderr, node);
/* Do not inline calls where we cannot triviall work around
mismatches in argument or return types. */
if (edge->callee
- && !gimple_check_call_matching_types (stmt, edge->callee->decl))
+ && cgraph_function_or_thunk_node (edge->callee, NULL)
+ && !gimple_check_call_matching_types (stmt,
+ cgraph_function_or_thunk_node (edge->callee,
+ NULL)->decl))
{
edge->call_stmt_cannot_inline_p = true;
gimple_call_set_cannot_inline (stmt, true);
info = inline_summary (node);
+ /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that.
+ Once this happens, we will need to predict call
+ statement size more carefully. */
+ if (node->thunk.thunk_p)
+ {
+ struct inline_edge_summary *es = inline_edge_summary (node->callees);
+ struct predicate t = true_predicate ();
+
+ info->inlinable = info->versionable = 0;
+ node->callees->call_stmt_cannot_inline_p = true;
+ node->local.can_change_signature = false;
+ es->call_stmt_time = 1;
+ es->call_stmt_size = 1;
+ account_size_time (info, 0, 0, &t);
+ return;
+ }
+
/* Estimate the stack size for the function if we're optimizing. */
self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
info->estimated_self_stack_size = self_stack_size;
}
-/* Estimate size and time needed to execute callee of EDGE assuming
- that parameters known to be constant at caller of EDGE are
- propagated. If INLINE_P is true, it is assumed that call will
- be inlined. */
+/* Estimate size and time needed to execute NODE assuming
+ POSSIBLE_TRUTHS clause. */
static void
-estimate_callee_size_and_time (struct cgraph_edge *edge, bool inline_p,
- int *ret_size, int *ret_time)
+estimate_node_size_and_time (struct cgraph_node *node,
+ clause_t possible_truths,
+ int *ret_size, int *ret_time)
{
- struct inline_summary *info = inline_summary (edge->callee);
- clause_t clause = evaluate_conditions_for_edge (edge, inline_p);
+ struct inline_summary *info = inline_summary (node);
size_time_entry *e;
int size = 0, time = 0;
int i;
&& (dump_flags & TDF_DETAILS))
{
bool found = false;
- fprintf (dump_file, " Estimating callee body: %s/%i\n"
+ fprintf (dump_file, " Estimating body: %s/%i\n"
" Known to be false: ",
- cgraph_node_name (edge->callee),
- edge->callee->uid);
+ cgraph_node_name (node),
+ node->uid);
for (i = predicate_not_inlined_condition;
i < (predicate_first_dynamic_condition
+ (int)VEC_length (condition, info->conds)); i++)
- if (!(clause & (1 << i)))
+ if (!(possible_truths & (1 << i)))
{
if (found)
fprintf (dump_file, ", ");
}
for (i = 0; VEC_iterate (size_time_entry, info->entry, i, e); i++)
- if (evaluate_predicate (&e->predicate, clause))
+ if (evaluate_predicate (&e->predicate, possible_truths))
time += e->time, size += e->size;
if (time > MAX_TIME * INLINE_TIME_SCALE)
time = MAX_TIME * INLINE_TIME_SCALE;
- estimate_calls_size_and_time (edge->callee, &size, &time, clause);
+ estimate_calls_size_and_time (node, &size, &time, possible_truths);
time = (time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
size = (size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
}
+/* Estimate size and time needed to execute NODE assuming that the
+   parameters listed in KNOWN_VALS are fixed to the given constant values,
+   as happens when an IPA-CP clone of NODE is created.  KNOWN_VALS is a
+   vector of assumed known constant values for parameters; results are
+   stored in *RET_SIZE and *RET_TIME.  */
+
+void
+estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
+ VEC (tree, heap) *known_vals,
+ int *ret_size, int *ret_time)
+{
+ clause_t clause;
+
+ clause = evaluate_conditions_for_known_args (node, false, known_vals);
+ estimate_node_size_and_time (node, clause, ret_size, ret_time);
+}
+
+
/* Translate all conditions from callee representation into caller representation and
symbolically evaluate predicate P into new predicate.
/* See if we can remap condition operand to caller's operand.
Otherwise give up. */
if (!operand_map
+ || (int)VEC_length (int, operand_map) <= c->operand_num
|| VEC_index (int, operand_map, c->operand_num) == -1)
cond_predicate = true_predicate ();
else
if (!e->inline_failed)
remap_edge_predicates (e->callee, info, callee_info, operand_map,
possible_truths, toplev_predicate);
+ else
+ edge_set_predicate (e, toplev_predicate);
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
e->frequency = 0;
}
}
+ else
+ edge_set_predicate (e, toplev_predicate);
}
}
struct inline_edge_summary *es = inline_edge_summary (edge);
gcc_checking_assert (edge->inline_failed);
- estimate_callee_size_and_time (edge, true, &size, &time);
+ estimate_node_size_and_time (cgraph_function_or_thunk_node (edge->callee, NULL),
+ evaluate_conditions_for_edge (edge, true),
+ &size, &time);
ret = (((gcov_type)time - es->call_stmt_time) * edge->frequency
+ CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
do_estimate_edge_growth (struct cgraph_edge *edge)
{
int size;
+ struct cgraph_node *callee;
/* When we do caching, use do_estimate_edge_time to populate the entry. */
gcc_checking_assert (size);
return size - (size > 0);
}
+ callee = cgraph_function_or_thunk_node (edge->callee, NULL);
/* Early inliner runs without caching, go ahead and do the dirty work. */
gcc_checking_assert (edge->inline_failed);
- estimate_callee_size_and_time (edge, true, &size, NULL);
+ estimate_node_size_and_time (callee,
+ evaluate_conditions_for_edge (edge, true),
+ &size, NULL);
gcc_checking_assert (inline_edge_summary (edge)->call_stmt_size);
return size - inline_edge_summary (edge)->call_stmt_size;
}
}
-/* Estimate the growth caused by inlining NODE into all callees. */
+/* Accumulator used by do_estimate_growth_1 when walking all callers of
+   a node to sum up the estimated growth caused by inlining it.  */
+struct growth_data
+{
+ bool self_recursive; /* Set when a self-recursive caller edge is seen.  */
+ int growth; /* Accumulated estimated growth over all callers.  */
+};
-int
-do_estimate_growth (struct cgraph_node *node)
+
+/* Worker for do_estimate_growth.  Accumulate into the growth_data
+   accumulator pointed to by DATA the estimated growth over all callers
+   of NODE.  Returns false so the walk over NODE and its aliases
+   continues.  */
+
+static bool
+do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
- int growth = 0;
 struct cgraph_edge *e;
- bool self_recursive = false;
- struct inline_summary *info = inline_summary (node);
+ struct growth_data *d = (struct growth_data *) data;
 for (e = node->callers; e; e = e->next_caller)
 {
+ /* A caller that is NODE itself, or that has been inlined into
+ NODE, makes NODE self-recursive.  */
 if (e->caller == node
 || (e->caller->global.inlined_to
 && e->caller->global.inlined_to == node))
- self_recursive = true;
- growth += estimate_edge_growth (e);
+ d->self_recursive = true;
+ d->growth += estimate_edge_growth (e);
 }
-
+ return false;
+}
+
+
+/* Estimate the growth caused by inlining NODE into all callers.  */
+
+int
+do_estimate_growth (struct cgraph_node *node)
{
+ /* Initializers must follow member order: {self_recursive, growth}.
+ The previous {0, false} form was transposed and only worked because
+ both values happen to be zero.  */
+ struct growth_data d = {false, 0};
+ struct inline_summary *info = inline_summary (node);
+
+ /* Sum growth over the callers of NODE and of all its aliases.  */
+ cgraph_for_node_and_aliases (node, do_estimate_growth_1, &d, true);
 /* For self recursive functions the growth estimation really should be
 infinity. We don't want to return very large values because the growth
 plays various roles in badness computation fractions. Be sure to not
 return zero or negative growths. */
- if (self_recursive)
- growth = growth < info->size ? info->size : growth;
+ if (d.self_recursive)
+ d.growth = d.growth < info->size ? info->size : d.growth;
 else
 {
+ /* Test DECL_EXTERNAL first so the more expensive reachability
+ check is skipped for external functions.  */
- if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
- && !DECL_EXTERNAL (node->decl))
- growth -= info->size;
+ if (!DECL_EXTERNAL (node->decl)
+ && cgraph_will_be_removed_from_program_if_no_direct_calls (node))
+ d.growth -= info->size;
 /* COMDAT functions are very often not shared across multiple units since they
 come from various template instantiations. Take this into account. */
 else if (DECL_COMDAT (node->decl)
 && cgraph_can_remove_if_no_direct_calls_p (node))
- growth -= (info->size
- * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY)) + 50) / 100;
+ d.growth -= (info->size
+ * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY)) + 50) / 100;
 }
+ /* Cache the result (biased by one so zero means "not cached").  */
 if (node_growth_cache)
 {
 if ((int)VEC_length (int, node_growth_cache) <= node->uid)
 VEC_safe_grow_cleared (int, heap, node_growth_cache, cgraph_max_uid);
- VEC_replace (int, node_growth_cache, node->uid, growth + (growth >= 0));
+ VEC_replace (int, node_growth_cache, node->uid, d.growth + (d.growth >= 0));
 }
- return growth;
+ return d.growth;
}
cgraph_node_name (node), node->uid);
/* FIXME: We should remove the optimize check after we ensure we never run
IPA passes when not optimizing. */
- if (flag_indirect_inlining && optimize)
+ if (flag_indirect_inlining && optimize && !node->thunk.thunk_p)
inline_indirect_intraprocedural_analysis (node);
compute_inline_parameters (node, false);
if (flag_indirect_inlining)
ipa_register_cgraph_hooks ();
- for (node = cgraph_nodes; node; node = node->next)
- if (node->analyzed)
+ FOR_EACH_DEFINED_FUNCTION (node)
+ if (!node->alias)
inline_analyze_function (node);
}
do
{
gcc_assert (k <= MAX_CLAUSES);
- clause = out.clause[k++] = lto_input_uleb128 (ib);
+ clause = out.clause[k++] = streamer_read_uhwi (ib);
}
while (clause);
+
+ /* Zero-initialize the remaining clauses in OUT. */
+ while (k <= MAX_CLAUSES)
+ out.clause[k++] = 0;
+
return out;
}
struct inline_edge_summary *es = inline_edge_summary (e);
struct predicate p;
- es->call_stmt_size = lto_input_uleb128 (ib);
- es->call_stmt_time = lto_input_uleb128 (ib);
- es->loop_depth = lto_input_uleb128 (ib);
+ es->call_stmt_size = streamer_read_uhwi (ib);
+ es->call_stmt_time = streamer_read_uhwi (ib);
+ es->loop_depth = streamer_read_uhwi (ib);
p = read_predicate (ib);
edge_set_predicate (e, &p);
}
data_in =
lto_data_in_create (file_data, (const char *) data + string_offset,
header->string_size, NULL);
- f_count = lto_input_uleb128 (&ib);
+ f_count = streamer_read_uhwi (&ib);
for (i = 0; i < f_count; i++)
{
unsigned int index;
struct bitpack_d bp;
struct cgraph_edge *e;
- index = lto_input_uleb128 (&ib);
+ index = streamer_read_uhwi (&ib);
encoder = file_data->cgraph_node_encoder;
node = lto_cgraph_encoder_deref (encoder, index);
info = inline_summary (node);
info->estimated_stack_size
- = info->estimated_self_stack_size = lto_input_uleb128 (&ib);
- info->size = info->self_size = lto_input_uleb128 (&ib);
- info->time = info->self_time = lto_input_uleb128 (&ib);
+ = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
+ info->size = info->self_size = streamer_read_uhwi (&ib);
+ info->time = info->self_time = streamer_read_uhwi (&ib);
- bp = lto_input_bitpack (&ib);
+ bp = streamer_read_bitpack (&ib);
info->inlinable = bp_unpack_value (&bp, 1);
info->versionable = bp_unpack_value (&bp, 1);
- count2 = lto_input_uleb128 (&ib);
+ count2 = streamer_read_uhwi (&ib);
gcc_assert (!info->conds);
for (j = 0; j < count2; j++)
{
struct condition c;
- c.operand_num = lto_input_uleb128 (&ib);
- c.code = (enum tree_code) lto_input_uleb128 (&ib);
- c.val = lto_input_tree (&ib, data_in);
+ c.operand_num = streamer_read_uhwi (&ib);
+ c.code = (enum tree_code) streamer_read_uhwi (&ib);
+ c.val = stream_read_tree (&ib, data_in);
VEC_safe_push (condition, gc, info->conds, &c);
}
- count2 = lto_input_uleb128 (&ib);
+ count2 = streamer_read_uhwi (&ib);
gcc_assert (!info->entry);
for (j = 0; j < count2; j++)
{
struct size_time_entry e;
- e.size = lto_input_uleb128 (&ib);
- e.time = lto_input_uleb128 (&ib);
+ e.size = streamer_read_uhwi (&ib);
+ e.time = streamer_read_uhwi (&ib);
e.predicate = read_predicate (&ib);
VEC_safe_push (size_time_entry, gc, info->entry, &e);
for (j = 0; p->clause[j]; j++)
{
gcc_assert (j < MAX_CLAUSES);
- lto_output_uleb128_stream (ob->main_stream,
- p->clause[j]);
+ streamer_write_uhwi (ob, p->clause[j]);
}
- lto_output_uleb128_stream (ob->main_stream, 0);
+ streamer_write_uhwi (ob, 0);
}
write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
{
struct inline_edge_summary *es = inline_edge_summary (e);
- lto_output_uleb128_stream (ob->main_stream, es->call_stmt_size);
- lto_output_uleb128_stream (ob->main_stream, es->call_stmt_time);
- lto_output_uleb128_stream (ob->main_stream, es->loop_depth);
+ streamer_write_uhwi (ob, es->call_stmt_size);
+ streamer_write_uhwi (ob, es->call_stmt_time);
+ streamer_write_uhwi (ob, es->loop_depth);
write_predicate (ob, es->predicate);
}
for (i = 0; i < lto_cgraph_encoder_size (encoder); i++)
if (lto_cgraph_encoder_deref (encoder, i)->analyzed)
count++;
- lto_output_uleb128_stream (ob->main_stream, count);
+ streamer_write_uhwi (ob, count);
for (i = 0; i < lto_cgraph_encoder_size (encoder); i++)
{
struct condition *c;
- lto_output_uleb128_stream (ob->main_stream,
- lto_cgraph_encoder_encode (encoder, node));
- lto_output_sleb128_stream (ob->main_stream,
- info->estimated_self_stack_size);
- lto_output_sleb128_stream (ob->main_stream,
- info->self_size);
- lto_output_sleb128_stream (ob->main_stream,
- info->self_time);
+ streamer_write_uhwi (ob, lto_cgraph_encoder_encode (encoder, node));
+ streamer_write_hwi (ob, info->estimated_self_stack_size);
+ streamer_write_hwi (ob, info->self_size);
+ streamer_write_hwi (ob, info->self_time);
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, info->inlinable, 1);
bp_pack_value (&bp, info->versionable, 1);
- lto_output_bitpack (&bp);
- lto_output_uleb128_stream (ob->main_stream,
- VEC_length (condition, info->conds));
+ streamer_write_bitpack (&bp);
+ streamer_write_uhwi (ob, VEC_length (condition, info->conds));
for (i = 0; VEC_iterate (condition, info->conds, i, c); i++)
{
- lto_output_uleb128_stream (ob->main_stream,
- c->operand_num);
- lto_output_uleb128_stream (ob->main_stream,
- c->code);
- lto_output_tree (ob, c->val, true);
+ streamer_write_uhwi (ob, c->operand_num);
+ streamer_write_uhwi (ob, c->code);
+ stream_write_tree (ob, c->val, true);
}
- lto_output_uleb128_stream (ob->main_stream,
- VEC_length (size_time_entry, info->entry));
+ streamer_write_uhwi (ob, VEC_length (size_time_entry, info->entry));
for (i = 0;
VEC_iterate (size_time_entry, info->entry, i, e);
i++)
{
- lto_output_uleb128_stream (ob->main_stream,
- e->size);
- lto_output_uleb128_stream (ob->main_stream,
- e->time);
+ streamer_write_uhwi (ob, e->size);
+ streamer_write_uhwi (ob, e->time);
write_predicate (ob, &e->predicate);
}
for (edge = node->callees; edge; edge = edge->next_callee)
write_inline_edge_summary (ob, edge);
}
}
- lto_output_1_stream (ob->main_stream, 0);
+ streamer_write_char_stream (ob->main_stream, 0);
produce_asm (ob, NULL);
destroy_output_block (ob);