#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h"
#include "tree.h"
#include "expr.h"
#include "flags.h"
lto_output_sleb128_stream (ob->main_stream, edge->count);
bp = bitpack_create (ob->main_stream);
- uid = flag_wpa ? edge->lto_stmt_uid : gimple_uid (edge->call_stmt);
+ uid = (!gimple_has_body_p (edge->caller->decl)
+ ? edge->lto_stmt_uid : gimple_uid (edge->call_stmt));
bp_pack_value (&bp, uid, HOST_BITS_PER_INT);
bp_pack_value (&bp, edge->inline_failed, HOST_BITS_PER_INT);
bp_pack_value (&bp, edge->frequency, HOST_BITS_PER_INT);
gcc_assert (!(flags & (ECF_LOOPING_CONST_OR_PURE
| ECF_MAY_BE_ALLOCA
| ECF_SIBCALL
+ | ECF_LEAF
| ECF_NOVOPS)));
}
lto_output_bitpack (&bp);
{
if (ref->refering_type == IPA_REF_CGRAPH)
{
- if (!cgraph_node_in_set_p (ipa_ref_refering_node (ref), set))
+ if (ipa_ref_refering_node (ref)->in_other_partition
+ || !cgraph_node_in_set_p (ipa_ref_refering_node (ref), set))
return true;
}
else
{
- if (!varpool_node_in_set_p (ipa_ref_refering_varpool_node (ref),
- vset))
+ if (ipa_ref_refering_varpool_node (ref)->in_other_partition
+ || !varpool_node_in_set_p (ipa_ref_refering_varpool_node (ref),
+ vset))
return true;
}
}
if (node->global.inlined_to)
return false;
for (e = node->callers; e; e = e->next_caller)
- if (!cgraph_node_in_set_p (e->caller, set))
+ if (e->caller->in_other_partition
+ || !cgraph_node_in_set_p (e->caller, set))
return true;
return false;
}
clone_of = node->clone_of;
while (clone_of
- && (ref = lto_cgraph_encoder_lookup (encoder, node->clone_of)) == LCC_NOT_FOUND)
+ && (ref = lto_cgraph_encoder_lookup (encoder, clone_of)) == LCC_NOT_FOUND)
if (clone_of->prev_sibling_clone)
clone_of = clone_of->prev_sibling_clone;
else
clone_of = clone_of->clone_of;
+
+ if (LTO_cgraph_analyzed_node)
+ gcc_assert (clone_of || !node->clone_of);
if (!clone_of)
lto_output_sleb128_stream (ob->main_stream, LCC_NOT_FOUND);
else
lto_output_fn_decl_index (ob->decl_state, ob->main_stream, node->decl);
lto_output_sleb128_stream (ob->main_stream, node->count);
+ lto_output_sleb128_stream (ob->main_stream, node->count_materialization_scale);
if (tag == LTO_cgraph_analyzed_node)
{
bp_pack_value (&bp, node->abstract_and_needed, 1);
bp_pack_value (&bp, tag == LTO_cgraph_analyzed_node
&& !DECL_EXTERNAL (node->decl)
+ && !DECL_COMDAT (node->decl)
&& (reachable_from_other_partition_p (node, set)
|| referenced_from_other_partition_p (&node->ref_list, set, vset)), 1);
bp_pack_value (&bp, node->lowered, 1);
bp_pack_value (&bp, node->alias, 1);
bp_pack_value (&bp, node->finalized_by_frontend, 1);
bp_pack_value (&bp, node->frequency, 2);
+ bp_pack_value (&bp, node->only_called_at_startup, 1);
+ bp_pack_value (&bp, node->only_called_at_exit, 1);
lto_output_bitpack (&bp);
+ lto_output_uleb128_stream (ob->main_stream, node->resolution);
if (node->same_body)
{
lto_output_fn_decl_index (ob->decl_state, ob->main_stream,
alias->thunk.alias);
}
+ lto_output_uleb128_stream (ob->main_stream, alias->resolution);
alias = alias->previous;
}
while (alias);
/* Constant pool initializers can be de-unified into individual ltrans units.
FIXME: Alternatively at -Os we may want to avoid generating for them the local
labels and share them across LTRANS partitions. */
- if (DECL_IN_CONSTANT_POOL (node->decl))
+ if (DECL_IN_CONSTANT_POOL (node->decl)
+ && !DECL_COMDAT (node->decl))
{
bp_pack_value (&bp, 0, 1); /* used_from_other_parition. */
bp_pack_value (&bp, 0, 1); /* in_other_partition. */
else
ref = LCC_NOT_FOUND;
lto_output_sleb128_stream (ob->main_stream, ref);
+ lto_output_uleb128_stream (ob->main_stream, node->resolution);
if (count)
{
lto_output_uleb128_stream (ob->main_stream, count);
for (alias = node->extra_name; alias; alias = alias->next)
- lto_output_var_decl_index (ob->decl_state, ob->main_stream, alias->decl);
+ {
+ lto_output_var_decl_index (ob->decl_state, ob->main_stream, alias->decl);
+ lto_output_uleb128_stream (ob->main_stream, alias->resolution);
+ }
}
}
{
if (profile_info)
{
- /* We do not output num, it is not terribly useful. */
+ /* We do not output num, sum_all and run_max, they are not used by
+ GCC profile feedback and they are difficult to merge from multiple
+ units. */
gcc_assert (profile_info->runs);
lto_output_uleb128_stream (ob->main_stream, profile_info->runs);
- lto_output_sleb128_stream (ob->main_stream, profile_info->sum_all);
- lto_output_sleb128_stream (ob->main_stream, profile_info->run_max);
- lto_output_sleb128_stream (ob->main_stream, profile_info->sum_max);
+ lto_output_uleb128_stream (ob->main_stream, profile_info->sum_max);
}
else
lto_output_uleb128_stream (ob->main_stream, 0);
if (DECL_INITIAL (vnode->decl)
&& !lto_varpool_encoder_encode_initializer_p (varpool_encoder,
vnode)
- && (DECL_IN_CONSTANT_POOL (vnode->decl)
- || TREE_READONLY (vnode->decl)))
+ && const_value_known_p (vnode->decl))
{
lto_set_varpool_encoder_encode_initializer (varpool_encoder, vnode);
add_references (encoder, varpool_encoder, &vnode->ref_list);
lto_cgraph_encoder_t encoder;
lto_varpool_encoder_t varpool_encoder;
struct cgraph_asm_node *can;
+ static bool asm_nodes_output = false;
if (flag_wpa)
output_cgraph_opt_summary ();
lto_output_uleb128_stream (ob->main_stream, 0);
- /* Emit toplevel asms. */
- for (can = cgraph_asm_nodes; can; can = can->next)
+ /* Emit toplevel asms.
+ When doing WPA we must output every asm just once. Since we do not partition asm
+ nodes at all, output them to the first output. This is kind of a hack, but it
+ should work well. */
+ if (!asm_nodes_output)
{
- int len = TREE_STRING_LENGTH (can->asm_str);
- lto_output_uleb128_stream (ob->main_stream, len);
- for (i = 0; i < len; ++i)
- lto_output_1_stream (ob->main_stream,
- TREE_STRING_POINTER (can->asm_str)[i]);
+ asm_nodes_output = true;
+ for (can = cgraph_asm_nodes; can; can = can->next)
+ {
+ int len = TREE_STRING_LENGTH (can->asm_str);
+ lto_output_uleb128_stream (ob->main_stream, len);
+ for (i = 0; i < len; ++i)
+ lto_output_1_stream (ob->main_stream,
+ TREE_STRING_POINTER (can->asm_str)[i]);
+ }
}
lto_output_uleb128_stream (ob->main_stream, 0);
unsigned int self_time,
unsigned int time_inlining_benefit,
unsigned int self_size,
- unsigned int size_inlining_benefit)
+ unsigned int size_inlining_benefit,
+ enum ld_plugin_symbol_resolution resolution)
{
node->aux = (void *) tag;
node->local.inline_summary.estimated_self_stack_size = stack_size;
node->lowered = bp_unpack_value (bp, 1);
node->analyzed = tag == LTO_cgraph_analyzed_node;
node->in_other_partition = bp_unpack_value (bp, 1);
+ if (node->in_other_partition
+ /* Avoid updating the decl when we are seeing just an inline clone.
+ When inlining a function that has functions already inlined into it,
+ we produce clones of inline clones.
+
+ WPA partitioning might put each clone into a different unit and
+ we might end up streaming an inline clone from another partition
+ to support the clone we are interested in. */
+ && (!node->clone_of
+ || node->clone_of->decl != node->decl))
+ {
+ DECL_EXTERNAL (node->decl) = 1;
+ TREE_STATIC (node->decl) = 0;
+ }
node->alias = bp_unpack_value (bp, 1);
node->finalized_by_frontend = bp_unpack_value (bp, 1);
node->frequency = (enum node_frequency)bp_unpack_value (bp, 2);
+ node->only_called_at_startup = bp_unpack_value (bp, 1);
+ node->only_called_at_exit = bp_unpack_value (bp, 1);
+ node->resolution = resolution;
}
/* Output the part of the cgraph in SET. */
int size_inlining_benefit = 0;
unsigned long same_body_count = 0;
int clone_ref;
+ enum ld_plugin_symbol_resolution resolution;
clone_ref = lto_input_sleb128 (ib);
node = cgraph_node (fn_decl);
node->count = lto_input_sleb128 (ib);
+ node->count_materialization_scale = lto_input_sleb128 (ib);
if (tag == LTO_cgraph_analyzed_node)
{
"node %d", node->uid);
bp = lto_input_bitpack (ib);
+ resolution = (enum ld_plugin_symbol_resolution)lto_input_uleb128 (ib);
input_overwrite_node (file_data, node, tag, &bp, stack_size, self_time,
time_inlining_benefit, self_size,
- size_inlining_benefit);
+ size_inlining_benefit, resolution);
/* Store a reference for now, and fix up later to be a pointer. */
node->global.inlined_to = (cgraph_node_ptr) (intptr_t) ref;
{
tree alias_decl;
int type;
+ struct cgraph_node *alias;
decl_index = lto_input_uleb128 (ib);
alias_decl = lto_file_decl_data_get_fn_decl (file_data, decl_index);
type = lto_input_uleb128 (ib);
tree real_alias;
decl_index = lto_input_uleb128 (ib);
real_alias = lto_file_decl_data_get_fn_decl (file_data, decl_index);
- cgraph_same_body_alias (alias_decl, real_alias);
+ alias = cgraph_same_body_alias (alias_decl, real_alias);
}
else
{
tree real_alias;
decl_index = lto_input_uleb128 (ib);
real_alias = lto_file_decl_data_get_fn_decl (file_data, decl_index);
- cgraph_add_thunk (alias_decl, fn_decl, type & 2, fixed_offset,
- virtual_value,
- (type & 4) ? size_int (virtual_value) : NULL_TREE,
- real_alias);
+ alias = cgraph_add_thunk (alias_decl, fn_decl, type & 2, fixed_offset,
+ virtual_value,
+ (type & 4) ? size_int (virtual_value) : NULL_TREE,
+ real_alias);
}
+ alias->resolution = (enum ld_plugin_symbol_resolution)lto_input_uleb128 (ib);
}
return node;
}
node->analyzed = node->finalized;
node->used_from_other_partition = bp_unpack_value (&bp, 1);
node->in_other_partition = bp_unpack_value (&bp, 1);
+ if (node->in_other_partition)
+ {
+ DECL_EXTERNAL (node->decl) = 1;
+ TREE_STATIC (node->decl) = 0;
+ }
aliases_p = bp_unpack_value (&bp, 1);
if (node->finalized)
varpool_mark_needed_node (node);
ref = lto_input_sleb128 (ib);
/* Store a reference for now, and fix up later to be a pointer. */
node->same_comdat_group = (struct varpool_node *) (intptr_t) ref;
+ node->resolution = (enum ld_plugin_symbol_resolution)lto_input_uleb128 (ib);
if (aliases_p)
{
count = lto_input_uleb128 (ib);
{
tree decl = lto_file_decl_data_get_var_decl (file_data,
lto_input_uleb128 (ib));
- varpool_extra_name_alias (decl, var_decl);
+ struct varpool_node *alias;
+ alias = varpool_extra_name_alias (decl, var_decl);
+ alias->resolution = (enum ld_plugin_symbol_resolution)lto_input_uleb128 (ib);
}
}
return node;
len = lto_input_uleb128 (ib);
}
-
- for (i = 0; VEC_iterate (cgraph_node_ptr, nodes, i, node); i++)
+ /* AUX pointers should all be non-zero for nodes read from the stream. */
+#ifdef ENABLE_CHECKING
+ FOR_EACH_VEC_ELT (cgraph_node_ptr, nodes, i, node)
+ gcc_assert (node->aux);
+#endif
+ FOR_EACH_VEC_ELT (cgraph_node_ptr, nodes, i, node)
{
int ref = (int) (intptr_t) node->global.inlined_to;
+ /* We share the declaration of builtins, so we may read the same node twice. */
+ if (!node->aux)
+ continue;
+ node->aux = NULL;
+
/* Fixup inlined_to from reference to pointer. */
if (ref != LCC_NOT_FOUND)
node->global.inlined_to = VEC_index (cgraph_node_ptr, nodes, ref);
else
node->same_comdat_group = NULL;
}
+ FOR_EACH_VEC_ELT (cgraph_node_ptr, nodes, i, node)
+ node->aux = (void *)1;
return nodes;
}
input_varpool_node (file_data, ib));
len--;
}
- for (i = 0; VEC_iterate (varpool_node_ptr, varpool, i, node); i++)
+#ifdef ENABLE_CHECKING
+ FOR_EACH_VEC_ELT (varpool_node_ptr, varpool, i, node)
+ gcc_assert (!node->aux);
+#endif
+ FOR_EACH_VEC_ELT (varpool_node_ptr, varpool, i, node)
{
int ref = (int) (intptr_t) node->same_comdat_group;
+ /* We share the declaration of builtins, so we may read the same node twice. */
+ if (node->aux)
+ continue;
+ node->aux = (void *)1;
/* Fixup same_comdat_group from reference to pointer. */
if (ref != LCC_NOT_FOUND)
else
node->same_comdat_group = NULL;
}
+ FOR_EACH_VEC_ELT (varpool_node_ptr, varpool, i, node)
+ node->aux = NULL;
return varpool;
}
/* Input profile_info from IB. */
static void
-input_profile_summary (struct lto_input_block *ib)
+input_profile_summary (struct lto_input_block *ib,
+ struct lto_file_decl_data *file_data)
{
unsigned int runs = lto_input_uleb128 (ib);
if (runs)
{
- if (!profile_info)
- {
- profile_info = &lto_gcov_summary;
- lto_gcov_summary.runs = runs;
- lto_gcov_summary.sum_all = lto_input_sleb128 (ib);
- lto_gcov_summary.run_max = lto_input_sleb128 (ib);
- lto_gcov_summary.sum_max = lto_input_sleb128 (ib);
- }
- /* We can support this by scaling all counts to nearest common multiple
- of all different runs, but it is perhaps not worth the effort. */
- else if (profile_info->runs != runs
- || profile_info->sum_all != lto_input_sleb128 (ib)
- || profile_info->run_max != lto_input_sleb128 (ib)
- || profile_info->sum_max != lto_input_sleb128 (ib))
- sorry ("Combining units with different profiles is not supported.");
- /* We allow some units to have profile and other to not have one. This will
- just make unprofiled units to be size optimized that is sane. */
+ file_data->profile_info.runs = runs;
+ file_data->profile_info.sum_max = lto_input_uleb128 (ib);
+ if (runs > file_data->profile_info.sum_max)
+ fatal_error ("Corrupted profile info in %s: sum_max is smaller than runs",
+ file_data->file_name);
+ }
+
+}
+
+/* Rescale profile summaries to the same number of runs in the whole unit. */
+
+static void
+merge_profile_summaries (struct lto_file_decl_data **file_data_vec)
+{
+ struct lto_file_decl_data *file_data;
+ unsigned int j;
+ gcov_unsigned_t max_runs = 0;
+ struct cgraph_node *node;
+ struct cgraph_edge *edge;
+
+ /* Find the unit with the maximal number of runs. If we ever get serious
+ about roundoff errors, we might also consider computing the least common
+ multiple. */
+ for (j = 0; (file_data = file_data_vec[j]) != NULL; j++)
+ if (max_runs < file_data->profile_info.runs)
+ max_runs = file_data->profile_info.runs;
+
+ if (!max_runs)
+ return;
+
+ /* Simple overflow check. We probably don't need to support that many train
+ runs. Such a large value probably implies data corruption anyway. */
+ if (max_runs > INT_MAX / REG_BR_PROB_BASE)
+ {
+ sorry ("At most %i profile runs is supported. Perhaps corrupted profile?",
+ INT_MAX / REG_BR_PROB_BASE);
+ return;
}
+ profile_info = &lto_gcov_summary;
+ lto_gcov_summary.runs = max_runs;
+ lto_gcov_summary.sum_max = 0;
+
+ /* Rescale all units to the maximal number of runs.
+ sum_max cannot be easily merged, as we have no idea which files come from
+ the same run. We do not use the info anyway, so leave it 0. */
+ for (j = 0; (file_data = file_data_vec[j]) != NULL; j++)
+ if (file_data->profile_info.runs)
+ {
+ int scale = ((REG_BR_PROB_BASE * max_runs
+ + file_data->profile_info.runs / 2)
+ / file_data->profile_info.runs);
+ lto_gcov_summary.sum_max = MAX (lto_gcov_summary.sum_max,
+ (file_data->profile_info.sum_max
+ * scale
+ + REG_BR_PROB_BASE / 2)
+ / REG_BR_PROB_BASE);
+ }
+
+ /* Watch roundoff errors. */
+ if (lto_gcov_summary.sum_max < max_runs)
+ lto_gcov_summary.sum_max = max_runs;
+
+ /* If merging already happened at WPA time, we are done. */
+ if (flag_ltrans)
+ return;
+
+ /* Now compute count_materialization_scale of each node.
+ During LTRANS we already have values of count_materialization_scale
+ computed, so just update them. */
+ for (node = cgraph_nodes; node; node = node->next)
+ if (node->local.lto_file_data->profile_info.runs)
+ {
+ int scale;
+
+ scale =
+ ((node->count_materialization_scale * max_runs
+ + node->local.lto_file_data->profile_info.runs / 2)
+ / node->local.lto_file_data->profile_info.runs);
+ node->count_materialization_scale = scale;
+ if (scale < 0)
+ fatal_error ("Profile information in %s corrupted",
+ file_data->file_name);
+
+ if (scale == REG_BR_PROB_BASE)
+ continue;
+ for (edge = node->callees; edge; edge = edge->next_callee)
+ edge->count = ((edge->count * scale + REG_BR_PROB_BASE / 2)
+ / REG_BR_PROB_BASE);
+ node->count = ((node->count * scale + REG_BR_PROB_BASE / 2)
+ / REG_BR_PROB_BASE);
+ }
}
/* Input and merge the cgraph from each of the .o files passed to
ib = lto_create_simple_input_block (file_data, LTO_section_cgraph,
&data, &len);
- input_profile_summary (ib);
+ if (!ib)
+ fatal_error ("cannot find LTO cgraph in %s", file_data->file_name);
+ input_profile_summary (ib, file_data);
file_data->cgraph_node_encoder = lto_cgraph_encoder_new ();
nodes = input_cgraph_1 (file_data, ib);
lto_destroy_simple_input_block (file_data, LTO_section_cgraph,
ib = lto_create_simple_input_block (file_data, LTO_section_varpool,
&data, &len);
+ if (!ib)
+ fatal_error ("cannot find LTO varpool in %s", file_data->file_name);
varpool = input_varpool_1 (file_data, ib);
lto_destroy_simple_input_block (file_data, LTO_section_varpool,
ib, data, len);
ib = lto_create_simple_input_block (file_data, LTO_section_refs,
&data, &len);
+ if (!ib)
+ fatal_error("cannot find LTO section refs in %s", file_data->file_name);
input_refs (ib, nodes, varpool);
lto_destroy_simple_input_block (file_data, LTO_section_refs,
ib, data, len);
VEC_free (cgraph_node_ptr, heap, nodes);
VEC_free (varpool_node_ptr, heap, varpool);
}
+ merge_profile_summaries (file_data_vec);
+
/* Clear out the aux field that was used to store enough state to
tell which nodes should be overwritten. */
lto_output_uleb128_stream (ob->main_stream, index);
lto_output_uleb128_stream (ob->main_stream,
VEC_length (ipa_replace_map_p, node->clone.tree_map));
- for (i = 0; VEC_iterate (ipa_replace_map_p, node->clone.tree_map, i, map); i++)
+ FOR_EACH_VEC_ELT (ipa_replace_map_p, node->clone.tree_map, i, map)
{
int parm_num;
tree parm;
for (parm_num = 0, parm = DECL_ARGUMENTS (node->decl); parm;
- parm = TREE_CHAIN (parm), parm_num++)
+ parm = DECL_CHAIN (parm), parm_num++)
if (map->old_tree == parm)
break;
/* At the moment we assume all old trees to be PARM_DECLs, because we have no
VEC_safe_push (ipa_replace_map_p, gc, node->clone.tree_map, map);
for (parm_num = 0, parm = DECL_ARGUMENTS (node->decl); parm_num;
- parm = TREE_CHAIN (parm))
+ parm = DECL_CHAIN (parm))
parm_num --;
map->parm_num = lto_input_uleb128 (ib_main);
map->old_tree = NULL;