static struct cgraph_node_hook_list *node_removal_hook_holder;
static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
static struct cgraph_2node_hook_list *node_duplication_hook_holder;
+static struct cgraph_node_hook_list *function_insertion_hook_holder;
/* Add cgraph NODE described by INFO to the worklist WL regardless of whether
   it is in one or not.  It should almost never be used directly, as opposed to
   ipa_push_func_to_list.  */
wl = NULL;
for (node = cgraph_nodes; node; node = node->next)
- if (node->analyzed)
+ if (node->analyzed && !node->alias)
{
struct ipa_node_params *info = IPA_NODE_REF (node);
/* Unreachable nodes should have been eliminated before ipcp and
   inlining.  */
/* Return the index of the formal parameter whose tree is PTREE, in the
   function that corresponds to INFO.  */
-static int
+int
ipa_get_param_decl_index (struct ipa_node_params *info, tree ptree)
{
int i, count;
for (cs = node->callees; cs; cs = cs->next_callee)
{
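+ /* The edge may lead to an alias or a thunk; get the node of the
+    function with the actual body.  */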
+ struct cgraph_node *callee = cgraph_function_or_thunk_node (cs->callee, NULL);
/* We do not need to bother analyzing calls to unknown
functions unless they may become known during lto/whopr. */
if (!cs->callee->analyzed && !flag_lto)
  continue;
ipa_count_arguments (cs);
/* If the descriptor of the callee is not initialized yet, we have to do
it now. */
- if (cs->callee->analyzed)
- ipa_initialize_node_params (cs->callee);
+ if (callee->analyzed)
+ ipa_initialize_node_params (callee);
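+ /* If the numbers of actual arguments and formal parameters disagree,
+    mark the callee as called with a variable number of arguments.  */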
if (ipa_get_cs_argument_count (IPA_EDGE_REF (cs))
- != ipa_get_param_count (IPA_NODE_REF (cs->callee)))
- ipa_set_called_with_variable_arg (IPA_NODE_REF (cs->callee));
+ != ipa_get_param_count (IPA_NODE_REF (callee)))
+ ipa_set_called_with_variable_arg (IPA_NODE_REF (callee));
ipa_compute_jump_functions_for_edge (parms_info, cs);
}
if (!branch || gimple_code (branch) != GIMPLE_COND)
return;
- if (gimple_cond_code (branch) != NE_EXPR
+ if ((gimple_cond_code (branch) != NE_EXPR
+ && gimple_cond_code (branch) != EQ_EXPR)
|| !integer_zerop (gimple_cond_rhs (branch)))
return;
fprintf (dump_file, "\n");
}
}
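+ /* Look through aliases and thunks so that the parameter information of
+    the function with the actual body is consulted below.  */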
+ callee = cgraph_function_or_thunk_node (callee, NULL);
if (ipa_get_cs_argument_count (IPA_EDGE_REF (ie))
!= ipa_get_param_count (IPA_NODE_REF (callee)))
type = ie->indirect_info->otr_type;
binfo = get_binfo_at_offset (binfo, ie->indirect_info->anc_offset, type);
if (binfo)
- target = gimple_get_virt_method_for_binfo (token, binfo, &delta, true);
+ target = gimple_get_virt_method_for_binfo (token, binfo, &delta);
else
return NULL;
ipa_propagate_indirect_call_infos (struct cgraph_edge *cs,
VEC (cgraph_edge_p, heap) **new_edges)
{
- /* FIXME lto: We do not stream out indirect call information. */
- if (flag_wpa)
- return false;
-
/* Do nothing if the preparation phase has not been carried out yet
(i.e. during early inlining). */
if (!ipa_node_params_vector)
void
ipa_free_node_params_substructures (struct ipa_node_params *info)
{
- if (info->params)
- free (info->params);
+ free (info->params);
memset (info, 0, sizeof (*info));
}
new_info->node_enqueued = old_info->node_enqueued;
}
+
+/* Analyze a function that has been newly added to the callgraph.  */
+
+static void
+ipa_add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
+{
+ ipa_analyze_node (node);
+}
+
/* Register our cgraph hooks if they are not already there. */
void
if (!node_duplication_hook_holder)
node_duplication_hook_holder =
cgraph_add_node_duplication_hook (&ipa_node_duplication_hook, NULL);
+ function_insertion_hook_holder =
+ cgraph_add_function_insertion_hook (&ipa_add_new_function, NULL);
}
/* Unregister our cgraph hooks.  */
edge_duplication_hook_holder = NULL;
cgraph_remove_node_duplication_hook (node_duplication_hook_holder);
node_duplication_hook_holder = NULL;
+ cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
+ function_insertion_hook_holder = NULL;
}
/* Allocate all data structures necessary for indirect inlining.  */
base_offset
+ adj->offset / BITS_PER_UNIT);
off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1),
- off, 0);
+ off);
base = TREE_OPERAND (base, 0);
}
else
lto_output_uleb128_stream (ob->main_stream, node_ref);
bp = bitpack_create (ob->main_stream);
- bp_pack_value (&bp, info->called_with_var_arguments, 1);
gcc_assert (info->uses_analysis_done
|| ipa_get_param_count (info) == 0);
gcc_assert (!info->node_enqueued);
ipa_write_jump_function (ob, ipa_get_ith_jump_func (args, j));
}
for (e = node->indirect_calls; e; e = e->next_callee)
- ipa_write_indirect_edge_info (ob, e);
+ {
+ struct ipa_edge_args *args = IPA_EDGE_REF (e);
+
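+ /* Stream the argument count and all jump functions of this edge
+    before the indirect call information itself.  */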
+ lto_output_uleb128_stream (ob->main_stream,
+ ipa_get_cs_argument_count (args));
+ for (j = 0; j < ipa_get_cs_argument_count (args); j++)
+ ipa_write_jump_function (ob, ipa_get_ith_jump_func (args, j));
+ ipa_write_indirect_edge_info (ob, e);
+ }
}
/* Stream in NODE info from IB. */
ipa_initialize_node_params (node);
bp = lto_input_bitpack (ib);
- info->called_with_var_arguments = bp_unpack_value (&bp, 1);
if (ipa_get_param_count (info) != 0)
info->uses_analysis_done = true;
info->node_enqueued = false;
ipa_read_jump_function (ib, ipa_get_ith_jump_func (args, k), data_in);
}
for (e = node->indirect_calls; e; e = e->next_callee)
- ipa_read_indirect_edge_info (ib, data_in, e);
+ {
+ struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ int count = lto_input_uleb128 (ib);
+
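+ /* Read back the argument count and the jump functions written above,
+    then the indirect call information.  */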
+ ipa_set_cs_argument_count (args, count);
+ if (count)
+ {
+ args->jump_functions = ggc_alloc_cleared_vec_ipa_jump_func
+ (ipa_get_cs_argument_count (args));
+ for (k = 0; k < ipa_get_cs_argument_count (args); k++)
+ ipa_read_jump_function (ib, ipa_get_ith_jump_func (args, k), data_in);
+ }
+ ipa_read_indirect_edge_info (ib, data_in, e);
+ }
}
/* Write jump functions for nodes in SET. */
ipa_prop_write_jump_functions (cgraph_node_set set)
{
struct cgraph_node *node;
- struct output_block *ob = create_output_block (LTO_section_jump_functions);
+ struct output_block *ob;
unsigned int count = 0;
cgraph_node_set_iterator csi;
- ob->cgraph_node = NULL;
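+ /* If the IPA analyses have not been run, there are no jump functions
+    to stream out.  */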
+ if (!ipa_node_params_vector)
+ return;
+ ob = create_output_block (LTO_section_jump_functions);
+ ob->cgraph_node = NULL;
for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
{
node = csi_node (csi);
- if (node->analyzed && IPA_NODE_REF (node) != NULL)
+ if (cgraph_function_with_gimple_body_p (node)
+ && IPA_NODE_REF (node) != NULL)
count++;
}
for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
{
node = csi_node (csi);
- if (node->analyzed && IPA_NODE_REF (node) != NULL)
+ if (cgraph_function_with_gimple_body_p (node)
+ && IPA_NODE_REF (node) != NULL)
ipa_write_node_info (ob, node);
}
lto_output_1_stream (ob->main_stream, 0);
if (node->analyzed)
for (cs = node->callees; cs; cs = cs->next_callee)
{
+ struct cgraph_node *callee;
+
+ callee = cgraph_function_or_thunk_node (cs->callee, NULL);
if (ipa_get_cs_argument_count (IPA_EDGE_REF (cs))
- != ipa_get_param_count (IPA_NODE_REF (cs->callee)))
- ipa_set_called_with_variable_arg (IPA_NODE_REF (cs->callee));
+ != ipa_get_param_count (IPA_NODE_REF (callee)))
+ ipa_set_called_with_variable_arg (IPA_NODE_REF (callee));
}
}
+
+/* Given the jump function JFUNC, compute the lattice LAT that describes the
+ value coming down the callsite. INFO describes the caller node so that
+ pass-through jump functions can be evaluated. */
+
+void
+ipa_lattice_from_jfunc (struct ipa_node_params *info, struct ipcp_lattice *lat,
+ struct ipa_jump_func *jfunc)
+{
+ if (jfunc->type == IPA_JF_CONST)
+ {
+ lat->type = IPA_CONST_VALUE;
+ lat->constant = jfunc->value.constant;
+ }
+ else if (jfunc->type == IPA_JF_PASS_THROUGH)
+ {
+ struct ipcp_lattice *caller_lat;
+ tree cst;
+
+ caller_lat = ipa_get_lattice (info, jfunc->value.pass_through.formal_id);
+ lat->type = caller_lat->type;
+ if (caller_lat->type != IPA_CONST_VALUE)
+ return;
+ cst = caller_lat->constant;
+
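+ /* An operation other than NOP_EXPR means the actual argument is
+    OPERATION (formal, operand); try to fold it with the constant coming
+    from the caller.  fold_binary may fail and return NULL_TREE.  */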
+ if (jfunc->value.pass_through.operation != NOP_EXPR)
+ {
+ tree restype;
+ if (TREE_CODE_CLASS (jfunc->value.pass_through.operation)
+ == tcc_comparison)
+ restype = boolean_type_node;
+ else
+ restype = TREE_TYPE (cst);
+ cst = fold_binary (jfunc->value.pass_through.operation,
+ restype, cst, jfunc->value.pass_through.operand);
+ }
+ if (!cst || !is_gimple_ip_invariant (cst))
+ lat->type = IPA_BOTTOM;
+ lat->constant = cst;
+ }
+ else if (jfunc->type == IPA_JF_ANCESTOR)
+ {
+ struct ipcp_lattice *caller_lat;
+ tree t;
+
+ caller_lat = ipa_get_lattice (info, jfunc->value.ancestor.formal_id);
+ lat->type = caller_lat->type;
+ if (caller_lat->type != IPA_CONST_VALUE)
+ return;
+ if (TREE_CODE (caller_lat->constant) != ADDR_EXPR)
+ {
+ /* This can happen when the constant is a NULL pointer. */
+ lat->type = IPA_BOTTOM;
+ return;
+ }
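+ /* The constant is an address; rebuild the reference to the ancestor
+    at the recorded offset and take its address again.  */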
+ t = TREE_OPERAND (caller_lat->constant, 0);
+ t = build_ref_for_offset (EXPR_LOCATION (t), t,
+ jfunc->value.ancestor.offset,
+ jfunc->value.ancestor.type, NULL, false);
+ lat->constant = build_fold_addr_expr (t);
+ }
+ else
+ lat->type = IPA_BOTTOM;
+}
+
+/* Determine whether JFUNC evaluates to a constant and if so, return it.
+ Otherwise return NULL. INFO describes the caller node so that pass-through
+ jump functions can be evaluated. */
+
+tree
+ipa_cst_from_jfunc (struct ipa_node_params *info, struct ipa_jump_func *jfunc)
+{
+ struct ipcp_lattice lat;
+
+ ipa_lattice_from_jfunc (info, &lat, jfunc);
+ if (lat.type == IPA_CONST_VALUE)
+ return lat.constant;
+ else
+ return NULL_TREE;
+}
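+
+/* As a usage sketch (the caller below is hypothetical, not part of this
+   patch), a pass holding the caller's INFO and an edge CS can test
+   whether the I-th actual argument is a known constant:
+
+     tree cst = ipa_cst_from_jfunc (IPA_NODE_REF (cs->caller),
+				    ipa_get_ith_jump_func (IPA_EDGE_REF (cs), i));
+     if (cst)
+       ... the argument is known to be CST ...  */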