MODIFY_EXPRs that store to a dedicated returned-value variable.
The duplicated eh_region info of the copy will later be appended
to the info for the caller; the eh_region info in copied throwing
- statements and RESX_EXPRs is adjusted accordingly.
+   statements and RESX statements is adjusted accordingly.
Cloning: (only in C++) We have one body for a con/de/structor, and
multiple function decls, each with a unique parameter list.
*pointer_map_insert (id->decl_map, value) = value;
}
+/* Insert a KEY -> VALUE mapping into ID->debug_map, creating the map
+   lazily on first use.  KEY must be a PARM_DECL of the source
+   function and VALUE the VAR_DECL that stands in for it in the copy;
+   the mapping lets copied debug bind stmts keep referring to the
+   original parameter.  This is a no-op unless the source function is
+   in SSA form, debug stmts may be generated, and KEY is a valid
+   debug-bind target.  */
+
+static void
+insert_debug_decl_map (copy_body_data *id, tree key, tree value)
+{
+  /* Debug-decl mappings are only meaningful for SSA-form sources.  */
+  if (!gimple_in_ssa_p (id->src_cfun))
+    return;
+
+  /* Don't bother recording anything if no debug stmts will be emitted.  */
+  if (!MAY_HAVE_DEBUG_STMTS)
+    return;
+
+  /* Skip decls that debug binds cannot track anyway.  */
+  if (!target_for_debug_bind (key))
+    return;
+
+  gcc_assert (TREE_CODE (key) == PARM_DECL);
+  gcc_assert (TREE_CODE (value) == VAR_DECL);
+
+  /* Create the map on demand; most inlines never need it.  */
+  if (!id->debug_map)
+    id->debug_map = pointer_map_create ();
+
+  *pointer_map_insert (id->debug_map, key) = value;
+}
+
/* Construct new SSA name for old NAME. ID is the inline context. */
static tree
return new_tree;
}
+/* If nonzero, we're remapping the contents of inlined debug
+ statements. If negative, an error has occurred, such as a
+ reference to a variable that isn't available in the inlined
+ context. */
+int processing_debug_stmt = 0;
+
/* Remap DECL during the copying of the BLOCK tree for the function. */
tree
n = (tree *) pointer_map_contains (id->decl_map, decl);
+ if (!n && processing_debug_stmt)
+ {
+ processing_debug_stmt = -1;
+ return decl;
+ }
+
/* If we didn't already have an equivalent for this declaration,
create one now. */
if (!n)
new_tree = build_pointer_type_for_mode (remap_type (TREE_TYPE (type), id),
TYPE_MODE (type),
TYPE_REF_CAN_ALIAS_ALL (type));
+ if (TYPE_ATTRIBUTES (type) || TYPE_QUALS (type))
+ new_tree = build_type_attribute_qual_variant (new_tree,
+ TYPE_ATTRIBUTES (type),
+ TYPE_QUALS (type));
insert_decl_map (id, type, new_tree);
return new_tree;
}
new_tree = build_reference_type_for_mode (remap_type (TREE_TYPE (type), id),
TYPE_MODE (type),
TYPE_REF_CAN_ALIAS_ALL (type));
+ if (TYPE_ATTRIBUTES (type) || TYPE_QUALS (type))
+ new_tree = build_type_attribute_qual_variant (new_tree,
+ TYPE_ATTRIBUTES (type),
+ TYPE_QUALS (type));
insert_decl_map (id, type, new_tree);
return new_tree;
}
if (can_be_nonlocal (old_var, id))
{
if (TREE_CODE (old_var) == VAR_DECL
+ && ! DECL_EXTERNAL (old_var)
&& (var_ann (old_var) || !gimple_in_ssa_p (cfun)))
cfun->local_decls = tree_cons (NULL_TREE, old_var,
cfun->local_decls);
new_tree = alloc_stmt_list ();
ni = tsi_start (new_tree);
oi = tsi_start (*tp);
+ TREE_TYPE (new_tree) = TREE_TYPE (*tp);
*tp = new_tree;
for (; !tsi_end_p (oi); tsi_next (&oi))
vars. If not referenced from types only. */
if (gimple_in_ssa_p (cfun)
&& TREE_CODE (*tp) == VAR_DECL
- && id->remapping_type_depth == 0)
+ && id->remapping_type_depth == 0
+ && !processing_debug_stmt)
add_referenced_var (*tp);
/* We should never have TREE_BLOCK set on non-statements. */
copy_tree_r (tp, walk_subtrees, NULL);
/* Global variables we haven't seen yet needs to go into referenced
- vars. If not referenced from types only. */
+ vars. If not referenced from types or debug stmts only. */
if (gimple_in_ssa_p (cfun)
&& TREE_CODE (*tp) == VAR_DECL
- && id->remapping_type_depth == 0)
+ && id->remapping_type_depth == 0
+ && !processing_debug_stmt)
add_referenced_var (*tp);
/* If EXPR has block defined, map it to newly constructed block.
TREE_BLOCK (*tp) = new_block;
}
- if (TREE_CODE (*tp) == RESX_EXPR && id->eh_region_offset)
- TREE_OPERAND (*tp, 0) =
- build_int_cst (NULL_TREE,
- id->eh_region_offset
- + TREE_INT_CST_LOW (TREE_OPERAND (*tp, 0)));
-
if (TREE_CODE (*tp) != OMP_CLAUSE)
TREE_TYPE (*tp) = remap_type (TREE_TYPE (*tp), id);
return NULL_TREE;
}
+/* Helper for remap_gimple_stmt.  Given an EH region number OLD_NR for
+   the source function, map it to the number of the duplicated EH
+   region in the destination function, via ID->eh_map.  The map is
+   assumed to contain an entry for every source region (the slot
+   returned by pointer_map_contains is dereferenced unconditionally),
+   so callers must only pass region numbers that were duplicated.  */
+
+static int
+remap_eh_region_nr (int old_nr, copy_body_data *id)
+{
+  eh_region old_r, new_r;
+  void **slot;
+
+  /* Resolve the number to the source function's region object, then
+     look up its duplicate recorded by duplicate_eh_regions.  */
+  old_r = get_eh_region_from_number_fn (id->src_cfun, old_nr);
+  slot = pointer_map_contains (id->eh_map, old_r);
+  new_r = (eh_region) *slot;
+
+  return new_r->index;
+}
+
+/* Similar to remap_eh_region_nr, but operate on INTEGER_CSTs: OLD_T_NR
+   holds the source region number (as used in __builtin_eh_pointer and
+   friends); return a fresh INTEGER_CST holding the remapped number.  */
+
+static tree
+remap_eh_region_tree_nr (tree old_t_nr, copy_body_data *id)
+{
+  int old_nr, new_nr;
+
+  old_nr = tree_low_cst (old_t_nr, 0);
+  new_nr = remap_eh_region_nr (old_nr, id);
+
+  return build_int_cst (NULL, new_nr);
+}
/* Helper for copy_bb. Remap statement STMT using the inlining
information in ID. Return the new statement copy. */
}
}
+ if (gimple_debug_bind_p (stmt))
+ {
+ copy = gimple_build_debug_bind (gimple_debug_bind_get_var (stmt),
+ gimple_debug_bind_get_value (stmt),
+ stmt);
+ VEC_safe_push (gimple, heap, id->debug_stmts, copy);
+ return copy;
+ }
+
/* Create a new deep copy of the statement. */
copy = gimple_copy (stmt);
+
+ /* Remap the region numbers for __builtin_eh_{pointer,filter},
+ RESX and EH_DISPATCH. */
+ if (id->eh_map)
+ switch (gimple_code (copy))
+ {
+ case GIMPLE_CALL:
+ {
+ tree r, fndecl = gimple_call_fndecl (copy);
+ if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_EH_COPY_VALUES:
+ r = gimple_call_arg (copy, 1);
+ r = remap_eh_region_tree_nr (r, id);
+ gimple_call_set_arg (copy, 1, r);
+ /* FALLTHRU */
+
+ case BUILT_IN_EH_POINTER:
+ case BUILT_IN_EH_FILTER:
+ r = gimple_call_arg (copy, 0);
+ r = remap_eh_region_tree_nr (r, id);
+ gimple_call_set_arg (copy, 0, r);
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ case GIMPLE_RESX:
+ {
+ int r = gimple_resx_region (copy);
+ r = remap_eh_region_nr (r, id);
+ gimple_resx_set_region (copy, r);
+ }
+ break;
+
+ case GIMPLE_EH_DISPATCH:
+ {
+ int r = gimple_eh_dispatch_region (copy);
+ r = remap_eh_region_nr (r, id);
+ gimple_eh_dispatch_set_region (copy, r);
+ }
+ break;
+
+ default:
+ break;
+ }
}
/* If STMT has a block defined, map it to the newly constructed
gimple_set_block (copy, new_block);
+ if (gimple_debug_bind_p (copy))
+ return copy;
+
/* Remap all the operands in COPY. */
memset (&wi, 0, sizeof (wi));
wi.info = id;
gimple_set_vuse (copy, NULL_TREE);
}
- /* We have to handle EH region remapping of GIMPLE_RESX specially because
- the region number is not an operand. */
- if (gimple_code (stmt) == GIMPLE_RESX && id->eh_region_offset)
- {
- gimple_resx_set_region (copy, gimple_resx_region (stmt) + id->eh_region_offset);
- }
return copy;
}
callgraph edges and update or duplicate them. */
if (is_gimple_call (stmt))
{
- struct cgraph_edge *edge = cgraph_edge (id->src_node, orig_stmt);
+ struct cgraph_edge *edge;
int flags;
switch (id->transform_call_graph_edges)
{
- case CB_CGE_DUPLICATE:
- if (edge)
- cgraph_clone_edge (edge, id->dst_node, stmt,
- REG_BR_PROB_BASE, 1,
- edge->frequency, true);
- break;
-
- case CB_CGE_MOVE_CLONES:
- cgraph_set_call_stmt_including_clones (id->dst_node, orig_stmt, stmt);
- break;
-
- case CB_CGE_MOVE:
- if (edge)
- cgraph_set_call_stmt (edge, stmt);
- break;
-
- default:
- gcc_unreachable ();
+ case CB_CGE_DUPLICATE:
+ edge = cgraph_edge (id->src_node, orig_stmt);
+ if (edge)
+ edge = cgraph_clone_edge (edge, id->dst_node, stmt,
+ REG_BR_PROB_BASE, 1,
+ edge->frequency, true);
+ break;
+
+ case CB_CGE_MOVE_CLONES:
+ cgraph_set_call_stmt_including_clones (id->dst_node,
+ orig_stmt, stmt);
+ edge = cgraph_edge (id->dst_node, stmt);
+ break;
+
+ case CB_CGE_MOVE:
+ edge = cgraph_edge (id->dst_node, orig_stmt);
+ if (edge)
+ cgraph_set_call_stmt (edge, stmt);
+ break;
+
+ default:
+ gcc_unreachable ();
}
- edge = cgraph_edge (id->src_node, orig_stmt);
- /* Constant propagation on argument done during inlining
- may create new direct call. Produce an edge for it. */
- if ((!edge
- || (edge->indirect_call
- && id->transform_call_graph_edges == CB_CGE_MOVE_CLONES))
- && is_gimple_call (stmt)
- && (fn = gimple_call_fndecl (stmt)) != NULL)
- {
- struct cgraph_node *dest = cgraph_node (fn);
-
- /* We have missing edge in the callgraph. This can happen in one case
- where previous inlining turned indirect call into direct call by
- constant propagating arguments. In all other cases we hit a bug
- (incorrect node sharing is most common reason for missing edges. */
- gcc_assert (dest->needed || !dest->analyzed);
- if (id->transform_call_graph_edges == CB_CGE_MOVE_CLONES)
- cgraph_create_edge_including_clones (id->dst_node, dest, stmt,
- bb->count,
- compute_call_stmt_bb_frequency (id->dst_node->decl, bb),
- bb->loop_depth,
- CIF_ORIGINALLY_INDIRECT_CALL);
- else
- cgraph_create_edge (id->dst_node, dest, stmt,
- bb->count, CGRAPH_FREQ_BASE,
- bb->loop_depth)->inline_failed
- = CIF_ORIGINALLY_INDIRECT_CALL;
- if (dump_file)
- {
- fprintf (dump_file, "Created new direct edge to %s",
- cgraph_node_name (dest));
- }
- }
+ /* Constant propagation on argument done during inlining
+ may create new direct call. Produce an edge for it. */
+ if ((!edge
+ || (edge->indirect_call
+ && id->transform_call_graph_edges == CB_CGE_MOVE_CLONES))
+ && is_gimple_call (stmt)
+ && (fn = gimple_call_fndecl (stmt)) != NULL)
+ {
+ struct cgraph_node *dest = cgraph_node (fn);
+
+ /* We have missing edge in the callgraph. This can happen
+ when previous inlining turned an indirect call into a
+ direct call by constant propagating arguments. In all
+ other cases we hit a bug (incorrect node sharing is the
+ most common reason for missing edges). */
+ gcc_assert (dest->needed || !dest->analyzed);
+ if (id->transform_call_graph_edges == CB_CGE_MOVE_CLONES)
+ cgraph_create_edge_including_clones
+ (id->dst_node, dest, stmt, bb->count,
+ compute_call_stmt_bb_frequency (id->dst_node->decl, bb),
+ bb->loop_depth, CIF_ORIGINALLY_INDIRECT_CALL);
+ else
+ cgraph_create_edge (id->dst_node, dest, stmt,
+ bb->count, CGRAPH_FREQ_BASE,
+ bb->loop_depth)->inline_failed
+ = CIF_ORIGINALLY_INDIRECT_CALL;
+ if (dump_file)
+ {
+ fprintf (dump_file, "Created new direct edge to %s",
+ cgraph_node_name (dest));
+ }
+ }
flags = gimple_call_flags (stmt);
-
if (flags & ECF_MAY_BE_ALLOCA)
cfun->calls_alloca = true;
if (flags & ECF_RETURNS_TWICE)
cfun->calls_setjmp = true;
}
- /* If you think we can abort here, you are wrong.
- There is no region 0 in gimple. */
- gcc_assert (lookup_stmt_eh_region_fn (id->src_cfun, orig_stmt) != 0);
-
- if (stmt_could_throw_p (stmt)
- /* When we are cloning for inlining, we are supposed to
- construct a clone that calls precisely the same functions
- as original. However IPA optimizers might've proved
- earlier some function calls as non-trapping that might
- render some basic blocks dead that might become
- unreachable.
-
- We can't update SSA with unreachable blocks in CFG and thus
- we prevent the scenario by preserving even the "dead" eh
- edges until the point they are later removed by
- fixup_cfg pass. */
- || (id->transform_call_graph_edges == CB_CGE_MOVE_CLONES
- && lookup_stmt_eh_region_fn (id->src_cfun, orig_stmt) > 0))
- {
- int region = lookup_stmt_eh_region_fn (id->src_cfun, orig_stmt);
-
- /* Add an entry for the copied tree in the EH hashtable.
- When cloning or versioning, use the hashtable in
- cfun, and just copy the EH number. When inlining, use the
- hashtable in the caller, and adjust the region number. */
- if (region > 0)
- add_stmt_to_eh_region (stmt, region + id->eh_region_offset);
-
- /* If this tree doesn't have a region associated with it,
- and there is a "current region,"
- then associate this tree with the current region
- and add edges associated with this region. */
- if (lookup_stmt_eh_region_fn (id->src_cfun, orig_stmt) <= 0
- && id->eh_region > 0
- && stmt_could_throw_p (stmt))
- add_stmt_to_eh_region (stmt, id->eh_region);
- }
+ maybe_duplicate_eh_stmt_fn (cfun, stmt, id->src_cfun, orig_stmt,
+ id->eh_map, id->eh_lp_nr);
- if (gimple_in_ssa_p (cfun))
+ if (gimple_in_ssa_p (cfun) && !is_gimple_debug (stmt))
{
ssa_op_iter i;
tree def;
bool can_throw, nonlocal_goto;
copy_stmt = gsi_stmt (si);
- update_stmt (copy_stmt);
- if (gimple_in_ssa_p (cfun))
- mark_symbols_for_renaming (copy_stmt);
+ if (!is_gimple_debug (copy_stmt))
+ {
+ update_stmt (copy_stmt);
+ if (gimple_in_ssa_p (cfun))
+ mark_symbols_for_renaming (copy_stmt);
+ }
/* Do this before the possible split_block. */
gsi_next (&si);
}
}
- if (can_throw)
+ if (gimple_code (copy_stmt) == GIMPLE_EH_DISPATCH)
+ make_eh_dispatch_edges (copy_stmt);
+ else if (can_throw)
make_eh_edges (copy_stmt);
if (nonlocal_goto)
new_arg = force_gimple_operand (new_arg, &stmts, true, NULL);
gsi_insert_seq_on_edge_immediate (new_edge, stmts);
}
- add_phi_arg (new_phi, new_arg, new_edge);
+ add_phi_arg (new_phi, new_arg, new_edge,
+ gimple_phi_arg_location_from_edge (phi, old_edge));
}
}
}
/* Duplicate any exception-handling regions. */
if (cfun->eh)
- {
- id->eh_region_offset
- = duplicate_eh_regions (cfun_to_copy, remap_decl_1, id,
- 0, id->eh_region);
- }
+ id->eh_map = duplicate_eh_regions (cfun_to_copy, NULL, id->eh_lp_nr,
+ remap_decl_1, id);
/* Use aux pointers to map the original blocks to copy. */
FOR_EACH_BB_FN (bb, cfun_to_copy)
entry_block_map->aux = NULL;
exit_block_map->aux = NULL;
+ if (id->eh_map)
+ {
+ pointer_map_destroy (id->eh_map);
+ id->eh_map = NULL;
+ }
+
return new_fndecl;
}
+/* Copy the debug STMT using ID.  We deal with these statements in a
+   special way: if any variable in their VALUE expression wasn't
+   remapped yet, we won't remap it, because that would get decl uids
+   out of sync, causing codegen differences between -g and -g0.  If
+   this arises, we drop the VALUE expression altogether.  */
+
+static void
+copy_debug_stmt (gimple stmt, copy_body_data *id)
+{
+  tree t, *n;
+  struct walk_stmt_info wi;
+
+  /* Map the stmt's block to its copy, falling back to ID->block when
+     the original block has no entry in the decl map.  */
+  t = id->block;
+  if (gimple_block (stmt))
+    {
+      /* NOTE(review): this inner N shadows the outer declaration; the
+	 outer N is used again further below for the debug_map lookup.  */
+      tree *n;
+      n = (tree *) pointer_map_contains (id->decl_map, gimple_block (stmt));
+      if (n)
+	t = *n;
+    }
+  gimple_set_block (stmt, t);
+
+  /* Remap all the operands in COPY.  */
+  memset (&wi, 0, sizeof (wi));
+  wi.info = id;
+
+  /* Tell remap_decl (via remap_gimple_op_r) that we are inside a debug
+     stmt: unmapped decls must not be remapped, only flagged by setting
+     this to -1.  */
+  processing_debug_stmt = 1;
+
+  t = gimple_debug_bind_get_var (stmt);
+
+  /* Parameters that were optimized into local copies are recorded in
+     ID->debug_map (see insert_debug_decl_map); prefer that mapping so
+     the bind tracks the substitute VAR_DECL.  */
+  if (TREE_CODE (t) == PARM_DECL && id->debug_map
+      && (n = (tree *) pointer_map_contains (id->debug_map, t)))
+    {
+      gcc_assert (TREE_CODE (*n) == VAR_DECL);
+      t = *n;
+    }
+  else
+    walk_tree (&t, remap_gimple_op_r, &wi, NULL);
+
+  gimple_debug_bind_set_var (stmt, t);
+
+  if (gimple_debug_bind_has_value_p (stmt))
+    walk_tree (gimple_debug_bind_get_value_ptr (stmt),
+	       remap_gimple_op_r, &wi, NULL);
+
+  /* Punt if any decl couldn't be remapped.  */
+  if (processing_debug_stmt < 0)
+    gimple_debug_bind_reset_value (stmt);
+
+  processing_debug_stmt = 0;
+
+  update_stmt (stmt);
+  if (gimple_in_ssa_p (cfun))
+    mark_symbols_for_renaming (stmt);
+}
+
+/* Process deferred debug stmts.  In order to give values better odds
+   of being successfully remapped, we delay the processing of debug
+   stmts until all other stmts that might require remapping are
+   processed.  The deferred stmts were queued on ID->debug_stmts by
+   remap_gimple_stmt; the vector is freed once drained.  */
+
+static void
+copy_debug_stmts (copy_body_data *id)
+{
+  size_t i;
+  gimple stmt;
+
+  /* Nothing was queued (e.g. compiling without -g).  */
+  if (!id->debug_stmts)
+    return;
+
+  for (i = 0; VEC_iterate (gimple, id->debug_stmts, i, stmt); i++)
+    copy_debug_stmt (stmt, id);
+
+  VEC_free (gimple, heap, id->debug_stmts);
+}
+
/* Make a copy of the body of SRC_FN so that it can be inserted inline in
another function. */
return body;
}
+/* Make a copy of the body of FN so that it can be inserted inline in
+ another function. */
+
static tree
copy_body (copy_body_data *id, gcov_type count, int frequency,
basic_block entry_block_map, basic_block exit_block_map)
/* If this body has a CFG, walk CFG and copy. */
gcc_assert (ENTRY_BLOCK_PTR_FOR_FUNCTION (DECL_STRUCT_FUNCTION (fndecl)));
body = copy_cfg_body (id, count, frequency, entry_block_map, exit_block_map);
+ copy_debug_stmts (id);
return body;
}
return var && auto_var_in_fn_p (var, fn);
}
+/* Append to BB a debug annotation that binds VAR to VALUE, inheriting
+   lexical block and line number information from BASE_STMT, if given,
+   or from the last stmt of the block otherwise.  Returns the new
+   debug bind stmt, or NULL when no annotation is emitted (source not
+   in SSA form, debug stmts disabled, or VAR not a debug-bind target).
+   When BB is NULL the stmt is built but not inserted anywhere.  */
+
+static gimple
+insert_init_debug_bind (copy_body_data *id,
+			basic_block bb, tree var, tree value,
+			gimple base_stmt)
+{
+  gimple note;
+  gimple_stmt_iterator gsi;
+  tree tracked_var;
+
+  if (!gimple_in_ssa_p (id->src_cfun))
+    return NULL;
+
+  if (!MAY_HAVE_DEBUG_STMTS)
+    return NULL;
+
+  /* target_for_debug_bind may substitute the decl actually tracked
+     for VAR; bail out if VAR cannot be tracked at all.  */
+  tracked_var = target_for_debug_bind (var);
+  if (!tracked_var)
+    return NULL;
+
+  /* Default the source location to the last stmt of BB.  */
+  if (bb)
+    {
+      gsi = gsi_last_bb (bb);
+      if (!base_stmt && !gsi_end_p (gsi))
+	base_stmt = gsi_stmt (gsi);
+    }
+
+  note = gimple_build_debug_bind (tracked_var, value, base_stmt);
+
+  /* Append the bind at the end of BB (after the last stmt, or as the
+     sole stmt of an empty block).  */
+  if (bb)
+    {
+      if (!gsi_end_p (gsi))
+	gsi_insert_after (&gsi, note, GSI_SAME_STMT);
+      else
+	gsi_insert_before (&gsi, note, GSI_SAME_STMT);
+    }
+
+  return note;
+}
+
static void
-insert_init_stmt (basic_block bb, gimple init_stmt)
+insert_init_stmt (copy_body_data *id, basic_block bb, gimple init_stmt)
{
/* If VAR represents a zero-sized variable, it's possible that the
assignment statement may result in no gimple statements. */
from a rhs with a conversion. Handle that here by forcing the
rhs into a temporary. gimple_regimplify_operands is not
prepared to do this for us. */
- if (!is_gimple_reg (gimple_assign_lhs (init_stmt))
+ if (!is_gimple_debug (init_stmt)
+ && !is_gimple_reg (gimple_assign_lhs (init_stmt))
&& is_gimple_reg_type (TREE_TYPE (gimple_assign_lhs (init_stmt)))
&& gimple_assign_rhs_class (init_stmt) == GIMPLE_UNARY_RHS)
{
gsi_insert_after (&si, init_stmt, GSI_NEW_STMT);
gimple_regimplify_operands (init_stmt, &si);
mark_symbols_for_renaming (init_stmt);
+
+ if (!is_gimple_debug (init_stmt) && MAY_HAVE_DEBUG_STMTS)
+ {
+ tree var, def = gimple_assign_lhs (init_stmt);
+
+ if (TREE_CODE (def) == SSA_NAME)
+ var = SSA_NAME_VAR (def);
+ else
+ var = def;
+
+ insert_init_debug_bind (id, bb, var, def, init_stmt);
+ }
}
}
rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (p), value);
}
+ /* Make an equivalent VAR_DECL. Note that we must NOT remap the type
+ here since the type of this decl must be visible to the calling
+ function. */
+ var = copy_decl_to_var (p, id);
+
+ /* We're actually using the newly-created var. */
+ if (gimple_in_ssa_p (cfun) && TREE_CODE (var) == VAR_DECL)
+ {
+ get_var_ann (var);
+ add_referenced_var (var);
+ }
+
+ /* Declare this new variable. */
+ TREE_CHAIN (var) = *vars;
+ *vars = var;
+
+ /* Make gimplifier happy about this variable. */
+ DECL_SEEN_IN_BIND_EXPR_P (var) = 1;
+
/* If the parameter is never assigned to, has no SSA_NAMEs created,
- we may not need to create a new variable here at all. Instead, we may
- be able to just use the argument value. */
+ we would not need to create a new variable here at all, if it
+ weren't for debug info. Still, we can just use the argument
+ value. */
if (TREE_READONLY (p)
&& !TREE_ADDRESSABLE (p)
&& value && !TREE_SIDE_EFFECTS (value)
&& ! self_inlining_addr_expr (value, fn))
{
insert_decl_map (id, p, value);
- return NULL;
+ insert_debug_decl_map (id, p, var);
+ return insert_init_debug_bind (id, bb, var, value, NULL);
}
}
- /* Make an equivalent VAR_DECL. Note that we must NOT remap the type
- here since the type of this decl must be visible to the calling
- function. */
- var = copy_decl_to_var (p, id);
- if (gimple_in_ssa_p (cfun) && TREE_CODE (var) == VAR_DECL)
- {
- get_var_ann (var);
- add_referenced_var (var);
- }
-
/* Register the VAR_DECL as the equivalent for the PARM_DECL;
that way, when the PARM_DECL is encountered, it will be
automatically replaced by the VAR_DECL. */
insert_decl_map (id, p, var);
- /* Declare this new variable. */
- TREE_CHAIN (var) = *vars;
- *vars = var;
-
- /* Make gimplifier happy about this variable. */
- DECL_SEEN_IN_BIND_EXPR_P (var) = 1;
-
/* Even if P was TREE_READONLY, the new VAR should not be.
In the original code, we would have constructed a
temporary, and then the function body would have never
Do replacement at -O0 for const arguments replaced by constant.
This is important for builtin_constant_p and other construct requiring
- constant argument to be visible in inlined function body.
-
- FIXME: This usually kills the last connection in between inlined
- function parameter and the actual value in debug info. Can we do
- better here? If we just inserted the statement, copy propagation
- would kill it anyway as it always did in older versions of GCC.
-
- We might want to introduce a notion that single SSA_NAME might
- represent multiple variables for purposes of debugging. */
+ constant argument to be visible in inlined function body. */
if (gimple_in_ssa_p (cfun) && rhs && def && is_gimple_reg (p)
&& (optimize
|| (TREE_READONLY (p)
&& !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def))
{
insert_decl_map (id, def, rhs);
- return NULL;
+ return insert_init_debug_bind (id, bb, var, rhs, NULL);
}
/* If the value of argument is never used, don't care about initializing
if (optimize && gimple_in_ssa_p (cfun) && !def && is_gimple_reg (p))
{
gcc_assert (!value || !TREE_SIDE_EFFECTS (value));
- return NULL;
+ return insert_init_debug_bind (id, bb, var, rhs, NULL);
}
/* Initialize this VAR_DECL from the equivalent argument. Convert
if (rhs == error_mark_node)
{
insert_decl_map (id, p, var);
- return NULL;
+ return insert_init_debug_bind (id, bb, var, rhs, NULL);
}
STRIP_USELESS_TYPE_CONVERSION (rhs);
init_stmt = gimple_build_assign (var, rhs);
if (bb && init_stmt)
- insert_init_stmt (bb, init_stmt);
+ insert_init_stmt (id, bb, init_stmt);
}
return init_stmt;
}
lhs = gimple_assign_lhs (stmt);
rhs = gimple_assign_rhs1 (stmt);
- /* EH magic stuff is most probably going to be optimized out.
- We rarely really need to save EH info for unwinding
- nested exceptions. */
- if (TREE_CODE (lhs) == FILTER_EXPR
- || TREE_CODE (lhs) == EXC_PTR_EXPR
- || TREE_CODE (rhs) == FILTER_EXPR
- || TREE_CODE (rhs) == EXC_PTR_EXPR)
- return 0;
if (is_gimple_reg (lhs))
cost = 0;
else
case GIMPLE_PHI:
case GIMPLE_RETURN:
case GIMPLE_PREDICT:
+ case GIMPLE_DEBUG:
return 0;
case GIMPLE_ASM:
- case GIMPLE_RESX:
return 1;
+ case GIMPLE_RESX:
+ /* This is either going to be an external function call with one
+ argument, or two register copy statements plus a goto. */
+ return 2;
+
+ case GIMPLE_EH_DISPATCH:
+ /* ??? This is going to turn into a switch statement. Ideally
+ we'd have a look at the eh region and estimate the number of
+ edges involved. */
+ return 10;
+
case GIMPLE_BIND:
return estimate_num_insns_seq (gimple_bind_body (stmt), weights);
{
tree retvar, use_retvar;
tree fn;
- struct pointer_map_t *st;
+ struct pointer_map_t *st, *dst;
tree return_slot;
tree modify_dest;
location_t saved_location;
cg_edge = cgraph_edge (id->dst_node, stmt);
+ /* Don't inline functions with different EH personalities. */
+ if (DECL_FUNCTION_PERSONALITY (cg_edge->caller->decl)
+ && DECL_FUNCTION_PERSONALITY (cg_edge->callee->decl)
+ && (DECL_FUNCTION_PERSONALITY (cg_edge->caller->decl)
+ != DECL_FUNCTION_PERSONALITY (cg_edge->callee->decl)))
+ goto egress;
+
/* Don't try to inline functions that are not well-suited to
inlining. */
if (!cgraph_inline_p (cg_edge, &reason))
#endif
/* We will be inlining this callee. */
- id->eh_region = lookup_stmt_eh_region (stmt);
+ id->eh_lp_nr = lookup_stmt_eh_lp (stmt);
+
+ /* Update the callers EH personality. */
+ if (DECL_FUNCTION_PERSONALITY (cg_edge->callee->decl))
+ DECL_FUNCTION_PERSONALITY (cg_edge->caller->decl)
+ = DECL_FUNCTION_PERSONALITY (cg_edge->callee->decl);
/* Split the block holding the GIMPLE_CALL. */
e = split_block (bb, stmt);
map. */
st = id->decl_map;
id->decl_map = pointer_map_create ();
+ dst = id->debug_map;
+ id->debug_map = NULL;
/* Record the function we are about to inline. */
id->src_fn = fn;
}
/* Clean up. */
+ if (id->debug_map)
+ {
+ pointer_map_destroy (id->debug_map);
+ id->debug_map = dst;
+ }
pointer_map_destroy (id->decl_map);
id->decl_map = st;
gimple old_stmt = gsi_stmt (gsi);
tree old_decl = is_gimple_call (old_stmt) ? gimple_call_fndecl (old_stmt) : 0;
- if (fold_stmt (&gsi))
+ if (old_decl && DECL_BUILT_IN (old_decl))
+ {
+ /* Folding builtins can create multiple instructions,
+ we need to look at all of them. */
+ gimple_stmt_iterator i2 = gsi;
+ gsi_prev (&i2);
+ if (fold_stmt (&gsi))
+ {
+ gimple new_stmt;
+ if (gsi_end_p (i2))
+ i2 = gsi_start_bb (BASIC_BLOCK (first));
+ else
+ gsi_next (&i2);
+ while (1)
+ {
+ new_stmt = gsi_stmt (i2);
+ update_stmt (new_stmt);
+ cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
+ new_stmt);
+
+ if (new_stmt == gsi_stmt (gsi))
+ {
+		      /* It is okay to check only the very last of these
+			 statements.  If it is a throwing statement
+			 nothing will change.  If it isn't, this can
+			 remove EH edges.  That would only be wrong if
+			 some intermediate stmt could throw while the
+			 last one cannot; then we would have to split
+			 the block, which we cannot do here, and we
+			 would lose anyway.  And as builtins probably
+			 never throw, this is all moot anyway.  */
+ if (maybe_clean_or_replace_eh_stmt (old_stmt,
+ new_stmt))
+ gimple_purge_dead_eh_edges (BASIC_BLOCK (first));
+ break;
+ }
+ gsi_next (&i2);
+ }
+ }
+ }
+ else if (fold_stmt (&gsi))
{
/* Re-read the statement from GSI as fold_stmt() may
have changed it. */
if (is_gimple_call (old_stmt)
|| is_gimple_call (new_stmt))
- cgraph_update_edges_for_call_stmt (old_stmt, old_decl, new_stmt);
+ cgraph_update_edges_for_call_stmt (old_stmt, old_decl,
+ new_stmt);
if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
gimple_purge_dead_eh_edges (BASIC_BLOCK (first));
fold_marked_statements (last, id.statements_to_fold);
pointer_set_destroy (id.statements_to_fold);
+ gcc_assert (!id.debug_stmts);
+
/* Renumber the (code) basic_blocks consecutively. */
compact_blocks ();
/* Renumber the lexical scoping (non-code) blocks consecutively. */
id.src_fn = current_function_decl;
id.dst_fn = current_function_decl;
id.decl_map = pointer_map_create ();
+ id.debug_map = NULL;
id.copy_decl = copy_decl_no_change;
id.transform_call_graph_edges = CB_CGE_DUPLICATE;
/* Clean up. */
pointer_map_destroy (id.decl_map);
+ if (id.debug_map)
+ pointer_map_destroy (id.debug_map);
return expr;
}
id.src_fn = current_function_decl;
id.dst_fn = current_function_decl;
id.decl_map = pointer_map_create ();
+ id.debug_map = NULL;
id.copy_decl = copy_decl_no_change;
id.transform_call_graph_edges = CB_CGE_DUPLICATE;
/* Clean up. */
pointer_map_destroy (id.decl_map);
+ if (id.debug_map)
+ pointer_map_destroy (id.debug_map);
return copy;
}
bool
tree_versionable_function_p (tree fndecl)
{
-  return copy_forbidden (DECL_STRUCT_FUNCTION (fndecl), fndecl) == NULL;
+  /* Functions carrying the "noclone" attribute must never be
+     duplicated; otherwise defer to copy_forbidden.  */
+  return (!lookup_attribute ("noclone", DECL_ATTRIBUTES (fndecl))
+	  && copy_forbidden (DECL_STRUCT_FUNCTION (fndecl), fndecl) == NULL);
}
/* Delete all unreachable basic blocks and update callgraph.
tree p;
unsigned i;
struct ipa_replace_map *replace_info;
- basic_block old_entry_block;
+ basic_block old_entry_block, bb;
VEC (gimple, heap) *init_stmts = VEC_alloc (gimple, heap, 10);
tree t_step;
DECL_ARTIFICIAL (new_decl) = 1;
DECL_ABSTRACT_ORIGIN (new_decl) = DECL_ORIGIN (old_decl);
+ DECL_FUNCTION_PERSONALITY (new_decl) = DECL_FUNCTION_PERSONALITY (old_decl);
/* Prepare the data structures for the tree copy. */
memset (&id, 0, sizeof (id));
/* Generate a new name for the new version. */
id.statements_to_fold = pointer_set_create ();
-
+
id.decl_map = pointer_map_create ();
+ id.debug_map = NULL;
id.src_fn = old_decl;
id.dst_fn = new_decl;
id.src_node = old_version_node;
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (new_decl);
- if (VEC_length (gimple, init_stmts))
- {
- basic_block bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
- while (VEC_length (gimple, init_stmts))
- insert_init_stmt (bb, VEC_pop (gimple, init_stmts));
- }
+ /* We want to create the BB unconditionally, so that the addition of
+ debug stmts doesn't affect BB count, which may in the end cause
+ codegen differences. */
+ bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ while (VEC_length (gimple, init_stmts))
+ insert_init_stmt (&id, bb, VEC_pop (gimple, init_stmts));
update_clone_info (&id);
/* Remap the nonlocal_goto_save_area, if any. */
/* Clean up. */
pointer_map_destroy (id.decl_map);
+ if (id.debug_map)
+ pointer_map_destroy (id.debug_map);
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
+ gcc_assert (!id.debug_stmts);
VEC_free (gimple, heap, init_stmts);
pop_cfun ();
current_function_decl = old_current_function_decl;
id.do_not_unshare = true;
/* We're not inside any EH region. */
- id.eh_region = -1;
+ id.eh_lp_nr = 0;
t = copy_tree_body (&id);
pointer_map_destroy (decl_map);
id.dst_fn = current_function_decl;
id.src_cfun = cfun;
id.decl_map = pointer_map_create ();
+ id.debug_map = NULL;
id.copy_decl = copy_decl_no_change;
type = remap_type_1 (type, &id);
pointer_map_destroy (id.decl_map);
+ if (id.debug_map)
+ pointer_map_destroy (id.debug_map);
TYPE_CANONICAL (type) = type;
}
/* Return whether it is safe to inline a function because it used different
- target specific options or different optimization options. */
+ target specific options or call site actual types mismatch parameter types.
+ E is the call edge to be checked. */
bool
-tree_can_inline_p (tree caller, tree callee)
+tree_can_inline_p (struct cgraph_edge *e)
{
#if 0
/* This causes a regression in SPEC in that it prevents a cold function from
return false;
}
#endif
+ tree caller, callee;
+
+ caller = e->caller->decl;
+ callee = e->callee->decl;
+
+ /* We cannot inline a function that uses a different EH personality
+ than the caller. */
+ if (DECL_FUNCTION_PERSONALITY (caller)
+ && DECL_FUNCTION_PERSONALITY (callee)
+ && (DECL_FUNCTION_PERSONALITY (caller)
+ != DECL_FUNCTION_PERSONALITY (callee)))
+ {
+ e->inline_failed = CIF_UNSPECIFIED;
+ gimple_call_set_cannot_inline (e->call_stmt, true);
+ return false;
+ }
/* Allow the backend to decide if inlining is ok. */
- return targetm.target_option.can_inline_p (caller, callee);
+ if (!targetm.target_option.can_inline_p (caller, callee))
+ {
+ e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
+ gimple_call_set_cannot_inline (e->call_stmt, true);
+ return false;
+ }
+
+ if (!gimple_check_call_args (e->call_stmt))
+ {
+ e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
+ gimple_call_set_cannot_inline (e->call_stmt, true);
+ return false;
+ }
+
+ return true;
}