{
tree stmt = tsi_stmt (oi);
if (TREE_CODE (stmt) == STATEMENT_LIST)
+ /* This copy is not redundant; tsi_link_after will smash this
+ STATEMENT_LIST into the end of the one we're building, and we
+ don't want to do that with the original. */
copy_statement_list (&stmt);
tsi_link_after (&ni, stmt, TSI_CONTINUE_LINKING);
}
{
/* Otherwise, just copy the node. Note that copy_tree_r already
knows not to copy VAR_DECLs, etc., so this is safe. */
+
+ /* We should never have TREE_BLOCK set on non-statements. */
+ if (EXPR_P (*tp))
+ gcc_assert (!TREE_BLOCK (*tp));
+
if (TREE_CODE (*tp) == MEM_REF)
{
tree ptr = TREE_OPERAND (*tp, 0);
+ tree type = remap_type (TREE_TYPE (*tp), id);
tree old = *tp;
tree tem;
if ((tem = maybe_fold_offset_to_reference (EXPR_LOCATION (*tp),
ptr,
TREE_OPERAND (*tp, 1),
- TREE_TYPE (*tp)))
+ type))
&& TREE_THIS_VOLATILE (tem) == TREE_THIS_VOLATILE (old))
{
tree *tem_basep = &tem;
}
else
{
- *tp = fold_build2 (MEM_REF, TREE_TYPE (*tp),
+ *tp = fold_build2 (MEM_REF, type,
ptr, TREE_OPERAND (*tp, 1));
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
TREE_THIS_NOTRAP (*tp) = TREE_THIS_NOTRAP (old);
tweak some special cases. */
copy_tree_r (tp, walk_subtrees, NULL);
+ if (TREE_CODE (*tp) != OMP_CLAUSE)
+ TREE_TYPE (*tp) = remap_type (TREE_TYPE (*tp), id);
+
/* Global variables we haven't seen yet need to go into referenced
vars. If not referenced from types only. */
if (gimple_in_ssa_p (cfun)
&& !processing_debug_stmt)
add_referenced_var (*tp);
- /* We should never have TREE_BLOCK set on non-statements. */
- if (EXPR_P (*tp))
- gcc_assert (!TREE_BLOCK (*tp));
-
- if (TREE_CODE (*tp) != OMP_CLAUSE)
- TREE_TYPE (*tp) = remap_type (TREE_TYPE (*tp), id);
-
if (TREE_CODE (*tp) == TARGET_EXPR && TREE_OPERAND (*tp, 3))
{
/* The copied TARGET_EXPR has never been expanded, even if the
old_nr = tree_low_cst (old_t_nr, 0);
new_nr = remap_eh_region_nr (old_nr, id);
- return build_int_cst (NULL, new_nr);
+ return build_int_cst (integer_type_node, new_nr);
}
/* Helper for copy_bb. Remap statement STMT using the inlining
edge = cgraph_clone_edge (edge, id->dst_node, stmt,
gimple_uid (stmt),
REG_BR_PROB_BASE, CGRAPH_FREQ_BASE,
- edge->frequency, true);
+ true);
/* We could also just rescale the frequency, but
doing so would introduce roundoff errors and make
verifier unhappy. */
if ((!edge
|| (edge->indirect_inlining_edge
&& id->transform_call_graph_edges == CB_CGE_MOVE_CLONES))
+ && id->dst_node->analyzed
&& (fn = gimple_call_fndecl (stmt)) != NULL)
{
- struct cgraph_node *dest = cgraph_node (fn);
+ struct cgraph_node *dest = cgraph_get_node (fn);
/* We have missing edge in the callgraph. This can happen
when previous inlining turned an indirect call into a
(id->dst_node, dest, orig_stmt, stmt, bb->count,
compute_call_stmt_bb_frequency (id->dst_node->decl,
copy_basic_block),
- bb->loop_depth, CIF_ORIGINALLY_INDIRECT_CALL);
+ CIF_ORIGINALLY_INDIRECT_CALL);
else
cgraph_create_edge (id->dst_node, dest, stmt,
bb->count,
compute_call_stmt_bb_frequency
- (id->dst_node->decl, copy_basic_block),
- bb->loop_depth)->inline_failed
+ (id->dst_node->decl, copy_basic_block))->inline_failed
= CIF_ORIGINALLY_INDIRECT_CALL;
if (dump_file)
{
cfun->va_list_fpr_size = src_cfun->va_list_fpr_size;
cfun->has_nonlocal_label = src_cfun->has_nonlocal_label;
cfun->stdarg = src_cfun->stdarg;
- cfun->dont_save_pending_sizes_p = src_cfun->dont_save_pending_sizes_p;
cfun->after_inlining = src_cfun->after_inlining;
cfun->can_throw_non_call_exceptions
= src_cfun->can_throw_non_call_exceptions;
this may change program's memory overhead drastically when the
function using alloca is called in loop. In GCC present in
SPEC2000 inlining into schedule_block cause it to require 2GB of
- RAM instead of 256MB. */
+ RAM instead of 256MB. Don't do so for alloca calls emitted for
+ VLA objects as those can't cause unbounded growth (they're always
+ wrapped inside stack_save/stack_restore regions). */
if (gimple_alloca_call_p (stmt)
+ && !gimple_call_alloca_for_var_p (stmt)
&& !lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn)))
{
inline_forbidden_reason
return forbidden_p;
}
-/* Return true if CALLEE cannot be inlined into CALLER. */
-
-static bool
-inline_forbidden_into_p (tree caller, tree callee)
-{
- /* Don't inline if the functions have different EH personalities. */
- if (DECL_FUNCTION_PERSONALITY (caller)
- && DECL_FUNCTION_PERSONALITY (callee)
- && (DECL_FUNCTION_PERSONALITY (caller)
- != DECL_FUNCTION_PERSONALITY (callee)))
- return true;
-
- /* Don't inline if the callee can throw non-call exceptions but the
- caller cannot. */
- if (DECL_STRUCT_FUNCTION (callee)
- && DECL_STRUCT_FUNCTION (callee)->can_throw_non_call_exceptions
- && !(DECL_STRUCT_FUNCTION (caller)
- && DECL_STRUCT_FUNCTION (caller)->can_throw_non_call_exceptions))
- return true;
-
- return false;
-}
-
/* Returns nonzero if FN is a function that does not have any
fundamental inline blocking properties. */
case GIMPLE_CALL:
{
tree decl = gimple_call_fndecl (stmt);
- tree addr = gimple_call_fn (stmt);
- tree funtype = TREE_TYPE (addr);
- bool stdarg = false;
-
- if (POINTER_TYPE_P (funtype))
- funtype = TREE_TYPE (funtype);
+ struct cgraph_node *node;
/* Do not special case builtins where we see the body.
This just confuses the inliner. */
- if (!decl || cgraph_node (decl)->analyzed)
+ if (!decl || !(node = cgraph_get_node (decl)) || node->analyzed)
;
/* For builtins that are likely expanded to nothing or
inlined do not account operand costs. */
}
cost = weights->call_cost;
- if (decl)
- funtype = TREE_TYPE (decl);
-
- if (!VOID_TYPE_P (TREE_TYPE (funtype)))
- cost += estimate_move_cost (TREE_TYPE (funtype));
-
- if (funtype)
- stdarg = stdarg_p (funtype);
-
- /* Our cost must be kept in sync with
- cgraph_estimate_size_after_inlining that does use function
- declaration to figure out the arguments.
-
- For functions taking variable list of arguments we must
- look into call statement intself. This is safe because
- we will get only higher costs and in most cases we will
- not inline these anyway. */
- if (decl && DECL_ARGUMENTS (decl) && !stdarg)
- {
- tree arg;
- for (arg = DECL_ARGUMENTS (decl); arg; arg = DECL_CHAIN (arg))
- if (!VOID_TYPE_P (TREE_TYPE (arg)))
- cost += estimate_move_cost (TREE_TYPE (arg));
- }
- else if (funtype && prototype_p (funtype) && !stdarg)
+ if (gimple_call_lhs (stmt))
+ cost += estimate_move_cost (TREE_TYPE (gimple_call_lhs (stmt)));
+ for (i = 0; i < gimple_call_num_args (stmt); i++)
{
- tree t;
- for (t = TYPE_ARG_TYPES (funtype); t && t != void_list_node;
- t = TREE_CHAIN (t))
- if (!VOID_TYPE_P (TREE_VALUE (t)))
- cost += estimate_move_cost (TREE_VALUE (t));
+ tree arg = gimple_call_arg (stmt, i);
+ cost += estimate_move_cost (TREE_TYPE (arg));
}
- else
- {
- for (i = 0; i < gimple_call_num_args (stmt); i++)
- {
- tree arg = gimple_call_arg (stmt, i);
- if (!VOID_TYPE_P (TREE_TYPE (arg)))
- cost += estimate_move_cost (TREE_TYPE (arg));
- }
- }
-
break;
}
if (gimple_code (stmt) != GIMPLE_CALL)
goto egress;
- /* Objective C and fortran still calls tree_rest_of_compilation directly.
- Kill this check once this is fixed. */
- if (!id->dst_node->analyzed)
- goto egress;
-
cg_edge = cgraph_edge (id->dst_node, stmt);
gcc_checking_assert (cg_edge);
/* First, see if we can figure out what function is being called.
&& gimple_has_body_p (DECL_ABSTRACT_ORIGIN (fn)))
fn = DECL_ABSTRACT_ORIGIN (fn);
- /* First check that inlining isn't simply forbidden in this case. */
- if (inline_forbidden_into_p (cg_edge->caller->decl, cg_edge->callee->decl))
- goto egress;
-
/* Don't try to inline functions that are not well-suited to inlining. */
if (!cgraph_inline_p (cg_edge, &reason))
{
_(cgraph_inline_failed_string (reason)));
sorry ("called from here");
}
- else if (warn_inline && DECL_DECLARED_INLINE_P (fn)
+ else if (warn_inline
+ && DECL_DECLARED_INLINE_P (fn)
+ && !DECL_NO_INLINE_WARNING_P (fn)
&& !DECL_IN_SYSTEM_HEADER (fn)
&& reason != CIF_UNSPECIFIED
&& !lookup_attribute ("noinline", DECL_ATTRIBUTES (fn))
+ /* Do not warn about not inlined recursive calls. */
+ && !cgraph_edge_recursive_p (cg_edge)
/* Avoid warnings during early inline pass. */
&& cgraph_global_info_ready)
{
/* Clear out ID. */
memset (&id, 0, sizeof (id));
- id.src_node = id.dst_node = cgraph_node (fn);
+ id.src_node = id.dst_node = cgraph_get_node (fn);
+ gcc_assert (id.dst_node->analyzed);
id.dst_fn = fn;
/* Or any functions that aren't finished yet. */
if (current_function_decl)
here. */
tree chain = NULL_TREE, new_tree;
- chain = TREE_CHAIN (*tp);
+ if (CODE_CONTAINS_STRUCT (code, TS_COMMON))
+ chain = TREE_CHAIN (*tp);
/* Copy the node. */
new_tree = copy_node (*tp);
CONSTRUCTOR_ELTS (*tp));
*tp = new_tree;
}
+ else if (code == STATEMENT_LIST)
+ /* We used to just abort on STATEMENT_LIST, but we can run into them
+ with statement-expressions (c++/40975). */
+ copy_statement_list (tp);
else if (TREE_CODE_CLASS (code) == tcc_type)
*walk_subtrees = 0;
else if (TREE_CODE_CLASS (code) == tcc_declaration)
*walk_subtrees = 0;
else if (TREE_CODE_CLASS (code) == tcc_constant)
*walk_subtrees = 0;
- else
- gcc_assert (code != STATEMENT_LIST);
return NULL_TREE;
}
}
}
- if (changed)
- tidy_fallthru_edges ();
return changed;
}
&& TREE_CODE (new_decl) == FUNCTION_DECL);
DECL_POSSIBLY_INLINED (old_decl) = 1;
- old_version_node = cgraph_node (old_decl);
- new_version_node = cgraph_node (new_decl);
+ old_version_node = cgraph_get_node (old_decl);
+ gcc_checking_assert (old_version_node);
+ new_version_node = cgraph_get_node (new_decl);
+ gcc_checking_assert (new_version_node);
/* Output the inlining info for this abstract function, since it has been
inlined. If we don't do this now, we can lose the information about the
id.transform_call_graph_edges = CB_CGE_DUPLICATE;
id.transform_new_cfg = false;
id.transform_return_to_modify = true;
- id.transform_lang_insert_block = false;
+ id.transform_lang_insert_block = NULL;
/* Make sure not to unshare trees behind the front-end's back
since front-end specific mechanisms may rely on sharing. */
return type;
}
-
-/* Return whether it is safe to inline a function because it used different
- target specific options or call site actual types mismatch parameter types.
- E is the call edge to be checked. */
-bool
-tree_can_inline_p (struct cgraph_edge *e)
-{
-#if 0
- /* This causes a regression in SPEC in that it prevents a cold function from
- inlining a hot function. Perhaps this should only apply to functions
- that the user declares hot/cold/optimize explicitly. */
-
- /* Don't inline a function with a higher optimization level than the
- caller, or with different space constraints (hot/cold functions). */
- tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller);
- tree callee_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee);
-
- if (caller_tree != callee_tree)
- {
- struct cl_optimization *caller_opt
- = TREE_OPTIMIZATION ((caller_tree)
- ? caller_tree
- : optimization_default_node);
-
- struct cl_optimization *callee_opt
- = TREE_OPTIMIZATION ((callee_tree)
- ? callee_tree
- : optimization_default_node);
-
- if ((caller_opt->optimize > callee_opt->optimize)
- || (caller_opt->optimize_size != callee_opt->optimize_size))
- return false;
- }
-#endif
- tree caller, callee, lhs;
-
- caller = e->caller->decl;
- callee = e->callee->decl;
-
- /* First check that inlining isn't simply forbidden in this case. */
- if (inline_forbidden_into_p (caller, callee))
- {
- e->inline_failed = CIF_UNSPECIFIED;
- if (e->call_stmt)
- gimple_call_set_cannot_inline (e->call_stmt, true);
- return false;
- }
-
- /* Allow the backend to decide if inlining is ok. */
- if (!targetm.target_option.can_inline_p (caller, callee))
- {
- e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
- if (e->call_stmt)
- gimple_call_set_cannot_inline (e->call_stmt, true);
- e->call_stmt_cannot_inline_p = true;
- return false;
- }
-
- /* Do not inline calls where we cannot triviall work around mismatches
- in argument or return types. */
- if (e->call_stmt
- && ((DECL_RESULT (callee)
- && !DECL_BY_REFERENCE (DECL_RESULT (callee))
- && (lhs = gimple_call_lhs (e->call_stmt)) != NULL_TREE
- && !useless_type_conversion_p (TREE_TYPE (DECL_RESULT (callee)),
- TREE_TYPE (lhs))
- && !fold_convertible_p (TREE_TYPE (DECL_RESULT (callee)), lhs))
- || !gimple_check_call_args (e->call_stmt)))
- {
- e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
- if (e->call_stmt)
- gimple_call_set_cannot_inline (e->call_stmt, true);
- e->call_stmt_cannot_inline_p = true;
- return false;
- }
-
- return true;
-}