#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h" /* floor_log2 */
#include "diagnostic-core.h"
#include "tree.h"
#include "tree-inline.h"
{
gimple_stmt_iterator gsi = gsi_last_bb (id->entry_bb);
gimple init_stmt;
+ tree zero = build_zero_cst (TREE_TYPE (new_tree));
- init_stmt = gimple_build_assign (new_tree,
- fold_convert (TREE_TYPE (new_tree),
- integer_zero_node));
+ init_stmt = gimple_build_assign (new_tree, zero);
gsi_insert_after (&gsi, init_stmt, GSI_NEW_STMT);
SSA_NAME_IS_DEFAULT_DEF (new_tree) = 0;
}
walk_tree (&DECL_QUALIFIER (t), copy_tree_body_r, id, NULL);
}
- if (cfun && gimple_in_ssa_p (cfun)
- && (TREE_CODE (t) == VAR_DECL
- || TREE_CODE (t) == RESULT_DECL || TREE_CODE (t) == PARM_DECL))
- {
- get_var_ann (t);
- add_referenced_var (t);
- }
+ if ((TREE_CODE (t) == VAR_DECL
+ || TREE_CODE (t) == RESULT_DECL
+ || TREE_CODE (t) == PARM_DECL)
+ && id->src_fn && DECL_STRUCT_FUNCTION (id->src_fn)
+ && gimple_referenced_vars (DECL_STRUCT_FUNCTION (id->src_fn))
+ /* We don't want to mark as referenced VAR_DECLs that were
+ not marked as such in the src function. */
+ && (TREE_CODE (decl) != VAR_DECL
+ || referenced_var_lookup (DECL_STRUCT_FUNCTION (id->src_fn),
+ DECL_UID (decl))))
+ add_referenced_var (t);
return t;
}
{
tree f, nf = NULL;
- for (f = TYPE_FIELDS (new_tree); f ; f = TREE_CHAIN (f))
+ for (f = TYPE_FIELDS (new_tree); f ; f = DECL_CHAIN (f))
{
t = remap_decl (f, id);
DECL_CONTEXT (t) = new_tree;
- TREE_CHAIN (t) = nf;
+ DECL_CHAIN (t) = nf;
nf = t;
}
TYPE_FIELDS (new_tree) = nreverse (nf);
tree new_decls = NULL_TREE;
/* Remap its variables. */
- for (old_var = decls; old_var; old_var = TREE_CHAIN (old_var))
+ for (old_var = decls; old_var; old_var = DECL_CHAIN (old_var))
{
tree new_var;
else
{
gcc_assert (DECL_P (new_var));
- TREE_CHAIN (new_var) = new_decls;
+ DECL_CHAIN (new_var) = new_decls;
new_decls = new_var;
/* Also copy value-expressions. */
{
tree stmt = tsi_stmt (oi);
if (TREE_CODE (stmt) == STATEMENT_LIST)
+ /* This copy is not redundant; tsi_link_after will smash this
+ STATEMENT_LIST into the end of the one we're building, and we
+ don't want to do that with the original. */
copy_statement_list (&stmt);
tsi_link_after (&ni, stmt, TSI_CONTINUE_LINKING);
}
{
/* Otherwise, just copy the node. Note that copy_tree_r already
knows not to copy VAR_DECLs, etc., so this is safe. */
+
+ /* We should never have TREE_BLOCK set on non-statements. */
+ if (EXPR_P (*tp))
+ gcc_assert (!TREE_BLOCK (*tp));
+
if (TREE_CODE (*tp) == MEM_REF)
{
- /* We need to re-canonicalize MEM_REFs from inline substitutions
- that can happen when a pointer argument is an ADDR_EXPR. */
- tree decl = TREE_OPERAND (*tp, 0);
- tree *n;
-
- /* See remap_ssa_name. */
- if (TREE_CODE (decl) == SSA_NAME
- && TREE_CODE (SSA_NAME_VAR (decl)) == RESULT_DECL
- && id->transform_return_to_modify)
- decl = SSA_NAME_VAR (decl);
+ tree ptr = TREE_OPERAND (*tp, 0);
+ tree type = remap_type (TREE_TYPE (*tp), id);
+ tree old = *tp;
+ tree tem;
- n = (tree *) pointer_map_contains (id->decl_map, decl);
- if (n)
+ /* We need to re-canonicalize MEM_REFs from inline substitutions
+ that can happen when a pointer argument is an ADDR_EXPR.
+ Recurse here manually to allow that. */
+ walk_tree (&ptr, remap_gimple_op_r, data, NULL);
+ if ((tem = maybe_fold_offset_to_reference (EXPR_LOCATION (*tp),
+ ptr,
+ TREE_OPERAND (*tp, 1),
+ type))
+ && TREE_THIS_VOLATILE (tem) == TREE_THIS_VOLATILE (old))
{
- tree old = *tp;
- tree ptr = unshare_expr (*n);
- tree tem;
- if ((tem = maybe_fold_offset_to_reference (EXPR_LOCATION (*tp),
- ptr,
- TREE_OPERAND (*tp, 1),
- TREE_TYPE (*tp)))
- && TREE_THIS_VOLATILE (tem) == TREE_THIS_VOLATILE (old))
- {
- tree *tem_basep = &tem;
- while (handled_component_p (*tem_basep))
- tem_basep = &TREE_OPERAND (*tem_basep, 0);
- if (TREE_CODE (*tem_basep) == MEM_REF)
- *tem_basep
- = build2 (MEM_REF, TREE_TYPE (*tem_basep),
- TREE_OPERAND (*tem_basep, 0),
- fold_convert (TREE_TYPE (TREE_OPERAND (*tp, 1)),
- TREE_OPERAND (*tem_basep, 1)));
- else
- *tem_basep
- = build2 (MEM_REF, TREE_TYPE (*tem_basep),
- build_fold_addr_expr (*tem_basep),
- build_int_cst
- (TREE_TYPE (TREE_OPERAND (*tp, 1)), 0));
- *tp = tem;
- }
+ tree *tem_basep = &tem;
+ while (handled_component_p (*tem_basep))
+ tem_basep = &TREE_OPERAND (*tem_basep, 0);
+ if (TREE_CODE (*tem_basep) == MEM_REF)
+ *tem_basep
+ = build2 (MEM_REF, TREE_TYPE (*tem_basep),
+ TREE_OPERAND (*tem_basep, 0),
+ fold_convert (TREE_TYPE (TREE_OPERAND (*tp, 1)),
+ TREE_OPERAND (*tem_basep, 1)));
else
- {
- *tp = fold_build2 (MEM_REF, TREE_TYPE (*tp),
- ptr, TREE_OPERAND (*tp, 1));
- TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
- }
- TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
- *walk_subtrees = 0;
- return NULL;
+ *tem_basep
+ = build2 (MEM_REF, TREE_TYPE (*tem_basep),
+ build_fold_addr_expr (*tem_basep),
+ build_int_cst
+ (TREE_TYPE (TREE_OPERAND (*tp, 1)), 0));
+ *tp = tem;
}
+ else
+ {
+ *tp = fold_build2 (MEM_REF, type,
+ ptr, TREE_OPERAND (*tp, 1));
+ TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
+ TREE_THIS_NOTRAP (*tp) = TREE_THIS_NOTRAP (old);
+ }
+ TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
+ *walk_subtrees = 0;
+ return NULL;
}
/* Here is the "usual case". Copy this tree node, and then
tweak some special cases. */
copy_tree_r (tp, walk_subtrees, NULL);
+ if (TREE_CODE (*tp) != OMP_CLAUSE)
+ TREE_TYPE (*tp) = remap_type (TREE_TYPE (*tp), id);
+
/* Global variables we haven't seen yet need to go into referenced
vars. If not referenced from types only. */
if (gimple_in_ssa_p (cfun)
&& !processing_debug_stmt)
add_referenced_var (*tp);
- /* We should never have TREE_BLOCK set on non-statements. */
- if (EXPR_P (*tp))
- gcc_assert (!TREE_BLOCK (*tp));
-
- if (TREE_CODE (*tp) != OMP_CLAUSE)
- TREE_TYPE (*tp) = remap_type (TREE_TYPE (*tp), id);
-
if (TREE_CODE (*tp) == TARGET_EXPR && TREE_OPERAND (*tp, 3))
{
/* The copied TARGET_EXPR has never been expanded, even if the
*tp = build1 (INDIRECT_REF, type, new_tree);
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
+ TREE_READONLY (*tp) = TREE_READONLY (old);
+ TREE_THIS_NOTRAP (*tp) = TREE_THIS_NOTRAP (old);
}
}
*walk_subtrees = 0;
old_nr = tree_low_cst (old_t_nr, 0);
new_nr = remap_eh_region_nr (old_nr, id);
- return build_int_cst (NULL, new_nr);
+ return build_int_cst (integer_type_node, new_nr);
}
/* Helper for copy_bb. Remap statement STMT using the inlining
tree new_rhs;
new_rhs = force_gimple_operand_gsi (&seq_gsi,
gimple_assign_rhs1 (stmt),
- true, NULL, false, GSI_NEW_STMT);
+ true, NULL, false,
+ GSI_CONTINUE_LINKING);
gimple_assign_set_rhs1 (stmt, new_rhs);
id->regimplify = false;
}
size_t nargs = gimple_call_num_args (id->gimple_call);
size_t n;
- for (p = DECL_ARGUMENTS (id->src_fn); p; p = TREE_CHAIN (p))
+ for (p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
nargs--;
/* Create the new array of arguments. */
tree count, p;
gimple new_stmt;
- for (p = DECL_ARGUMENTS (id->src_fn); p; p = TREE_CHAIN (p))
+ for (p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
nargs--;
count = build_int_cst (integer_type_node, nargs);
edge = cgraph_clone_edge (edge, id->dst_node, stmt,
gimple_uid (stmt),
REG_BR_PROB_BASE, CGRAPH_FREQ_BASE,
- edge->frequency, true);
+ true);
/* We could also just rescale the frequency, but
doing so would introduce roundoff errors and make
verifier unhappy. */
if ((!edge
|| (edge->indirect_inlining_edge
&& id->transform_call_graph_edges == CB_CGE_MOVE_CLONES))
+ && id->dst_node->analyzed
&& (fn = gimple_call_fndecl (stmt)) != NULL)
{
- struct cgraph_node *dest = cgraph_node (fn);
+ struct cgraph_node *dest = cgraph_get_node (fn);
/* We have missing edge in the callgraph. This can happen
when previous inlining turned an indirect call into a
most common reason for missing edges). */
gcc_assert (dest->needed || !dest->analyzed
|| dest->address_taken
- || !id->src_node->analyzed);
+ || !id->src_node->analyzed
+ || !id->dst_node->analyzed);
if (id->transform_call_graph_edges == CB_CGE_MOVE_CLONES)
cgraph_create_edge_including_clones
(id->dst_node, dest, orig_stmt, stmt, bb->count,
compute_call_stmt_bb_frequency (id->dst_node->decl,
copy_basic_block),
- bb->loop_depth, CIF_ORIGINALLY_INDIRECT_CALL);
+ CIF_ORIGINALLY_INDIRECT_CALL);
else
cgraph_create_edge (id->dst_node, dest, stmt,
bb->count,
compute_call_stmt_bb_frequency
- (id->dst_node->decl, copy_basic_block),
- bb->loop_depth)->inline_failed
+ (id->dst_node->decl, copy_basic_block))->inline_failed
= CIF_ORIGINALLY_INDIRECT_CALL;
if (dump_file)
{
edge_iterator ei;
gimple phi;
gimple_stmt_iterator si;
+ edge new_edge;
+ bool inserted = false;
for (si = gsi_start (phi_nodes (bb)); !gsi_end_p (si); gsi_next (&si))
{
tree res, new_res;
gimple new_phi;
- edge new_edge;
phi = gsi_stmt (si);
res = PHI_RESULT (phi);
{
gimple_seq stmts = NULL;
new_arg = force_gimple_operand (new_arg, &stmts, true, NULL);
- gsi_insert_seq_on_edge_immediate (new_edge, stmts);
+ gsi_insert_seq_on_edge (new_edge, stmts);
+ inserted = true;
}
add_phi_arg (new_phi, new_arg, new_edge,
gimple_phi_arg_location_from_edge (phi, old_edge));
}
}
}
+
+ /* Commit the delayed edge insertions. */
+ if (inserted)
+ FOR_EACH_EDGE (new_edge, ei, new_bb->preds)
+ gsi_commit_one_edge_insert (new_edge, NULL);
}
cfun->va_list_fpr_size = src_cfun->va_list_fpr_size;
cfun->has_nonlocal_label = src_cfun->has_nonlocal_label;
cfun->stdarg = src_cfun->stdarg;
- cfun->dont_save_pending_sizes_p = src_cfun->dont_save_pending_sizes_p;
cfun->after_inlining = src_cfun->after_inlining;
cfun->can_throw_non_call_exceptions
= src_cfun->can_throw_non_call_exceptions;
if (!id->debug_stmts)
return;
- for (i = 0; VEC_iterate (gimple, id->debug_stmts, i, stmt); i++)
+ FOR_EACH_VEC_ELT (gimple, id->debug_stmts, i, stmt)
copy_debug_stmt (stmt, id);
VEC_free (gimple, heap, id->debug_stmts);
/* We're actually using the newly-created var. */
if (gimple_in_ssa_p (cfun) && TREE_CODE (var) == VAR_DECL)
- {
- get_var_ann (var);
- add_referenced_var (var);
- }
+ add_referenced_var (var);
/* Declare this new variable. */
- TREE_CHAIN (var) = *vars;
+ DECL_CHAIN (var) = *vars;
*vars = var;
/* Make gimplifier happy about this variable. */
/* Loop through the parameter declarations, replacing each with an
equivalent VAR_DECL, appropriately initialized. */
- for (p = parms, i = 0; p; p = TREE_CHAIN (p), i++)
+ for (p = parms, i = 0; p; p = DECL_CHAIN (p), i++)
{
tree val;
val = i < gimple_call_num_args (stmt) ? gimple_call_arg (stmt, i) : NULL;
in a second loop over all parameters to appropriately remap
variable sized arrays when the size is specified in a
parameter following the array. */
- for (p = parms, i = 0; p; p = TREE_CHAIN (p), i++)
+ for (p = parms, i = 0; p; p = DECL_CHAIN (p), i++)
{
tree *varp = (tree *) pointer_map_contains (id->decl_map, p);
if (varp
basic_block entry_bb)
{
tree callee = id->src_fn;
- tree caller = id->dst_fn;
tree result = DECL_RESULT (callee);
tree callee_type = TREE_TYPE (result);
tree caller_type;
var = copy_result_decl_to_var (result, id);
if (gimple_in_ssa_p (cfun))
- {
- get_var_ann (var);
- add_referenced_var (var);
- }
+ add_referenced_var (var);
DECL_SEEN_IN_BIND_EXPR_P (var) = 1;
- add_local_decl (DECL_STRUCT_FUNCTION (caller), var);
/* Do not have the rest of GCC warn about this variable as it should
not be visible to the user. */
&& !is_gimple_val (var))
{
tree temp = create_tmp_var (TREE_TYPE (result), "retvalptr");
+ if (gimple_in_ssa_p (id->src_cfun))
+ add_referenced_var (temp);
insert_decl_map (id, result, temp);
- temp = remap_ssa_name (gimple_default_def (id->src_cfun, result), id);
+      /* When RESULT_DECL is in SSA form, we need to use its default_def
+ SSA_NAME. */
+ if (gimple_in_ssa_p (id->src_cfun) && gimple_default_def (id->src_cfun, result))
+ temp = remap_ssa_name (gimple_default_def (id->src_cfun, result), id);
insert_init_stmt (id, entry_bb, gimple_build_assign (temp, var));
}
else
this may change program's memory overhead drastically when the
function using alloca is called in loop. In GCC present in
SPEC2000 inlining into schedule_block cause it to require 2GB of
- RAM instead of 256MB. */
+ RAM instead of 256MB. Don't do so for alloca calls emitted for
+ VLA objects as those can't cause unbounded growth (they're always
+     wrapped inside stack_save/stack_restore regions).  */
if (gimple_alloca_call_p (stmt)
+ && !gimple_call_alloca_for_var_p (stmt)
&& !lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn)))
{
inline_forbidden_reason
return forbidden_p;
}
-/* Return true if CALLEE cannot be inlined into CALLER. */
-
-static bool
-inline_forbidden_into_p (tree caller, tree callee)
-{
- /* Don't inline if the functions have different EH personalities. */
- if (DECL_FUNCTION_PERSONALITY (caller)
- && DECL_FUNCTION_PERSONALITY (callee)
- && (DECL_FUNCTION_PERSONALITY (caller)
- != DECL_FUNCTION_PERSONALITY (callee)))
- return true;
-
- /* Don't inline if the callee can throw non-call exceptions but the
- caller cannot. */
- if (DECL_STRUCT_FUNCTION (callee)
- && DECL_STRUCT_FUNCTION (callee)->can_throw_non_call_exceptions
- && !(DECL_STRUCT_FUNCTION (caller)
- && DECL_STRUCT_FUNCTION (caller)->can_throw_non_call_exceptions))
- return true;
-
- return false;
-}
-
/* Returns nonzero if FN is a function that does not have any
fundamental inline blocking properties. */
inlinable = false;
}
- /* Don't auto-inline anything that might not be bound within
- this unit of translation. */
- else if (!DECL_DECLARED_INLINE_P (fn)
- && DECL_REPLACEABLE_P (fn))
- inlinable = false;
-
else if (!function_attribute_inlinable_p (fn))
{
if (do_warning)
gcc_assert (!VOID_TYPE_P (type));
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ {
+ enum machine_mode inner = TYPE_MODE (TREE_TYPE (type));
+ enum machine_mode simd
+ = targetm.vectorize.preferred_simd_mode (inner);
+ int simd_mode_size = GET_MODE_SIZE (simd);
+ return ((GET_MODE_SIZE (TYPE_MODE (type)) + simd_mode_size - 1)
+ / simd_mode_size);
+ }
+
size = int_size_in_bytes (type);
if (size < 0 || size > MOVE_MAX_PIECES * MOVE_RATIO (!optimize_size))
CASE_CONVERT:
case COMPLEX_EXPR:
case PAREN_EXPR:
+ case VIEW_CONVERT_EXPR:
return 0;
/* Assign cost of 1 to usual operations.
case POINTER_PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
+ case FMA_EXPR:
case ADDR_SPACE_CONVERT_EXPR:
case FIXED_CONVERT_EXPR:
case GIMPLE_CALL:
{
tree decl = gimple_call_fndecl (stmt);
- tree addr = gimple_call_fn (stmt);
- tree funtype = TREE_TYPE (addr);
- bool stdarg = false;
-
- if (POINTER_TYPE_P (funtype))
- funtype = TREE_TYPE (funtype);
-
- if (decl && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_MD)
- cost = weights->target_builtin_call_cost;
- else
- cost = weights->call_cost;
-
- if (decl && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
- switch (DECL_FUNCTION_CODE (decl))
- {
- /* Builtins that expand to constants. */
- case BUILT_IN_CONSTANT_P:
- case BUILT_IN_EXPECT:
- case BUILT_IN_OBJECT_SIZE:
- case BUILT_IN_UNREACHABLE:
- /* Simple register moves or loads from stack. */
- case BUILT_IN_RETURN_ADDRESS:
- case BUILT_IN_EXTRACT_RETURN_ADDR:
- case BUILT_IN_FROB_RETURN_ADDR:
- case BUILT_IN_RETURN:
- case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
- case BUILT_IN_FRAME_ADDRESS:
- case BUILT_IN_VA_END:
- case BUILT_IN_STACK_SAVE:
- case BUILT_IN_STACK_RESTORE:
- /* Exception state returns or moves registers around. */
- case BUILT_IN_EH_FILTER:
- case BUILT_IN_EH_POINTER:
- case BUILT_IN_EH_COPY_VALUES:
- return 0;
-
- /* builtins that are not expensive (that is they are most probably
- expanded inline into resonably simple code). */
- case BUILT_IN_ABS:
- case BUILT_IN_ALLOCA:
- case BUILT_IN_BSWAP32:
- case BUILT_IN_BSWAP64:
- case BUILT_IN_CLZ:
- case BUILT_IN_CLZIMAX:
- case BUILT_IN_CLZL:
- case BUILT_IN_CLZLL:
- case BUILT_IN_CTZ:
- case BUILT_IN_CTZIMAX:
- case BUILT_IN_CTZL:
- case BUILT_IN_CTZLL:
- case BUILT_IN_FFS:
- case BUILT_IN_FFSIMAX:
- case BUILT_IN_FFSL:
- case BUILT_IN_FFSLL:
- case BUILT_IN_IMAXABS:
- case BUILT_IN_FINITE:
- case BUILT_IN_FINITEF:
- case BUILT_IN_FINITEL:
- case BUILT_IN_FINITED32:
- case BUILT_IN_FINITED64:
- case BUILT_IN_FINITED128:
- case BUILT_IN_FPCLASSIFY:
- case BUILT_IN_ISFINITE:
- case BUILT_IN_ISINF_SIGN:
- case BUILT_IN_ISINF:
- case BUILT_IN_ISINFF:
- case BUILT_IN_ISINFL:
- case BUILT_IN_ISINFD32:
- case BUILT_IN_ISINFD64:
- case BUILT_IN_ISINFD128:
- case BUILT_IN_ISNAN:
- case BUILT_IN_ISNANF:
- case BUILT_IN_ISNANL:
- case BUILT_IN_ISNAND32:
- case BUILT_IN_ISNAND64:
- case BUILT_IN_ISNAND128:
- case BUILT_IN_ISNORMAL:
- case BUILT_IN_ISGREATER:
- case BUILT_IN_ISGREATEREQUAL:
- case BUILT_IN_ISLESS:
- case BUILT_IN_ISLESSEQUAL:
- case BUILT_IN_ISLESSGREATER:
- case BUILT_IN_ISUNORDERED:
- case BUILT_IN_VA_ARG_PACK:
- case BUILT_IN_VA_ARG_PACK_LEN:
- case BUILT_IN_VA_COPY:
- case BUILT_IN_TRAP:
- case BUILT_IN_SAVEREGS:
- case BUILT_IN_POPCOUNTL:
- case BUILT_IN_POPCOUNTLL:
- case BUILT_IN_POPCOUNTIMAX:
- case BUILT_IN_POPCOUNT:
- case BUILT_IN_PARITYL:
- case BUILT_IN_PARITYLL:
- case BUILT_IN_PARITYIMAX:
- case BUILT_IN_PARITY:
- case BUILT_IN_LABS:
- case BUILT_IN_LLABS:
- case BUILT_IN_PREFETCH:
- cost = weights->target_builtin_call_cost;
- break;
-
- default:
- break;
- }
-
- if (decl)
- funtype = TREE_TYPE (decl);
-
- if (!VOID_TYPE_P (TREE_TYPE (funtype)))
- cost += estimate_move_cost (TREE_TYPE (funtype));
-
- if (funtype)
- stdarg = stdarg_p (funtype);
-
- /* Our cost must be kept in sync with
- cgraph_estimate_size_after_inlining that does use function
- declaration to figure out the arguments.
-
- For functions taking variable list of arguments we must
- look into call statement intself. This is safe because
- we will get only higher costs and in most cases we will
- not inline these anyway. */
- if (decl && DECL_ARGUMENTS (decl) && !stdarg)
+ struct cgraph_node *node;
+
+	/* Do not special case builtins where we see the body.
+	   This just confuses the inliner.  */
+ if (!decl || !(node = cgraph_get_node (decl)) || node->analyzed)
+ ;
+	/* For builtins that are likely expanded to nothing or
+	   inlined, do not account operand costs.  */
+ else if (is_simple_builtin (decl))
+ return 0;
+ else if (is_inexpensive_builtin (decl))
+ return weights->target_builtin_call_cost;
+ else if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
{
- tree arg;
- for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg))
- if (!VOID_TYPE_P (TREE_TYPE (arg)))
- cost += estimate_move_cost (TREE_TYPE (arg));
- }
- else if (funtype && prototype_p (funtype) && !stdarg)
- {
- tree t;
- for (t = TYPE_ARG_TYPES (funtype); t && t != void_list_node;
- t = TREE_CHAIN (t))
- if (!VOID_TYPE_P (TREE_VALUE (t)))
- cost += estimate_move_cost (TREE_VALUE (t));
- }
- else
- {
- for (i = 0; i < gimple_call_num_args (stmt); i++)
+ /* We canonicalize x * x to pow (x, 2.0) with -ffast-math, so
+ specialize the cheap expansion we do here.
+ ??? This asks for a more general solution. */
+ switch (DECL_FUNCTION_CODE (decl))
{
- tree arg = gimple_call_arg (stmt, i);
- if (!VOID_TYPE_P (TREE_TYPE (arg)))
- cost += estimate_move_cost (TREE_TYPE (arg));
+ case BUILT_IN_POW:
+ case BUILT_IN_POWF:
+ case BUILT_IN_POWL:
+ if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
+ && REAL_VALUES_EQUAL
+ (TREE_REAL_CST (gimple_call_arg (stmt, 1)), dconst2))
+ return estimate_operator_cost (MULT_EXPR, weights,
+ gimple_call_arg (stmt, 0),
+ gimple_call_arg (stmt, 0));
+ break;
+
+ default:
+ break;
}
}
+ cost = weights->call_cost;
+ if (gimple_call_lhs (stmt))
+ cost += estimate_move_cost (TREE_TYPE (gimple_call_lhs (stmt)));
+ for (i = 0; i < gimple_call_num_args (stmt); i++)
+ {
+ tree arg = gimple_call_arg (stmt, i);
+ cost += estimate_move_cost (TREE_TYPE (arg));
+ }
break;
}
+ case GIMPLE_RETURN:
+ return weights->return_cost;
+
case GIMPLE_GOTO:
case GIMPLE_LABEL:
case GIMPLE_NOP:
case GIMPLE_PHI:
- case GIMPLE_RETURN:
case GIMPLE_PREDICT:
case GIMPLE_DEBUG:
return 0;
eni_size_weights.div_mod_cost = 1;
eni_size_weights.omp_cost = 40;
eni_size_weights.time_based = false;
+ eni_size_weights.return_cost = 1;
/* Estimating time for call is difficult, since we have no idea what the
called function does. In the current uses of eni_time_weights,
underestimating the cost does less harm than overestimating it, so
we choose a rather small value here. */
eni_time_weights.call_cost = 10;
- eni_time_weights.target_builtin_call_cost = 10;
+ eni_time_weights.target_builtin_call_cost = 1;
eni_time_weights.div_mod_cost = 10;
eni_time_weights.omp_cost = 40;
eni_time_weights.time_based = true;
+ eni_time_weights.return_cost = 2;
}
/* Estimate the number of instructions in a gimple_seq. */
add_local_decl (caller, var);
}
else if (!can_be_nonlocal (var, id))
- add_local_decl (caller, remap_decl (var, id));
-}
-
-/* Fetch callee declaration from the call graph edge going from NODE and
- associated with STMR call statement. Return NULL_TREE if not found. */
-static tree
-get_indirect_callee_fndecl (struct cgraph_node *node, gimple stmt)
-{
- struct cgraph_edge *cs;
-
- cs = cgraph_edge (node, stmt);
- if (cs && !cs->indirect_unknown_callee)
- return cs->callee->decl;
+ {
+ tree new_var = remap_decl (var, id);
- return NULL_TREE;
+ /* Remap debug-expressions. */
+ if (TREE_CODE (new_var) == VAR_DECL
+ && DECL_DEBUG_EXPR_IS_FROM (new_var)
+ && new_var != var)
+ {
+ tree tem = DECL_DEBUG_EXPR (var);
+ bool old_regimplify = id->regimplify;
+ id->remapping_type_depth++;
+ walk_tree (&tem, copy_tree_body_r, id, NULL);
+ id->remapping_type_depth--;
+ id->regimplify = old_regimplify;
+ SET_DECL_DEBUG_EXPR (new_var, tem);
+ }
+ add_local_decl (caller, new_var);
+ }
}
/* If STMT is a GIMPLE_CALL, replace it with its inline expansion. */
if (gimple_code (stmt) != GIMPLE_CALL)
goto egress;
+ cg_edge = cgraph_edge (id->dst_node, stmt);
+ gcc_checking_assert (cg_edge);
/* First, see if we can figure out what function is being called.
If we cannot, then there is no hope of inlining the function. */
- fn = gimple_call_fndecl (stmt);
- if (!fn)
- {
- fn = get_indirect_callee_fndecl (id->dst_node, stmt);
- if (!fn)
- goto egress;
- }
-
- /* Turn forward declarations into real ones. */
- fn = cgraph_node (fn)->decl;
+ if (cg_edge->indirect_unknown_callee)
+ goto egress;
+ fn = cg_edge->callee->decl;
+ gcc_checking_assert (fn);
/* If FN is a declaration of a function in a nested scope that was
globally declared inline, we don't set its DECL_INITIAL.
&& gimple_has_body_p (DECL_ABSTRACT_ORIGIN (fn)))
fn = DECL_ABSTRACT_ORIGIN (fn);
- /* Objective C and fortran still calls tree_rest_of_compilation directly.
- Kill this check once this is fixed. */
- if (!id->dst_node->analyzed)
- goto egress;
-
- cg_edge = cgraph_edge (id->dst_node, stmt);
-
- /* First check that inlining isn't simply forbidden in this case. */
- if (inline_forbidden_into_p (cg_edge->caller->decl, cg_edge->callee->decl))
- goto egress;
-
/* Don't try to inline functions that are not well-suited to inlining. */
if (!cgraph_inline_p (cg_edge, &reason))
{
_(cgraph_inline_failed_string (reason)));
sorry ("called from here");
}
- else if (warn_inline && DECL_DECLARED_INLINE_P (fn)
+ else if (warn_inline
+ && DECL_DECLARED_INLINE_P (fn)
+ && !DECL_NO_INLINE_WARNING_P (fn)
&& !DECL_IN_SYSTEM_HEADER (fn)
&& reason != CIF_UNSPECIFIED
&& !lookup_attribute ("noinline", DECL_ATTRIBUTES (fn))
+ /* Do not warn about not inlined recursive calls. */
+ && !cgraph_edge_recursive_p (cg_edge)
/* Avoid warnings during early inline pass. */
&& cgraph_global_info_ready)
{
}
if (purge_dead_abnormal_edges)
- gimple_purge_dead_abnormal_call_edges (return_block);
+ {
+ gimple_purge_dead_eh_edges (return_block);
+ gimple_purge_dead_abnormal_call_edges (return_block);
+ }
/* If the value of the new expression is ignored, that's OK. We
don't warn about this for CALL_EXPRs, so we shouldn't warn about
basic_block bb;
int last = n_basic_blocks;
struct gimplify_ctx gctx;
+ bool inlined_p = false;
/* There is no point in performing inlining if errors have already
occurred -- and we might crash if we try to inline invalid
/* Clear out ID. */
memset (&id, 0, sizeof (id));
- id.src_node = id.dst_node = cgraph_node (fn);
+ id.src_node = id.dst_node = cgraph_get_node (fn);
+ gcc_assert (id.dst_node->analyzed);
id.dst_fn = fn;
/* Or any functions that aren't finished yet. */
if (current_function_decl)
follow it; we'll trudge through them, processing their CALL_EXPRs
along the way. */
FOR_EACH_BB (bb)
- gimple_expand_calls_inline (bb, &id);
+ inlined_p |= gimple_expand_calls_inline (bb, &id);
pop_gimplify_context (NULL);
}
#endif
- /* Fold the statements before compacting/renumbering the basic blocks. */
+ /* Fold queued statements. */
fold_marked_statements (last, id.statements_to_fold);
pointer_set_destroy (id.statements_to_fold);
gcc_assert (!id.debug_stmts);
- /* Renumber the (code) basic_blocks consecutively. */
- compact_blocks ();
+ /* If we didn't inline into the function there is nothing to do. */
+ if (!inlined_p)
+ return 0;
+
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (fn);
- fold_cond_expr_cond ();
delete_unreachable_blocks_update_callgraph (&id);
#ifdef ENABLE_CHECKING
verify_cgraph_node (id.dst_node);
return (TODO_update_ssa
| TODO_cleanup_cfg
| (gimple_in_ssa_p (cfun) ? TODO_remove_unused_locals : 0)
+ | (gimple_in_ssa_p (cfun) ? TODO_update_address_taken : 0)
| (profile_status != PROFILE_ABSENT ? TODO_rebuild_frequencies : 0));
}
here. */
tree chain = NULL_TREE, new_tree;
- chain = TREE_CHAIN (*tp);
+ if (CODE_CONTAINS_STRUCT (code, TS_COMMON))
+ chain = TREE_CHAIN (*tp);
/* Copy the node. */
new_tree = copy_node (*tp);
CONSTRUCTOR_ELTS (*tp));
*tp = new_tree;
}
+ else if (code == STATEMENT_LIST)
+ /* We used to just abort on STATEMENT_LIST, but we can run into them
+ with statement-expressions (c++/40975). */
+ copy_statement_list (tp);
else if (TREE_CODE_CLASS (code) == tcc_type)
*walk_subtrees = 0;
else if (TREE_CODE_CLASS (code) == tcc_declaration)
*walk_subtrees = 0;
else if (TREE_CODE_CLASS (code) == tcc_constant)
*walk_subtrees = 0;
- else
- gcc_assert (code != STATEMENT_LIST);
return NULL_TREE;
}
declare_inline_vars (tree block, tree vars)
{
tree t;
- for (t = vars; t; t = TREE_CHAIN (t))
+ for (t = vars; t; t = DECL_CHAIN (t))
{
DECL_SEEN_IN_BIND_EXPR_P (t) = 1;
gcc_assert (!TREE_STATIC (t) && !TREE_ASM_WRITTEN (t));
new function. */
DECL_CONTEXT (copy) = id->dst_fn;
+ if (TREE_CODE (decl) == VAR_DECL
+	 /* C++ clones functions during parsing, before
+	    referenced_vars is set up.  */
+ && gimple_referenced_vars (DECL_STRUCT_FUNCTION (id->src_fn))
+ && referenced_var_lookup (DECL_STRUCT_FUNCTION (id->src_fn),
+ DECL_UID (decl)))
+ add_referenced_var (copy);
+
return copy;
}
parg = &new_parm;
- for (arg = orig_parm; arg; arg = TREE_CHAIN (arg), i++)
+ for (arg = orig_parm; arg; arg = DECL_CHAIN (arg), i++)
if (!args_to_skip || !bitmap_bit_p (args_to_skip, i))
{
tree new_tree = remap_decl (arg, id);
lang_hooks.dup_lang_specific_decl (new_tree);
*parg = new_tree;
- parg = &TREE_CHAIN (new_tree);
+ parg = &DECL_CHAIN (new_tree);
}
else if (!pointer_map_contains (id->decl_map, arg))
{
as temporary variable later in function, the uses will be
replaced by local variable. */
tree var = copy_decl_to_var (arg, id);
- get_var_ann (var);
add_referenced_var (var);
insert_decl_map (id, arg, var);
/* Declare this new variable. */
- TREE_CHAIN (var) = *vars;
+ DECL_CHAIN (var) = *vars;
*vars = var;
}
return new_parm;
tree *chain_copy, *pvar;
chain_copy = &static_chain;
- for (pvar = chain_copy; *pvar; pvar = &TREE_CHAIN (*pvar))
+ for (pvar = chain_copy; *pvar; pvar = &DECL_CHAIN (*pvar))
{
tree new_tree = remap_decl (*pvar, id);
lang_hooks.dup_lang_specific_decl (new_tree);
- TREE_CHAIN (new_tree) = TREE_CHAIN (*pvar);
+ DECL_CHAIN (new_tree) = DECL_CHAIN (*pvar);
*pvar = new_tree;
}
return static_chain;
}
}
- if (changed)
- tidy_fallthru_edges ();
return changed;
}
&& TREE_CODE (new_decl) == FUNCTION_DECL);
DECL_POSSIBLY_INLINED (old_decl) = 1;
- old_version_node = cgraph_node (old_decl);
- new_version_node = cgraph_node (new_decl);
+ old_version_node = cgraph_get_node (old_decl);
+ gcc_checking_assert (old_version_node);
+ new_version_node = cgraph_get_node (new_decl);
+ gcc_checking_assert (new_version_node);
/* Output the inlining info for this abstract function, since it has been
inlined. If we don't do this now, we can lose the information about the
{
int i = replace_info->parm_num;
tree parm;
- for (parm = DECL_ARGUMENTS (old_decl); i; parm = TREE_CHAIN (parm))
+ for (parm = DECL_ARGUMENTS (old_decl); i; parm = DECL_CHAIN (parm))
i --;
replace_info->old_tree = parm;
}
args_to_skip, &vars);
DECL_INITIAL (new_decl) = remap_blocks (DECL_INITIAL (id.src_fn), &id);
-
- /* Renumber the lexical scoping (non-code) blocks consecutively. */
- number_blocks (id.dst_fn);
+ BLOCK_SUPERCONTEXT (DECL_INITIAL (new_decl)) = new_decl;
declare_inline_vars (DECL_INITIAL (new_decl), vars);
/* Add local vars. */
add_local_variables (DECL_STRUCT_FUNCTION (old_decl), cfun, &id, false);
- /* Copy the Function's body. */
- copy_body (&id, old_entry_block->count, REG_BR_PROB_BASE,
- ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, blocks_to_copy, new_entry);
-
if (DECL_RESULT (old_decl) != NULL_TREE)
{
- tree *res_decl = &DECL_RESULT (old_decl);
- DECL_RESULT (new_decl) = remap_decl (*res_decl, &id);
+ tree old_name;
+ DECL_RESULT (new_decl) = remap_decl (DECL_RESULT (old_decl), &id);
lang_hooks.dup_lang_specific_decl (DECL_RESULT (new_decl));
+ if (gimple_in_ssa_p (id.src_cfun)
+ && DECL_BY_REFERENCE (DECL_RESULT (old_decl))
+ && (old_name
+ = gimple_default_def (id.src_cfun, DECL_RESULT (old_decl))))
+ {
+ tree new_name = make_ssa_name (DECL_RESULT (new_decl), NULL);
+ insert_decl_map (&id, old_name, new_name);
+ SSA_NAME_DEF_STMT (new_name) = gimple_build_nop ();
+ set_default_def (DECL_RESULT (new_decl), new_name);
+ }
}
+ /* Copy the Function's body. */
+ copy_body (&id, old_entry_block->count, REG_BR_PROB_BASE,
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, blocks_to_copy, new_entry);
+
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (new_decl);
for (e = new_version_node->callees; e; e = e->next_callee)
{
basic_block bb = gimple_bb (e->call_stmt);
- e->frequency = compute_call_stmt_bb_frequency (current_function_decl, bb);
+ e->frequency = compute_call_stmt_bb_frequency (current_function_decl,
+ bb);
+ e->count = bb->count;
+ }
+ for (e = new_version_node->indirect_calls; e; e = e->next_callee)
+ {
+ basic_block bb = gimple_bb (e->call_stmt);
+ e->frequency = compute_call_stmt_bb_frequency (current_function_decl,
+ bb);
e->count = bb->count;
}
}
/* Remap the parameters. */
for (param = DECL_ARGUMENTS (fn), arg = first_call_expr_arg (exp, &iter);
param;
- param = TREE_CHAIN (param), arg = next_call_expr_arg (&iter))
+ param = DECL_CHAIN (param), arg = next_call_expr_arg (&iter))
*pointer_map_insert (decl_map, param) = arg;
memset (&id, 0, sizeof (id));
id.transform_call_graph_edges = CB_CGE_DUPLICATE;
id.transform_new_cfg = false;
id.transform_return_to_modify = true;
- id.transform_lang_insert_block = false;
+ id.transform_lang_insert_block = NULL;
/* Make sure not to unshare trees behind the front-end's back
since front-end specific mechanisms may rely on sharing. */
return type;
}
-
-/* Return whether it is safe to inline a function because it used different
- target specific options or call site actual types mismatch parameter types.
- E is the call edge to be checked. */
-bool
-tree_can_inline_p (struct cgraph_edge *e)
-{
-#if 0
- /* This causes a regression in SPEC in that it prevents a cold function from
- inlining a hot function. Perhaps this should only apply to functions
- that the user declares hot/cold/optimize explicitly. */
-
- /* Don't inline a function with a higher optimization level than the
- caller, or with different space constraints (hot/cold functions). */
- tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller);
- tree callee_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee);
-
- if (caller_tree != callee_tree)
- {
- struct cl_optimization *caller_opt
- = TREE_OPTIMIZATION ((caller_tree)
- ? caller_tree
- : optimization_default_node);
-
- struct cl_optimization *callee_opt
- = TREE_OPTIMIZATION ((callee_tree)
- ? callee_tree
- : optimization_default_node);
-
- if ((caller_opt->optimize > callee_opt->optimize)
- || (caller_opt->optimize_size != callee_opt->optimize_size))
- return false;
- }
-#endif
- tree caller, callee, lhs;
-
- caller = e->caller->decl;
- callee = e->callee->decl;
-
- /* First check that inlining isn't simply forbidden in this case. */
- if (inline_forbidden_into_p (caller, callee))
- {
- e->inline_failed = CIF_UNSPECIFIED;
- gimple_call_set_cannot_inline (e->call_stmt, true);
- return false;
- }
-
- /* Allow the backend to decide if inlining is ok. */
- if (!targetm.target_option.can_inline_p (caller, callee))
- {
- e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
- gimple_call_set_cannot_inline (e->call_stmt, true);
- e->call_stmt_cannot_inline_p = true;
- return false;
- }
-
- /* Do not inline calls where we cannot triviall work around mismatches
- in argument or return types. */
- if (e->call_stmt
- && ((DECL_RESULT (callee)
- && !DECL_BY_REFERENCE (DECL_RESULT (callee))
- && (lhs = gimple_call_lhs (e->call_stmt)) != NULL_TREE
- && !useless_type_conversion_p (TREE_TYPE (DECL_RESULT (callee)),
- TREE_TYPE (lhs))
- && !fold_convertible_p (TREE_TYPE (DECL_RESULT (callee)), lhs))
- || !gimple_check_call_args (e->call_stmt)))
- {
- e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
- gimple_call_set_cannot_inline (e->call_stmt, true);
- e->call_stmt_cannot_inline_p = true;
- return false;
- }
-
- return true;
-}