#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h" /* floor_log2 */
#include "diagnostic-core.h"
#include "tree.h"
#include "tree-inline.h"
knows not to copy VAR_DECLs, etc., so this is safe. */
if (TREE_CODE (*tp) == MEM_REF)
{
- /* We need to re-canonicalize MEM_REFs from inline substitutions
- that can happen when a pointer argument is an ADDR_EXPR. */
- tree decl = TREE_OPERAND (*tp, 0);
- tree *n;
+ tree ptr = TREE_OPERAND (*tp, 0);
+ tree old = *tp;
+ tree tem;
- /* See remap_ssa_name. */
- if (TREE_CODE (decl) == SSA_NAME
- && TREE_CODE (SSA_NAME_VAR (decl)) == RESULT_DECL
- && id->transform_return_to_modify)
- decl = SSA_NAME_VAR (decl);
-
- n = (tree *) pointer_map_contains (id->decl_map, decl);
- if (n)
+ /* We need to re-canonicalize MEM_REFs from inline substitutions
+ that can happen when a pointer argument is an ADDR_EXPR.
+ Recurse here manually to allow that. */
+ walk_tree (&ptr, remap_gimple_op_r, data, NULL);
+ if ((tem = maybe_fold_offset_to_reference (EXPR_LOCATION (*tp),
+ ptr,
+ TREE_OPERAND (*tp, 1),
+ TREE_TYPE (*tp)))
+ && TREE_THIS_VOLATILE (tem) == TREE_THIS_VOLATILE (old))
{
- tree old = *tp;
- tree ptr = unshare_expr (*n);
- tree tem;
- if ((tem = maybe_fold_offset_to_reference (EXPR_LOCATION (*tp),
- ptr,
- TREE_OPERAND (*tp, 1),
- TREE_TYPE (*tp)))
- && TREE_THIS_VOLATILE (tem) == TREE_THIS_VOLATILE (old))
- {
- tree *tem_basep = &tem;
- while (handled_component_p (*tem_basep))
- tem_basep = &TREE_OPERAND (*tem_basep, 0);
- if (TREE_CODE (*tem_basep) == MEM_REF)
- *tem_basep
- = build2 (MEM_REF, TREE_TYPE (*tem_basep),
- TREE_OPERAND (*tem_basep, 0),
- fold_convert (TREE_TYPE (TREE_OPERAND (*tp, 1)),
- TREE_OPERAND (*tem_basep, 1)));
- else
- *tem_basep
- = build2 (MEM_REF, TREE_TYPE (*tem_basep),
- build_fold_addr_expr (*tem_basep),
- build_int_cst
- (TREE_TYPE (TREE_OPERAND (*tp, 1)), 0));
- *tp = tem;
- }
+ tree *tem_basep = &tem;
+ while (handled_component_p (*tem_basep))
+ tem_basep = &TREE_OPERAND (*tem_basep, 0);
+ if (TREE_CODE (*tem_basep) == MEM_REF)
+ *tem_basep
+ = build2 (MEM_REF, TREE_TYPE (*tem_basep),
+ TREE_OPERAND (*tem_basep, 0),
+ fold_convert (TREE_TYPE (TREE_OPERAND (*tp, 1)),
+ TREE_OPERAND (*tem_basep, 1)));
else
- {
- *tp = fold_build2 (MEM_REF, TREE_TYPE (*tp),
- ptr, TREE_OPERAND (*tp, 1));
- TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
- TREE_THIS_NOTRAP (*tp) = TREE_THIS_NOTRAP (old);
- }
- TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
- *walk_subtrees = 0;
- return NULL;
+ *tem_basep
+ = build2 (MEM_REF, TREE_TYPE (*tem_basep),
+ build_fold_addr_expr (*tem_basep),
+ build_int_cst
+ (TREE_TYPE (TREE_OPERAND (*tp, 1)), 0));
+ *tp = tem;
+ }
+ else
+ {
+ *tp = fold_build2 (MEM_REF, TREE_TYPE (*tp),
+ ptr, TREE_OPERAND (*tp, 1));
+ TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
+ TREE_THIS_NOTRAP (*tp) = TREE_THIS_NOTRAP (old);
}
+ TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
+ *walk_subtrees = 0;
+ return NULL;
}
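
For context, here is a minimal source-level illustration (hypothetical, not from the patch) of the substitution the hunk above re-canonicalizes: inlining binds the pointer parameter to an ADDR_EXPR, and the resulting MEM_REF of an address is folded back into a direct reference.

    /* Hypothetical example of the case handled above.  The callee's
       body contains a MEM_REF of its pointer parameter.  */
    static int get (int *p) { return *p; }

    int use (void)
    {
      int x = 42;
      /* After inlining, p is replaced by &x, yielding MEM_REF (&x, 0);
         maybe_fold_offset_to_reference canonicalizes that back to x.  */
      return get (&x);
    }
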
/* Here is the "usual case". Copy this tree node, and then
gcc_assert (!VOID_TYPE_P (type));
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ {
+ enum machine_mode inner = TYPE_MODE (TREE_TYPE (type));
+ enum machine_mode simd
+ = targetm.vectorize.preferred_simd_mode (inner);
+ int simd_mode_size = GET_MODE_SIZE (simd);
+ return ((GET_MODE_SIZE (TYPE_MODE (type)) + simd_mode_size - 1)
+ / simd_mode_size);
+ }
+
size = int_size_in_bytes (type);
if (size < 0 || size > MOVE_MAX_PIECES * MOVE_RATIO (!optimize_size))
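
A worked instance of the rounded-up division above, assuming for illustration a target whose preferred SIMD mode for float is 16 bytes wide (e.g. V4SF):

    /* Sketch of the piece count computed above, under the assumption
       that the preferred SIMD mode for float is 16 bytes (V4SF).  */
    int vector_pieces_example (void)
    {
      int mode_size = 32;       /* e.g. a V8SF vector: 8 floats of 4 bytes */
      int simd_mode_size = 16;  /* GET_MODE_SIZE of the preferred SIMD mode */
      /* Round up: a 32-byte vector takes two 16-byte SIMD operations.  */
      return (mode_size + simd_mode_size - 1) / simd_mode_size;  /* == 2 */
    }
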
CASE_CONVERT:
case COMPLEX_EXPR:
case PAREN_EXPR:
+ case VIEW_CONVERT_EXPR:
return 0;
/* Assign cost of 1 to usual operations.
if (POINTER_TYPE_P (funtype))
funtype = TREE_TYPE (funtype);
- if (is_simple_builtin (decl))
+ /* Do not special case builtins where we see the body.
+ This just confuses the inliner. */
+ if (!decl || cgraph_node (decl)->analyzed)
+ ;
+ /* For builtins that are likely expanded to nothing or
+ inlined, do not account operand costs. */
+ else if (is_simple_builtin (decl))
return 0;
else if (is_inexpensive_builtin (decl))
- cost = weights->target_builtin_call_cost;
- else
- cost = weights->call_cost;
+ return weights->target_builtin_call_cost;
+ else if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
+ {
+ /* We canonicalize x * x to pow (x, 2.0) with -ffast-math, so
+ specialize the cheap expansion we do here.
+ ??? This asks for a more general solution. */
+ switch (DECL_FUNCTION_CODE (decl))
+ {
+ case BUILT_IN_POW:
+ case BUILT_IN_POWF:
+ case BUILT_IN_POWL:
+ if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
+ && REAL_VALUES_EQUAL
+ (TREE_REAL_CST (gimple_call_arg (stmt, 1)), dconst2))
+ return estimate_operator_cost (MULT_EXPR, weights,
+ gimple_call_arg (stmt, 0),
+ gimple_call_arg (stmt, 0));
+ break;
+ default:
+ break;
+ }
+ }
+
+ cost = weights->call_cost;
if (decl)
funtype = TREE_TYPE (decl);
break;
}
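
To illustrate the pow special case (a hedged example, not part of the patch): with -ffast-math the middle end canonicalizes x * x to a pow call, and the hunk above makes sure that call is priced as the multiplication it will expand back into.

    /* Hypothetical source the special case is aimed at: under
       -ffast-math, x * x may reach the inliner as
       __builtin_pow (x, 2.0), which the code above costs as a
       single MULT_EXPR rather than as a full call.  */
    double square (double x) { return x * x; }
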
+ case GIMPLE_RETURN:
+ return weights->return_cost;
+
case GIMPLE_GOTO:
case GIMPLE_LABEL:
case GIMPLE_NOP:
case GIMPLE_PHI:
- case GIMPLE_RETURN:
case GIMPLE_PREDICT:
case GIMPLE_DEBUG:
return 0;
eni_size_weights.div_mod_cost = 1;
eni_size_weights.omp_cost = 40;
eni_size_weights.time_based = false;
+ eni_size_weights.return_cost = 1;
/* Estimating time for call is difficult, since we have no idea what the
called function does. In the current uses of eni_time_weights,
underestimating the cost does less harm than overestimating it, so
we choose a rather small value here. */
eni_time_weights.call_cost = 10;
- eni_time_weights.target_builtin_call_cost = 10;
+ eni_time_weights.target_builtin_call_cost = 1;
eni_time_weights.div_mod_cost = 10;
eni_time_weights.omp_cost = 40;
eni_time_weights.time_based = true;
+ eni_time_weights.return_cost = 2;
}
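
As a rough sketch of how the new return_cost field enters per-statement accounting (toy code, not the GCC API):

    /* Toy model of the weights above: a body with one ordinary call
       and one return is estimated at 10 + 2 = 12 time units.  */
    struct toy_weights { int call_cost; int return_cost; };

    static int toy_stmt_cost (int is_call, int is_return,
                              const struct toy_weights *w)
    {
      if (is_call)
        return w->call_cost;    /* 10 in eni_time_weights */
      if (is_return)
        return w->return_cost;  /* 2 for time, 1 for size */
      return 1;                 /* the "usual operations" default */
    }
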
/* Estimate the number of instructions in a gimple_seq. */
if (gimple_code (stmt) != GIMPLE_CALL)
goto egress;
+ /* Objective C and Fortran still call tree_rest_of_compilation directly.
+ Kill this check once this is fixed. */
+ if (!id->dst_node->analyzed)
+ goto egress;
+
+ cg_edge = cgraph_edge (id->dst_node, stmt);
+ gcc_checking_assert (cg_edge);
/* First, see if we can figure out what function is being called.
If we cannot, then there is no hope of inlining the function. */
- fn = gimple_call_fndecl (stmt);
- if (!fn)
+ if (cg_edge->indirect_unknown_callee)
goto egress;
-
- /* Turn forward declarations into real ones. */
- fn = cgraph_node (fn)->decl;
+ fn = cg_edge->callee->decl;
+ gcc_checking_assert (fn);
/* If FN is a declaration of a function in a nested scope that was
globally declared inline, we don't set its DECL_INITIAL.
&& gimple_has_body_p (DECL_ABSTRACT_ORIGIN (fn)))
fn = DECL_ABSTRACT_ORIGIN (fn);
- /* Objective C and fortran still calls tree_rest_of_compilation directly.
- Kill this check once this is fixed. */
- if (!id->dst_node->analyzed)
- goto egress;
-
- cg_edge = cgraph_edge (id->dst_node, stmt);
-
/* First check that inlining isn't simply forbidden in this case. */
if (inline_forbidden_into_p (cg_edge->caller->decl, cg_edge->callee->decl))
goto egress;
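
For illustration (hypothetical example): the indirect_unknown_callee test above bails out on calls whose target is not a known function, such as a call through a function pointer.

    /* A call the new check gives up on: the callgraph edge for fp (x)
       has indirect_unknown_callee set, so no inlining is attempted.  */
    int dispatch (int (*fp) (int), int x)
    {
      return fp (x);
    }
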
/* Add local vars. */
add_local_variables (DECL_STRUCT_FUNCTION (old_decl), cfun, &id, false);
- /* Copy the Function's body. */
- copy_body (&id, old_entry_block->count, REG_BR_PROB_BASE,
- ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, blocks_to_copy, new_entry);
-
if (DECL_RESULT (old_decl) != NULL_TREE)
{
- tree *res_decl = &DECL_RESULT (old_decl);
- DECL_RESULT (new_decl) = remap_decl (*res_decl, &id);
+ tree old_name;
+ DECL_RESULT (new_decl) = remap_decl (DECL_RESULT (old_decl), &id);
lang_hooks.dup_lang_specific_decl (DECL_RESULT (new_decl));
+ if (gimple_in_ssa_p (id.src_cfun)
+ && DECL_BY_REFERENCE (DECL_RESULT (old_decl))
+ && (old_name
+ = gimple_default_def (id.src_cfun, DECL_RESULT (old_decl))))
+ {
+ tree new_name = make_ssa_name (DECL_RESULT (new_decl), NULL);
+ insert_decl_map (&id, old_name, new_name);
+ SSA_NAME_DEF_STMT (new_name) = gimple_build_nop ();
+ set_default_def (DECL_RESULT (new_decl), new_name);
+ }
}
+ /* Copy the Function's body. */
+ copy_body (&id, old_entry_block->count, REG_BR_PROB_BASE,
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, blocks_to_copy, new_entry);
+
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (new_decl);
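
For context, a hedged sketch of the kind of function whose DECL_RESULT can be DECL_BY_REFERENCE (front ends such as C++ mark results that must be built directly in caller-provided storage; shown in plain C purely for illustration): the result is written through a hidden pointer, so the cloned DECL_RESULT needs its default SSA name set up before the body is copied.

    /* Hypothetical function whose result is assumed here to be
       returned by invisible reference: the store goes through a
       hidden <retval> pointer, i.e. a DECL_BY_REFERENCE result.  */
    struct big { int a[32]; };

    struct big make_big (void)
    {
      struct big b = { { 0 } };
      return b;  /* stores through the hidden result pointer */
    }
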
if (inline_forbidden_into_p (caller, callee))
{
e->inline_failed = CIF_UNSPECIFIED;
- gimple_call_set_cannot_inline (e->call_stmt, true);
+ if (e->call_stmt)
+ gimple_call_set_cannot_inline (e->call_stmt, true);
return false;
}
if (!targetm.target_option.can_inline_p (caller, callee))
{
e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
- gimple_call_set_cannot_inline (e->call_stmt, true);
+ if (e->call_stmt)
+ gimple_call_set_cannot_inline (e->call_stmt, true);
e->call_stmt_cannot_inline_p = true;
return false;
}
|| !gimple_check_call_args (e->call_stmt)))
{
e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
- gimple_call_set_cannot_inline (e->call_stmt, true);
+ if (e->call_stmt)
+ gimple_call_set_cannot_inline (e->call_stmt, true);
e->call_stmt_cannot_inline_p = true;
return false;
}