struct { enum tree_code op; tree opnd; } unary;
struct { enum tree_code op; tree opnd0, opnd1; } binary;
struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
- struct { tree fn; bool pure; size_t nargs; tree *args; } call;
+ struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
} ops;
};
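
Storing the whole call statement (`fn_from`) instead of the callee tree is what lets these hash-table entries describe internal-function calls: an internal call has no callee tree at all (`gimple_call_fn` returns NULL_TREE for it), only an `enum internal_fn`, so a `tree fn` field cannot identify its target while the statement always can. A minimal sketch of recovering the target from the stored statement -- the helper name is mine, not GCC API:

    /* Hypothetical helper: report which kind of callee FN_FROM has.
       Internal calls are identified by an `enum internal_fn',
       normal calls by their callee tree.  */
    static void
    callee_from_stmt (gimple fn_from, enum internal_fn *ifn, tree *fn)
    {
      if (gimple_call_internal_p (fn_from))
        {
          *ifn = gimple_call_internal_fn (fn_from);
          *fn = NULL_TREE;                  /* no tree form exists */
        }
      else
        {
          *ifn = IFN_LAST;                  /* sentinel: not internal */
          *fn = gimple_call_fn (fn_from);   /* address of the callee */
        }
    }
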
{
enum tree_code subcode = gimple_assign_rhs_code (stmt);
- expr->type = NULL_TREE;
-
switch (get_gimple_rhs_class (subcode))
{
case GIMPLE_SINGLE_RHS:
expr->kind = EXPR_SINGLE;
+ expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
break;
case GIMPLE_UNARY_RHS:
expr->type = TREE_TYPE (gimple_call_lhs (stmt));
expr->kind = EXPR_CALL;
- expr->ops.call.fn = gimple_call_fn (stmt);
+ expr->ops.call.fn_from = stmt;
if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
expr->ops.call.pure = true;
/* If the calls are to different functions, then they
clearly cannot be equal. */
- if (! operand_equal_p (expr0->ops.call.fn,
- expr1->ops.call.fn, 0))
+ if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
+ expr1->ops.call.fn_from))
return false;
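
`gimple_call_same_target_p` (from gimple.c) is the statement-level replacement for the old tree comparison: two calls match only if both are internal calls to the same `internal_fn`, or both are normal calls whose callee trees compare equal; a mixed internal/normal pair never matches. Roughly -- a paraphrase of the semantics, not the exact GCC body:

    /* Paraphrase of gimple_call_same_target_p; see gimple.c for the
       real implementation.  */
    static bool
    same_target_sketch (gimple c1, gimple c2)
    {
      if (gimple_call_internal_p (c1) || gimple_call_internal_p (c2))
        return (gimple_call_internal_p (c1)
                && gimple_call_internal_p (c2)
                && (gimple_call_internal_fn (c1)
                    == gimple_call_internal_fn (c2)));
      return operand_equal_p (gimple_call_fn (c1), gimple_call_fn (c2), 0);
    }
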
if (! expr0->ops.call.pure)
{
size_t i;
enum tree_code code = CALL_EXPR;
+ gimple fn_from;
val = iterative_hash_object (code, val);
- val = iterative_hash_expr (expr->ops.call.fn, val);
+ fn_from = expr->ops.call.fn_from;
+ if (gimple_call_internal_p (fn_from))
+ val = iterative_hash_hashval_t
+ ((hashval_t) gimple_call_internal_fn (fn_from), val);
+ else
+ val = iterative_hash_expr (gimple_call_fn (fn_from), val);
for (i = 0; i < expr->ops.call.nargs; i++)
val = iterative_hash_expr (expr->ops.call.args[i], val);
}
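
The hashing follows the same split so that equal expressions hash equally: internal calls fold the `internal_fn` enum into the running value, normal calls fold in the callee tree's hash, and both then mix in the arguments in order. A standalone toy showing the iterative-combine pattern (not GCC's actual hash functions):

    #include <stddef.h>
    #include <stdint.h>

    /* Toy analogue of iterative_hash_*: fold one component at a time
       into a running hash.  The mixer is the boost-style combiner,
       chosen only for illustration.  */
    static uint32_t
    mix (uint32_t h, uint32_t v)
    {
      return h ^ (v + 0x9e3779b9u + (h << 6) + (h >> 2));
    }

    static uint32_t
    hash_call (uint32_t callee_id, const uint32_t *args, size_t nargs)
    {
      uint32_t h = mix (0, 0xCA11u);   /* tag: expression is a call */
      h = mix (h, callee_id);          /* internal fn or callee hash */
      for (size_t i = 0; i < nargs; i++)
        h = mix (h, args[i]);          /* arguments, in order */
      return h;
    }
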
{
size_t i;
size_t nargs = element->expr.ops.call.nargs;
-
- print_generic_expr (stream, element->expr.ops.call.fn, 0);
+ gimple fn_from;
+
+ fn_from = element->expr.ops.call.fn_from;
+ if (gimple_call_internal_p (fn_from))
+ fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
+ stream);
+ else
+ print_generic_expr (stream, gimple_call_fn (fn_from), 0);
fprintf (stream, " (");
for (i = 0; i < nargs; i++)
{
gimple_stmt_iterator gsi;
basic_block bb;
FOR_EACH_BB (bb)
- {for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
update_stmt_if_modified (gsi_stmt (gsi));
}
}
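
This walk is cheap even over the whole function because `update_stmt_if_modified` is just a dirty-bit check; only statements actually touched by the pass get their operand caches rebuilt. In essence (paraphrasing the inline helper):

    /* Paraphrase of the inline helper: re-scan operands only when the
       statement's modified flag was set by an earlier edit.  */
    static inline void
    update_stmt_if_modified_sketch (gimple s)
    {
      if (gimple_modified_p (s))
        update_stmt (s);   /* rebuilds operands, clears the flag */
    }
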
EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
{
basic_block bb = BASIC_BLOCK (i);
- if (single_succ_p (bb) == 1
+ if (bb
+ && single_succ_p (bb)
&& (single_succ_edge (bb)->flags & EDGE_EH) == 0)
{
bitmap_clear_bit (need_eh_cleanup, i);
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func
+ TODO_cleanup_cfg
| TODO_update_ssa
- | TODO_cleanup_cfg
- | TODO_verify_ssa /* todo_flags_finish */
+ | TODO_verify_ssa
+ | TODO_verify_flow /* todo_flags_finish */
}
};
i_1 = phi (..., i_2)
i_2 = i_1 +/- ... */
-static bool
+bool
simple_iv_increment_p (gimple stmt)
{
+ enum tree_code code;
tree lhs, preinc;
gimple phi;
size_t i;
if (TREE_CODE (lhs) != SSA_NAME)
return false;
- if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
- && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
+ code = gimple_assign_rhs_code (stmt);
+ if (code != PLUS_EXPR
+ && code != MINUS_EXPR
+ && code != POINTER_PLUS_EXPR)
return false;
preinc = gimple_assign_rhs1 (stmt);
-
if (TREE_CODE (preinc) != SSA_NAME)
return false;
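
Accepting POINTER_PLUS_EXPR extends the predicate to pointer induction variables, which never appear as PLUS_EXPR in GIMPLE (pointer arithmetic has its own code whose second operand is a byte offset). For example:

    /* The increment shapes now recognized, as GIMPLE:
         i_2 = i_1 + 1;    -- PLUS_EXPR
         i_2 = i_1 - 1;    -- MINUS_EXPR
         p_2 = p_1 + 4;    -- POINTER_PLUS_EXPR (byte step)
       where the PHI defining i_1/p_1 is fed by i_2/p_2.  */
    void
    clear (int *a, int n)
    {
      for (int *p = a; p < a + n; p++)   /* p++ is a POINTER_PLUS_EXPR */
        *p = 0;
    }
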
{
tree cond = build2 (code, boolean_type_node, op0, op1);
tree inverted = invert_truthvalue_loc (loc, cond);
+ bool can_infer_simple_equiv
+ = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
+ && real_zerop (op0));
struct edge_info *edge_info;
edge_info = allocate_edge_info (true_edge);
record_conditions (edge_info, cond, inverted);
- if (code == EQ_EXPR)
+ if (can_infer_simple_equiv && code == EQ_EXPR)
{
edge_info->lhs = op1;
edge_info->rhs = op0;
edge_info = allocate_edge_info (false_edge);
record_conditions (edge_info, inverted, cond);
- if (TREE_CODE (inverted) == EQ_EXPR)
+ if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
{
edge_info->lhs = op1;
edge_info->rhs = op0;
}
else if (TREE_CODE (op0) == SSA_NAME
- && (is_gimple_min_invariant (op1)
- || TREE_CODE (op1) == SSA_NAME))
+ && (TREE_CODE (op1) == SSA_NAME
+ || is_gimple_min_invariant (op1)))
{
tree cond = build2 (code, boolean_type_node, op0, op1);
tree inverted = invert_truthvalue_loc (loc, cond);
+ bool can_infer_simple_equiv
+ = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
+ && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
struct edge_info *edge_info;
edge_info = allocate_edge_info (true_edge);
record_conditions (edge_info, cond, inverted);
- if (code == EQ_EXPR)
+ if (can_infer_simple_equiv && code == EQ_EXPR)
{
edge_info->lhs = op0;
edge_info->rhs = op1;
edge_info = allocate_edge_info (false_edge);
record_conditions (edge_info, inverted, cond);
- if (TREE_CODE (inverted) == EQ_EXPR)
+ if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
{
edge_info->lhs = op0;
edge_info->rhs = op1;
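
The `can_infer_simple_equiv` guard exists because IEEE signed zeros make `==` too weak to justify a substitution: `x == 0.0` is also true when x is -0.0, and for a floating-point SSA name y, `x == y` holds for any +0.0/-0.0 pairing. Recording x = 0.0 (or x = y) on the true edge could therefore flip the sign of a zero downstream. A standalone demonstration:

    #include <stdio.h>

    int
    main (void)
    {
      double x = -0.0;
      if (x == 0.0)                  /* true: -0.0 compares equal to +0.0 */
        /* Substituting 0.0 for x here would change the output from
           "-inf" to "inf" -- exactly the equivalence the guard refuses
           to record when signed zeros are honored.  */
        printf ("%f\n", 1.0 / x);    /* prints -inf */
      return 0;
    }
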
|| useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
|| may_propagate_copy_into_stmt (stmt, cached_lhs))
{
-#if defined ENABLE_CHECKING
- gcc_assert (TREE_CODE (cached_lhs) == SSA_NAME
- || is_gimple_min_invariant (cached_lhs));
-#endif
+ gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
+ || is_gimple_min_invariant (cached_lhs));
if (dump_file && (dump_flags & TDF_DETAILS))
{
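
`gcc_checking_assert` folds the old `#if defined ENABLE_CHECKING` dance into one line. Approximately as defined in gcc/system.h:

    #if ENABLE_ASSERT_CHECKING
    # define gcc_checking_assert(EXPR) gcc_assert (EXPR)
    #else
    /* `0 &&' keeps EXPR parsed and type-checked but never evaluated,
       so release builds pay nothing and the condition cannot bit-rot.  */
    # define gcc_checking_assert(EXPR) ((void)(0 && (EXPR)))
    #endif
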
val = SSA_NAME_VALUE (op);
if (val && val != op)
{
- /* Do not change the base variable in the virtual operand
- tables. That would make it impossible to reconstruct
- the renamed virtual operand if we later modify this
- statement. Also only allow the new value to be an SSA_NAME
- for propagation into virtual operands. */
- if (!is_gimple_reg (op)
- && (TREE_CODE (val) != SSA_NAME
- || is_gimple_reg (val)
- || get_virtual_var (val) != get_virtual_var (op)))
- return;
-
/* Do not replace hard register operands in asm statements. */
if (gimple_code (stmt) == GIMPLE_ASM
&& !may_propagate_copy_into_asm (op))
use_operand_p op_p;
ssa_op_iter iter;
- FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
- {
- if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
- cprop_operand (stmt, op_p);
- }
+ FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
+ cprop_operand (stmt, op_p);
}
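
With virtual-operand propagation gone (see the comment deleted from cprop_operand above), every operand this loop needs to visit is a real use, and real uses are SSA_NAMEs by construction; `SSA_OP_USE` encodes exactly that, while `SSA_OP_ALL_USES` would also visit the virtual (memory) use. The old TREE_CODE filter is therefore dead. A sketch of the invariant the new loop relies on:

    /* Every operand visited under SSA_OP_USE is a real use and hence
       an SSA_NAME; SSA_OP_ALL_USES would add the virtual use too.  */
    FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
      gcc_checking_assert (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME);
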
/* Optimize the statement pointed to by iterator SI.
old_stmt = stmt = gsi_stmt (si);
- if (gimple_code (stmt) == GIMPLE_COND)
- canonicalize_comparison (stmt);
-
- update_stmt_if_modified (stmt);
- opt_stats.num_stmts++;
-
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Optimizing statement ");
print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
}
+ if (gimple_code (stmt) == GIMPLE_COND)
+ canonicalize_comparison (stmt);
+
+ update_stmt_if_modified (stmt);
+ opt_stats.num_stmts++;
+
/* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
cprop_into_stmt (stmt);
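
Reordering these steps means the "Optimizing statement" dump now shows the statement as it arrived, before `canonicalize_comparison` rewrites it. Illustrative only (not lifted from GCC) -- the kind of rewrite canonicalization performs so that equivalent conditions hit the same hash-table entry:

    /* before:  if (a > b)   -- GT_EXPR  (a, b)
       after:   if (b < a)   -- LT_EXPR  (b, a)
       A later `if (b < a)' then matches the recorded expression
       directly instead of missing on code/operand order.  */
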
/* Check for redundant computations. Do this optimization only
for assignments that have no volatile ops and conditionals. */
- may_optimize_p = (!gimple_has_volatile_ops (stmt)
- && ((is_gimple_assign (stmt)
- && !gimple_rhs_has_side_effects (stmt))
+ may_optimize_p = (!gimple_has_side_effects (stmt)
+ && (is_gimple_assign (stmt)
|| (is_gimple_call (stmt)
- && gimple_call_lhs (stmt) != NULL_TREE
- && !gimple_rhs_has_side_effects (stmt))
+ && gimple_call_lhs (stmt) != NULL_TREE)
|| gimple_code (stmt) == GIMPLE_COND
|| gimple_code (stmt) == GIMPLE_SWITCH));
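
`gimple_has_side_effects` subsumes both of the old tests (volatile operands and side effects on the RHS), and the surviving `gimple_call_lhs` requirement reflects that a call's computation can only be reused through the value it defines. At the source level, roughly:

    extern void g (void);   /* some function called for effect only */

    int
    f (int a, int b)
    {
      int x = a + b;   /* eligible: assignment, no side effects */
      int y = a + b;   /* redundant -- DOM can reuse x's value */
      g ();            /* call with no LHS: no value to reuse */
      return x + y;
    }
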
GIMPLE_ASSIGN, and there is no way to effect such a
transformation in-place. We might want to consider
using the more general fold_stmt here. */
- fold_stmt_inplace (use_stmt);
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
+ fold_stmt_inplace (&gsi);
+ }
/* Sometimes propagation can expose new operands to the
renamer. */
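
The wrapper is needed because `fold_stmt_inplace` now takes a `gimple_stmt_iterator *`, mirroring `fold_stmt`; its contract is unchanged in that the iterator must still point at the same statement afterwards. Paraphrasing the body in gimple-fold.c:

    /* Paraphrase: same entry-point shape as fold_stmt, but folding is
       constrained to leave the statement itself in place.  */
    bool
    fold_stmt_inplace (gimple_stmt_iterator *gsi)
    {
      gimple stmt = gsi_stmt (*gsi);
      bool changed = fold_stmt_1 (gsi, /*inplace=*/true);
      gcc_assert (gsi_stmt (*gsi) == stmt);  /* must not be replaced */
      return changed;
    }
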
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_cleanup_cfg
- | TODO_dump_func
| TODO_ggc_collect
| TODO_verify_ssa
| TODO_verify_stmts