X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Ftree-ssa-dom.c;h=949acf1d16826fc10930ea0b88f97bc200e02896;hb=ba2f8f6bb45dad549b9ba03546ff5d1f2ff4bad7;hp=d15df31238d6b057599dc8bbe68757ca766dd305;hpb=00f4f70565fa903b89c2f1be5059898ddb069db0;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index d15df31238d..949acf1d168 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -64,18 +64,21 @@ struct hashable_expr
     struct { enum tree_code op;  tree opnd; } unary;
     struct { enum tree_code op;  tree opnd0, opnd1; } binary;
     struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
-    struct { tree fn; bool pure; size_t nargs; tree *args; } call;
+    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
   } ops;
 };
 
 /* Structure for recording known values of a conditional expression
    at the exits from its block.  */
 
-struct cond_equivalence
+typedef struct cond_equivalence_s
 {
   struct hashable_expr cond;
   tree value;
-};
+} cond_equivalence;
+
+DEF_VEC_O(cond_equivalence);
+DEF_VEC_ALLOC_O(cond_equivalence,heap);
 
 /* Structure for recording edge equivalences as well as any pending
    edge redirections during the dominator optimizer.
@@ -99,11 +102,8 @@ struct edge_info
   tree rhs;
 
   /* Traversing an edge may also indicate one or more particular conditions
-     are true or false.  The number of recorded conditions can vary, but
-     can be determined by the condition's code.  So we have an array
-     and its maximum index rather than use a varray.  */
-  struct cond_equivalence *cond_equivalences;
-  unsigned int max_cond_equivalences;
+     are true or false.  */
+  VEC(cond_equivalence, heap) *cond_equivalences;
 };
 
 /* Hash table with expressions made available during the renaming process.
@@ -179,7 +179,7 @@ static hashval_t avail_expr_hash (const void *);
 static hashval_t real_avail_expr_hash (const void *);
 static int avail_expr_eq (const void *, const void *);
 static void htab_statistics (FILE *, htab_t);
-static void record_cond (struct cond_equivalence *);
+static void record_cond (cond_equivalence *);
 static void record_const_or_copy (tree, tree);
 static void record_equality (tree, tree);
 static void record_equivalences_from_phis (basic_block);
@@ -208,12 +208,11 @@ initialize_hash_element (gimple stmt, tree lhs,
     {
       enum tree_code subcode = gimple_assign_rhs_code (stmt);
 
-      expr->type = NULL_TREE;
-
       switch (get_gimple_rhs_class (subcode))
         {
         case GIMPLE_SINGLE_RHS:
           expr->kind = EXPR_SINGLE;
+          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
           expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
           break;
         case GIMPLE_UNARY_RHS:
@@ -258,7 +257,7 @@ initialize_hash_element (gimple stmt, tree lhs,
 
       expr->type = TREE_TYPE (gimple_call_lhs (stmt));
       expr->kind = EXPR_CALL;
-      expr->ops.call.fn = gimple_call_fn (stmt);
+      expr->ops.call.fn_from = stmt;
 
       if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
         expr->ops.call.pure = true;
@@ -422,8 +421,8 @@ hashable_expr_equal_p (const struct hashable_expr *expr0,
 
         /* If the calls are to different functions, then they
            clearly cannot be equal.  */
-        if (! operand_equal_p (expr0->ops.call.fn,
-                               expr1->ops.call.fn, 0))
+        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
+                                        expr1->ops.call.fn_from))
           return false;
 
         if (! expr0->ops.call.pure)
@@ -503,9 +502,15 @@ iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
       {
         size_t i;
         enum tree_code code = CALL_EXPR;
+        gimple fn_from;
 
         val = iterative_hash_object (code, val);
-        val = iterative_hash_expr (expr->ops.call.fn, val);
+        fn_from = expr->ops.call.fn_from;
+        if (gimple_call_internal_p (fn_from))
+          val = iterative_hash_hashval_t
+            ((hashval_t) gimple_call_internal_fn (fn_from), val);
+        else
+          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
         for (i = 0; i < expr->ops.call.nargs; i++)
           val = iterative_hash_expr (expr->ops.call.args[i], val);
       }
@@ -565,8 +570,14 @@ print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
       {
         size_t i;
         size_t nargs = element->expr.ops.call.nargs;
-
-        print_generic_expr (stream, element->expr.ops.call.fn, 0);
+        gimple fn_from;
+
+        fn_from = element->expr.ops.call.fn_from;
+        if (gimple_call_internal_p (fn_from))
+          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
+                 stream);
+        else
+          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
         fprintf (stream, " (");
         for (i = 0; i < nargs; i++)
           {
@@ -636,7 +647,7 @@ free_all_edge_infos (void)
           if (edge_info)
             {
               if (edge_info->cond_equivalences)
-                free (edge_info->cond_equivalences);
+                VEC_free (cond_equivalence, heap, edge_info->cond_equivalences);
               free (edge_info);
               e->aux = NULL;
             }
@@ -701,7 +712,8 @@ tree_ssa_dominator_optimize (void)
       gimple_stmt_iterator gsi;
       basic_block bb;
       FOR_EACH_BB (bb)
-        {for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+        {
+          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
             update_stmt_if_modified (gsi_stmt (gsi));
         }
     }
@@ -734,7 +746,8 @@ tree_ssa_dominator_optimize (void)
       EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
         {
           basic_block bb = BASIC_BLOCK (i);
-          if (single_succ_p (bb) == 1
+          if (bb
+              && single_succ_p (bb)
               && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
             {
               bitmap_clear_bit (need_eh_cleanup, i);
@@ -799,10 +812,10 @@ struct gimple_opt_pass pass_dominator =
   0,                                    /* properties_provided */
   0,                                    /* properties_destroyed */
   0,                                    /* todo_flags_start */
-  TODO_dump_func
+  TODO_cleanup_cfg
     | TODO_update_ssa
-    | TODO_cleanup_cfg
-    | TODO_verify_ssa                   /* todo_flags_finish */
+    | TODO_verify_ssa
+    | TODO_verify_flow                  /* todo_flags_finish */
  }
 };
 
@@ -1059,14 +1072,14 @@ record_equivalences_from_incoming_edge (basic_block bb)
         {
           tree lhs = edge_info->lhs;
           tree rhs = edge_info->rhs;
-          struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
+          cond_equivalence *eq;
 
           if (lhs)
             record_equality (lhs, rhs);
 
-          if (cond_equivalences)
-            for (i = 0; i < edge_info->max_cond_equivalences; i++)
-              record_cond (&cond_equivalences[i]);
+          for (i = 0; VEC_iterate (cond_equivalence,
+                                   edge_info->cond_equivalences, i, eq); ++i)
+            record_cond (eq);
         }
     }
 }
@@ -1114,7 +1127,7 @@ htab_statistics (FILE *file, htab_t htab)
    boolean value.  */
 
 static void
-record_cond (struct cond_equivalence *p)
+record_cond (cond_equivalence *p)
 {
   struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
   void **slot;
@@ -1140,14 +1153,15 @@ record_cond (struct cond_equivalence *p)
 }
 
 /* Build a cond_equivalence record indicating that the comparison
-   CODE holds between operands OP0 and OP1.  */
+   CODE holds between operands OP0 and OP1 and push it to **P.  */
 
 static void
 build_and_record_new_cond (enum tree_code code,
                            tree op0, tree op1,
-                           struct cond_equivalence *p)
+                           VEC(cond_equivalence, heap) **p)
 {
-  struct hashable_expr *cond = &p->cond;
+  cond_equivalence c;
+  struct hashable_expr *cond = &c.cond;
 
   gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
 
@@ -1157,7 +1171,8 @@ build_and_record_new_cond (enum tree_code code,
   cond->ops.binary.opnd0 = op0;
   cond->ops.binary.opnd1 = op1;
 
-  p->value = boolean_true_node;
+  c.value = boolean_true_node;
+  VEC_safe_push (cond_equivalence, heap, *p, &c);
 }
 
 /* Record that COND is true and INVERTED is false into the edge information
@@ -1170,6 +1185,7 @@ static void
 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
 {
   tree op0, op1;
+  cond_equivalence c;
 
   if (!COMPARISON_CLASS_P (cond))
     return;
@@ -1183,125 +1199,96 @@ record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
     case GT_EXPR:
       if (FLOAT_TYPE_P (TREE_TYPE (op0)))
         {
-          edge_info->max_cond_equivalences = 6;
-          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 6);
           build_and_record_new_cond (ORDERED_EXPR, op0, op1,
-                                     &edge_info->cond_equivalences[4]);
+                                     &edge_info->cond_equivalences);
           build_and_record_new_cond (LTGT_EXPR, op0, op1,
-                                     &edge_info->cond_equivalences[5]);
-        }
-      else
-        {
-          edge_info->max_cond_equivalences = 4;
-          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
+                                     &edge_info->cond_equivalences);
         }
 
       build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                   ? LE_EXPR : GE_EXPR),
-                                 op0, op1, &edge_info->cond_equivalences[2]);
+                                 op0, op1, &edge_info->cond_equivalences);
       build_and_record_new_cond (NE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[3]);
+                                 &edge_info->cond_equivalences);
       break;
 
     case GE_EXPR:
    case LE_EXPR:
       if (FLOAT_TYPE_P (TREE_TYPE (op0)))
         {
-          edge_info->max_cond_equivalences = 3;
-          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 3);
           build_and_record_new_cond (ORDERED_EXPR, op0, op1,
-                                     &edge_info->cond_equivalences[2]);
-        }
-      else
-        {
-          edge_info->max_cond_equivalences = 2;
-          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
+                                     &edge_info->cond_equivalences);
         }
       break;
 
     case EQ_EXPR:
       if (FLOAT_TYPE_P (TREE_TYPE (op0)))
         {
-          edge_info->max_cond_equivalences = 5;
-          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 5);
           build_and_record_new_cond (ORDERED_EXPR, op0, op1,
-                                     &edge_info->cond_equivalences[4]);
-        }
-      else
-        {
-          edge_info->max_cond_equivalences = 4;
-          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
+                                     &edge_info->cond_equivalences);
         }
       build_and_record_new_cond (LE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[2]);
+                                 &edge_info->cond_equivalences);
       build_and_record_new_cond (GE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[3]);
+                                 &edge_info->cond_equivalences);
       break;
 
     case UNORDERED_EXPR:
-      edge_info->max_cond_equivalences = 8;
-      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 8);
       build_and_record_new_cond (NE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[2]);
+                                 &edge_info->cond_equivalences);
       build_and_record_new_cond (UNLE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[3]);
+                                 &edge_info->cond_equivalences);
       build_and_record_new_cond (UNGE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[4]);
+                                 &edge_info->cond_equivalences);
       build_and_record_new_cond (UNEQ_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[5]);
+                                 &edge_info->cond_equivalences);
       build_and_record_new_cond (UNLT_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[6]);
+                                 &edge_info->cond_equivalences);
       build_and_record_new_cond (UNGT_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[7]);
+                                 &edge_info->cond_equivalences);
       break;
 
     case UNLT_EXPR:
     case UNGT_EXPR:
-      edge_info->max_cond_equivalences = 4;
-      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
       build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                   ? UNLE_EXPR : UNGE_EXPR),
-                                 op0, op1, &edge_info->cond_equivalences[2]);
+                                 op0, op1, &edge_info->cond_equivalences);
       build_and_record_new_cond (NE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[3]);
+                                 &edge_info->cond_equivalences);
       break;
 
     case UNEQ_EXPR:
-      edge_info->max_cond_equivalences = 4;
-      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
       build_and_record_new_cond (UNLE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[2]);
+                                 &edge_info->cond_equivalences);
       build_and_record_new_cond (UNGE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[3]);
+                                 &edge_info->cond_equivalences);
       break;
 
     case LTGT_EXPR:
-      edge_info->max_cond_equivalences = 4;
-      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
       build_and_record_new_cond (NE_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[2]);
+                                 &edge_info->cond_equivalences);
       build_and_record_new_cond (ORDERED_EXPR, op0, op1,
-                                 &edge_info->cond_equivalences[3]);
+                                 &edge_info->cond_equivalences);
       break;
 
     default:
-      edge_info->max_cond_equivalences = 2;
-      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
       break;
     }
 
   /* Now store the original true and false conditions into the first
      two slots.  */
-  initialize_expr_from_cond (cond, &edge_info->cond_equivalences[0].cond);
-  edge_info->cond_equivalences[0].value = boolean_true_node;
+  initialize_expr_from_cond (cond, &c.cond);
+  c.value = boolean_true_node;
+  VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
 
   /* It is possible for INVERTED to be the negation of a comparison,
      and not a valid RHS or GIMPLE_COND condition.  This happens because
      invert_truthvalue may return such an expression when asked to invert
      a floating-point comparison.  These comparisons are not assumed to
      obey the trichotomy law.  */
-  initialize_expr_from_cond (inverted, &edge_info->cond_equivalences[1].cond);
-  edge_info->cond_equivalences[1].value = boolean_false_node;
+  initialize_expr_from_cond (inverted, &c.cond);
+  c.value = boolean_false_node;
+  VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
 }
 
 /* A helper function for record_const_or_copy and record_equality.
@@ -1422,9 +1409,10 @@ record_equality (tree x, tree y)
    i_1 = phi (..., i_2)
    i_2 = i_1 +/- ...  */
 
-static bool
+bool
 simple_iv_increment_p (gimple stmt)
 {
+  enum tree_code code;
   tree lhs, preinc;
   gimple phi;
   size_t i;
@@ -1436,12 +1424,13 @@ simple_iv_increment_p (gimple stmt)
   if (TREE_CODE (lhs) != SSA_NAME)
     return false;
 
-  if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
-      && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
+  code = gimple_assign_rhs_code (stmt);
+  if (code != PLUS_EXPR
+      && code != MINUS_EXPR
+      && code != POINTER_PLUS_EXPR)
     return false;
 
   preinc = gimple_assign_rhs1 (stmt);
-
   if (TREE_CODE (preinc) != SSA_NAME)
     return false;
 
@@ -1621,12 +1610,15 @@ record_edge_info (basic_block bb)
             {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
+             bool can_infer_simple_equiv
+               = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
+                   && real_zerop (op0));
              struct edge_info *edge_info;
 
              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);
 
-             if (code == EQ_EXPR)
+             if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
@@ -1635,7 +1627,7 @@ record_edge_info (basic_block bb)
              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);
 
-             if (code == NE_EXPR)
+             if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
@@ -1643,17 +1635,20 @@ record_edge_info (basic_block bb)
            }
 
          else if (TREE_CODE (op0) == SSA_NAME
-                  && (is_gimple_min_invariant (op1)
-                      || TREE_CODE (op1) == SSA_NAME))
+                  && (TREE_CODE (op1) == SSA_NAME
+                      || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
+             bool can_infer_simple_equiv
+               = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
+                   && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;
 
              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);
 
-             if (code == EQ_EXPR)
+             if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
@@ -1662,7 +1657,7 @@ record_edge_info (basic_block bb)
              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);
 
-             if (TREE_CODE (cond) == NE_EXPR)
+             if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
@@ -1749,7 +1744,7 @@ dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
          our equivalence tables.  */
       if (edge_info)
         {
-          struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
+          cond_equivalence *eq;
           tree lhs = edge_info->lhs;
           tree rhs = edge_info->rhs;
 
@@ -1759,9 +1754,9 @@ dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
 
           /* If we have 0 = COND or 1 = COND equivalences, record them
              into our expression hash tables.  */
-          if (cond_equivalences)
-            for (i = 0; i < edge_info->max_cond_equivalences; i++)
-              record_cond (&cond_equivalences[i]);
+          for (i = 0; VEC_iterate (cond_equivalence,
+                                   edge_info->cond_equivalences, i, eq); ++i)
+            record_cond (eq);
         }
 
       dom_thread_across_edge (walk_data, true_edge);
@@ -1784,7 +1779,7 @@ dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
          our equivalence tables.  */
       if (edge_info)
         {
-          struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
+          cond_equivalence *eq;
           tree lhs = edge_info->lhs;
           tree rhs = edge_info->rhs;
 
@@ -1794,9 +1789,9 @@ dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
 
          /* If we have 0 = COND or 1 = COND equivalences, record them
             into our expression hash tables.  */
-          if (cond_equivalences)
-            for (i = 0; i < edge_info->max_cond_equivalences; i++)
-              record_cond (&cond_equivalences[i]);
+          for (i = 0; VEC_iterate (cond_equivalence,
+                                   edge_info->cond_equivalences, i, eq); ++i)
+            record_cond (eq);
         }
 
       /* Now thread the edge.  */
@@ -1878,10 +1873,8 @@ eliminate_redundant_computations (gimple_stmt_iterator* gsi)
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
       || may_propagate_copy_into_stmt (stmt, cached_lhs))
     {
-#if defined ENABLE_CHECKING
-      gcc_assert (TREE_CODE (cached_lhs) == SSA_NAME
-                  || is_gimple_min_invariant (cached_lhs));
-#endif
+      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
+                           || is_gimple_min_invariant (cached_lhs));
 
       if (dump_file && (dump_flags & TDF_DETAILS))
         {
@@ -2008,17 +2001,6 @@ cprop_operand (gimple stmt, use_operand_p op_p)
   val = SSA_NAME_VALUE (op);
   if (val && val != op)
     {
-      /* Do not change the base variable in the virtual operand
-         tables.  That would make it impossible to reconstruct
-         the renamed virtual operand if we later modify this
-         statement.  Also only allow the new value to be an SSA_NAME
-         for propagation into virtual operands.  */
-      if (!is_gimple_reg (op)
-          && (TREE_CODE (val) != SSA_NAME
-              || is_gimple_reg (val)
-              || get_virtual_var (val) != get_virtual_var (op)))
-        return;
-
       /* Do not replace hard register operands in asm statements.  */
       if (gimple_code (stmt) == GIMPLE_ASM
           && !may_propagate_copy_into_asm (op))
        return;
@@ -2089,11 +2071,8 @@ cprop_into_stmt (gimple stmt)
   use_operand_p op_p;
   ssa_op_iter iter;
 
-  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
-    {
-      if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
-        cprop_operand (stmt, op_p);
-    }
+  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
+    cprop_operand (stmt, op_p);
 }
 
 /* Optimize the statement pointed to by iterator SI.
@@ -2120,18 +2099,18 @@ optimize_stmt (basic_block bb, gimple_stmt_iterator si)
 
   old_stmt = stmt = gsi_stmt (si);
 
-  if (gimple_code (stmt) == GIMPLE_COND)
-    canonicalize_comparison (stmt);
-
-  update_stmt_if_modified (stmt);
-  opt_stats.num_stmts++;
-
   if (dump_file && (dump_flags & TDF_DETAILS))
     {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }
 
+  if (gimple_code (stmt) == GIMPLE_COND)
+    canonicalize_comparison (stmt);
+
+  update_stmt_if_modified (stmt);
+  opt_stats.num_stmts++;
+
   /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
   cprop_into_stmt (stmt);
 
@@ -2204,6 +2183,48 @@ optimize_stmt (basic_block bb, gimple_stmt_iterator si)
       update_stmt_if_modified (stmt);
       eliminate_redundant_computations (&si);
       stmt = gsi_stmt (si);
+
+      /* Perform simple redundant store elimination.  */
+      if (gimple_assign_single_p (stmt)
+          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
+        {
+          tree lhs = gimple_assign_lhs (stmt);
+          tree rhs = gimple_assign_rhs1 (stmt);
+          tree cached_lhs;
+          gimple new_stmt;
+          if (TREE_CODE (rhs) == SSA_NAME)
+            {
+              tree tem = SSA_NAME_VALUE (rhs);
+              if (tem)
+                rhs = tem;
+            }
+          /* Build a new statement with the RHS and LHS exchanged.  */
+          if (TREE_CODE (rhs) == SSA_NAME)
+            {
+              gimple defstmt = SSA_NAME_DEF_STMT (rhs);
+              new_stmt = gimple_build_assign (rhs, lhs);
+              SSA_NAME_DEF_STMT (rhs) = defstmt;
+            }
+          else
+            new_stmt = gimple_build_assign (rhs, lhs);
+          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+          cached_lhs = lookup_avail_expr (new_stmt, false);
+          if (cached_lhs
+              && rhs == cached_lhs)
+            {
+              basic_block bb = gimple_bb (stmt);
+              int lp_nr = lookup_stmt_eh_lp (stmt);
+              unlink_stmt_vdef (stmt);
+              gsi_remove (&si, true);
+              if (lp_nr != 0)
+                {
+                  bitmap_set_bit (need_eh_cleanup, bb->index);
+                  if (dump_file && (dump_flags & TDF_DETAILS))
+                    fprintf (dump_file, " Flagged to clear EH edges.\n");
+                }
+              return;
+            }
+        }
     }
 
   /* Record any additional equivalences created by this statement.  */
@@ -2559,6 +2580,20 @@ propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_name
              continue;
            }
 
+         /* It's not ok to propagate into the definition stmt of RHS.
+              :
+              # prephitmp.12_36 = PHI
+              g_67.1_6 = prephitmp.12_36;
+              goto ;
+            While this is strictly all dead code we do not want to
+            deal with this here.  */
+         if (TREE_CODE (rhs) == SSA_NAME
+             && SSA_NAME_DEF_STMT (rhs) == use_stmt)
+           {
+             all = false;
+             continue;
+           }
+
          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
@@ -2613,7 +2648,10 @@ propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_name
             GIMPLE_ASSIGN, and there is no way to effect such a
             transformation in-place.  We might want to consider
             using the more general fold_stmt here.  */
-           fold_stmt_inplace (use_stmt);
+           {
+             gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
+             fold_stmt_inplace (&gsi);
+           }
 
          /* Sometimes propagation can expose new operands to the
             renamer.  */
@@ -2926,7 +2964,6 @@ struct gimple_opt_pass pass_phi_only_cprop =
   0,                                    /* properties_destroyed */
   0,                                    /* todo_flags_start */
   TODO_cleanup_cfg
-    | TODO_dump_func
     | TODO_ggc_collect
     | TODO_verify_ssa
     | TODO_verify_stmts