X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Ftree-ssa-loop-ivcanon.c;h=e278c55b08bbd3f0387bf8f03a968f13fd1e9441;hb=d29a1ab5c0f85e613d581a091a251e57c6f3f8cf;hp=46e153176830a1a05a712ae65b7818bcf51d5a14;hpb=d9e7e1a2cbb1c2971b273141966258bc8c8d8629;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c index 46e15317683..e278c55b08b 100644 --- a/gcc/tree-ssa-loop-ivcanon.c +++ b/gcc/tree-ssa-loop-ivcanon.c @@ -1,11 +1,11 @@ /* Induction variable canonicalization. - Copyright (C) 2004, 2005 Free Software Foundation, Inc. + Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the -Free Software Foundation; either version 2, or (at your option) any +Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT @@ -14,9 +14,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA -02110-1301, USA. */ +along with GCC; see the file COPYING3. If not see +. */ /* This pass detects the loops that iterate a constant number of times, adds a canonical induction variable (step -1, tested against 0) @@ -73,8 +72,9 @@ static void create_canonical_iv (struct loop *loop, edge exit, tree niter) { edge in; - tree cond, type, var; - block_stmt_iterator incr_at; + tree type, var; + gimple cond; + gimple_stmt_iterator incr_at; enum tree_code cmp; if (dump_file && (dump_flags & TDF_DETAILS)) @@ -98,31 +98,31 @@ create_canonical_iv (struct loop *loop, edge exit, tree niter) niter = fold_build2 (PLUS_EXPR, type, niter, build_int_cst (type, 1)); - incr_at = bsi_last (in->src); + incr_at = gsi_last_bb (in->src); create_iv (niter, build_int_cst (type, -1), NULL_TREE, loop, &incr_at, false, NULL, &var); cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR; - COND_EXPR_COND (cond) = build2 (cmp, boolean_type_node, - var, - build_int_cst (type, 0)); + gimple_cond_set_code (cond, cmp); + gimple_cond_set_lhs (cond, var); + gimple_cond_set_rhs (cond, build_int_cst (type, 0)); update_stmt (cond); } -/* Computes an estimated number of insns in LOOP. */ +/* Computes an estimated number of insns in LOOP, weighted by WEIGHTS. */ unsigned -tree_num_loop_insns (struct loop *loop) +tree_num_loop_insns (struct loop *loop, eni_weights *weights) { basic_block *body = get_loop_body (loop); - block_stmt_iterator bsi; + gimple_stmt_iterator gsi; unsigned size = 1, i; for (i = 0; i < loop->num_nodes; i++) - for (bsi = bsi_start (body[i]); !bsi_end_p (bsi); bsi_next (&bsi)) - size += estimate_num_insns (bsi_stmt (bsi)); + for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) + size += estimate_num_insns (gsi_stmt (gsi), weights); free (body); return size; @@ -154,18 +154,17 @@ estimated_unrolled_size (unsigned HOST_WIDE_INT ninsns, return unr_insns; } -/* Tries to unroll LOOP completely, i.e. NITER times. LOOPS is the - loop tree. UL determines which loops we are allowed to unroll. +/* Tries to unroll LOOP completely, i.e. NITER times. + UL determines which loops we are allowed to unroll. EXIT is the exit of the loop that should be eliminated. 
*/ static bool -try_unroll_loop_completely (struct loops *loops ATTRIBUTE_UNUSED, - struct loop *loop, +try_unroll_loop_completely (struct loop *loop, edge exit, tree niter, enum unroll_level ul) { unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns; - tree old_cond, cond, dont_exit, do_exit; + gimple cond; if (loop->inner) return false; @@ -183,81 +182,76 @@ try_unroll_loop_completely (struct loops *loops ATTRIBUTE_UNUSED, if (ul == UL_SINGLE_ITER) return false; - ninsns = tree_num_loop_insns (loop); + ninsns = tree_num_loop_insns (loop, &eni_size_weights); - if (n_unroll * ninsns - > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS)) - return false; + unr_insns = estimated_unrolled_size (ninsns, n_unroll); + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, " Loop size: %d\n", (int) ninsns); + fprintf (dump_file, " Estimated size after unrolling: %d\n", + (int) unr_insns); + } - if (ul == UL_NO_GROWTH) + if (unr_insns > ninsns + && (unr_insns + > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))) { - unr_insns = estimated_unrolled_size (ninsns, n_unroll); - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, " Loop size: %d\n", (int) ninsns); - fprintf (dump_file, " Estimated size after unrolling: %d\n", - (int) unr_insns); - } - - if (unr_insns > ninsns) - { - if (dump_file && (dump_flags & TDF_DETAILS)) - fprintf (dump_file, "Not unrolling loop %d:\n", loop->num); - return false; - } + fprintf (dump_file, "Not unrolling loop %d " + "(--param max-completely-peeled-insns limit reached).\n", + loop->num); + return false; } - } - if (exit->flags & EDGE_TRUE_VALUE) - { - dont_exit = boolean_false_node; - do_exit = boolean_true_node; - } - else - { - dont_exit = boolean_true_node; - do_exit = boolean_false_node; + if (ul == UL_NO_GROWTH + && unr_insns > ninsns) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, "Not unrolling loop %d.\n", loop->num); + return false; + } } - cond = last_stmt (exit->src); - + if (n_unroll) { sbitmap wont_exit; - edge *edges_to_remove = XNEWVEC (edge, n_unroll); - unsigned int n_to_remove = 0; + edge e; + unsigned i; + VEC (edge, heap) *to_remove = NULL; - old_cond = COND_EXPR_COND (cond); - COND_EXPR_COND (cond) = dont_exit; - update_stmt (cond); initialize_original_copy_tables (); - wont_exit = sbitmap_alloc (n_unroll + 1); sbitmap_ones (wont_exit); RESET_BIT (wont_exit, 0); - if (!tree_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, n_unroll, wont_exit, - exit, edges_to_remove, - &n_to_remove, - DLTHE_FLAG_UPDATE_FREQ - | DLTHE_FLAG_COMPLETTE_PEEL)) + if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), + n_unroll, wont_exit, + exit, &to_remove, + DLTHE_FLAG_UPDATE_FREQ + | DLTHE_FLAG_COMPLETTE_PEEL)) { - COND_EXPR_COND (cond) = old_cond; - update_stmt (cond); free_original_copy_tables (); free (wont_exit); - free (edges_to_remove); return false; } + + for (i = 0; VEC_iterate (edge, to_remove, i, e); i++) + { + bool ok = remove_path (e); + gcc_assert (ok); + } + + VEC_free (edge, heap, to_remove); free (wont_exit); - free (edges_to_remove); free_original_copy_tables (); } - - COND_EXPR_COND (cond) = do_exit; - update_stmt (cond); + cond = last_stmt (exit->src); + if (exit->flags & EDGE_TRUE_VALUE) + gimple_cond_make_true (cond); + else + gimple_cond_make_false (cond); + update_stmt (cond); update_ssa (TODO_update_ssa); if (dump_file && (dump_flags & TDF_DETAILS)) @@ -266,32 +260,26 @@ try_unroll_loop_completely (struct 
loops *loops ATTRIBUTE_UNUSED, return true; } -/* Adds a canonical induction variable to LOOP if suitable. LOOPS is the loops - tree. CREATE_IV is true if we may create a new iv. UL determines +/* Adds a canonical induction variable to LOOP if suitable. + CREATE_IV is true if we may create a new iv. UL determines which loops we are allowed to completely unroll. If TRY_EVAL is true, we try to determine the number of iterations of a loop by direct evaluation. Returns true if cfg is changed. */ static bool -canonicalize_loop_induction_variables (struct loops *loops, struct loop *loop, +canonicalize_loop_induction_variables (struct loop *loop, bool create_iv, enum unroll_level ul, bool try_eval) { edge exit = NULL; tree niter; - niter = number_of_iterations_in_loop (loop); + niter = number_of_latch_executions (loop); if (TREE_CODE (niter) == INTEGER_CST) { exit = single_exit (loop); if (!just_once_each_iteration_p (loop, exit->src)) return false; - - /* The result of number_of_iterations_in_loop is by one higher than - we expect (i.e. it returns number of executions of the exit - condition, not of the loop latch edge). */ - niter = fold_build2 (MINUS_EXPR, TREE_TYPE (niter), niter, - build_int_cst (TREE_TYPE (niter), 1)); } else { @@ -318,7 +306,7 @@ canonicalize_loop_induction_variables (struct loops *loops, struct loop *loop, fprintf (dump_file, " times.\n"); } - if (try_unroll_loop_completely (loops, loop, exit, niter, ul)) + if (try_unroll_loop_completely (loop, exit, niter, ul)) return true; if (create_iv) @@ -328,23 +316,20 @@ canonicalize_loop_induction_variables (struct loops *loops, struct loop *loop, } /* The main entry point of the pass. Adds canonical induction variables - to the suitable LOOPS. */ + to the suitable loops. */ unsigned int -canonicalize_induction_variables (struct loops *loops) +canonicalize_induction_variables (void) { - unsigned i; + loop_iterator li; struct loop *loop; bool changed = false; - for (i = 1; i < loops->num; i++) + FOR_EACH_LOOP (li, loop, 0) { - loop = loops->parray[i]; - - if (loop) - changed |= canonicalize_loop_induction_variables (loops, loop, - true, UL_SINGLE_ITER, - true); + changed |= canonicalize_loop_induction_variables (loop, + true, UL_SINGLE_ITER, + true); } /* Clean up the information about numbers of iterations, since brute force @@ -361,35 +346,46 @@ canonicalize_induction_variables (struct loops *loops) size of the code does not increase. */ unsigned int -tree_unroll_loops_completely (struct loops *loops, bool may_increase_size) +tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer) { - unsigned i; + loop_iterator li; struct loop *loop; - bool changed = false; + bool changed; enum unroll_level ul; - for (i = 1; i < loops->num; i++) + do { - loop = loops->parray[i]; + changed = false; - if (!loop) - continue; + FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST) + { + if (may_increase_size && optimize_loop_for_speed_p (loop) + /* Unroll outermost loops only if asked to do so or they do + not cause code growth. 
*/ + && (unroll_outer + || loop_outer (loop_outer (loop)))) + ul = UL_ALL; + else + ul = UL_NO_GROWTH; + changed |= canonicalize_loop_induction_variables + (loop, false, ul, !flag_tree_loop_ivcanon); + } - if (may_increase_size && maybe_hot_bb_p (loop->header)) - ul = UL_ALL; - else - ul = UL_NO_GROWTH; - changed |= canonicalize_loop_induction_variables (loops, loop, - false, ul, - !flag_tree_loop_ivcanon); + if (changed) + { + /* This will take care of removing completely unrolled loops + from the loop structures so we can continue unrolling now + innermost loops. */ + if (cleanup_tree_cfg ()) + update_ssa (TODO_update_ssa_only_virtuals); + + /* Clean up the information about numbers of iterations, since + complete unrolling might have invalidated it. */ + scev_reset (); + } } + while (changed); - /* Clean up the information about numbers of iterations, since complete - unrolling might have invalidated it. */ - scev_reset (); - - if (changed) - return TODO_cleanup_cfg; return 0; } @@ -400,11 +396,9 @@ empty_loop_p (struct loop *loop) { edge exit; struct tree_niter_desc niter; - tree phi, def; basic_block *body; - block_stmt_iterator bsi; + gimple_stmt_iterator gsi; unsigned i; - tree stmt; /* If the loop has multiple exits, it is too hard for us to handle. Similarly, if the exit is not dominating, we cannot determine @@ -418,8 +412,11 @@ empty_loop_p (struct loop *loop) return false; /* Values of all loop exit phi nodes must be invariants. */ - for (phi = phi_nodes (exit->dest); phi; phi = PHI_CHAIN (phi)) + for (gsi = gsi_start(phi_nodes (exit->dest)); !gsi_end_p (gsi); gsi_next (&gsi)) { + gimple phi = gsi_stmt (gsi); + tree def; + if (!is_gimple_reg (PHI_RESULT (phi))) continue; @@ -441,11 +438,12 @@ empty_loop_p (struct loop *loop) return false; } - for (bsi = bsi_start (body[i]); !bsi_end_p (bsi); bsi_next (&bsi)) + for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { - stmt = bsi_stmt (bsi); + gimple stmt = gsi_stmt (gsi); + if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_VIRTUAL_DEFS) - || stmt_ann (stmt)->has_volatile_ops) + || gimple_has_volatile_ops (stmt)) { free (body); return false; @@ -453,25 +451,19 @@ empty_loop_p (struct loop *loop) /* Also, asm statements and calls may have side effects and we cannot change the number of times they are executed. */ - switch (TREE_CODE (stmt)) + switch (gimple_code (stmt)) { - case RETURN_EXPR: - case MODIFY_EXPR: - stmt = get_call_expr_in (stmt); - if (!stmt) - break; - - case CALL_EXPR: - if (TREE_SIDE_EFFECTS (stmt)) + case GIMPLE_CALL: + if (gimple_has_side_effects (stmt)) { free (body); return false; } break; - case ASM_EXPR: + case GIMPLE_ASM: /* We cannot remove volatile assembler. 
*/ - if (ASM_VOLATILE_P (stmt)) + if (gimple_asm_volatile_p (stmt)) { free (body); return false; @@ -494,22 +486,22 @@ static void remove_empty_loop (struct loop *loop) { edge exit = single_dom_exit (loop), non_exit; - tree cond_stmt = last_stmt (exit->src); - tree do_exit; + gimple cond_stmt = last_stmt (exit->src); basic_block *body; unsigned n_before, freq_in, freq_h; gcov_type exit_count = exit->count; + if (dump_file) + fprintf (dump_file, "Removing empty loop %d\n", loop->num); + non_exit = EDGE_SUCC (exit->src, 0); if (non_exit == exit) non_exit = EDGE_SUCC (exit->src, 1); if (exit->flags & EDGE_TRUE_VALUE) - do_exit = boolean_true_node; + gimple_cond_make_true (cond_stmt); else - do_exit = boolean_false_node; - - COND_EXPR_COND (cond_stmt) = do_exit; + gimple_cond_make_false (cond_stmt); update_stmt (cond_stmt); /* Let us set the probabilities of the edges coming from the exit block. */ @@ -562,15 +554,15 @@ try_remove_empty_loop (struct loop *loop, bool *changed) return true; } -/* Remove the empty LOOPS. */ +/* Remove the empty loops. */ unsigned int -remove_empty_loops (struct loops *loops) +remove_empty_loops (void) { bool changed = false; struct loop *loop; - for (loop = loops->tree_root->inner; loop; loop = loop->next) + for (loop = current_loops->tree_root->inner; loop; loop = loop->next) try_remove_empty_loop (loop, &changed); if (changed) @@ -580,3 +572,4 @@ remove_empty_loops (struct loops *loops) } return 0; } +
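
For reference, the transformation named in the file comment ("adds a canonical
induction variable (step -1, tested against 0) and replaces the exit test") can
be pictured at the source level roughly as in the sketch below.  This is an
illustrative sketch only, not code from the patch; the function names
sum_before/sum_after and the variable ivtmp are invented for the example.

/* Illustrative sketch only; not part of the patch.  It shows, in source
   terms, what giving a counted loop a canonical induction variable
   (step -1, tested against 0) roughly means.  */

#include <stddef.h>

/* A counted loop as the user might write it.  */
static int
sum_before (const int *a, size_t n)
{
  int s = 0;
  size_t i;

  for (i = 0; i < n; i++)
    s += a[i];
  return s;
}

/* The same loop after canonicalization: a new variable (ivtmp) counts
   down from the precomputed iteration count, and the exit test becomes a
   comparison against zero, a form that later passes such as complete
   unrolling can analyze easily.  */
static int
sum_after (const int *a, size_t n)
{
  int s = 0;
  size_t i = 0;
  size_t ivtmp = n;   /* iteration count, computed before the loop */

  for (;;)
    {
      if (ivtmp == 0)  /* canonical exit test: IV compared against 0 */
        break;
      s += a[i];
      i++;
      ivtmp--;         /* canonical IV: step -1 */
    }
  return s;
}

int
main (void)
{
  int a[4] = { 1, 2, 3, 4 };

  return sum_before (a, 4) == sum_after (a, 4) ? 0 : 1;
}

A loop in roughly this shape is what try_unroll_loop_completely above works
with: when the iteration count is a compile-time constant, it compares the
estimate from estimated_unrolled_size against the max-completely-peeled-insns
parameter and, if unrolling is allowed, duplicates the body that many times and
rewrites the exit condition to a constant (gimple_cond_make_true/false in the
code above) so the loop structure can be cleaned up afterwards.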