X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Floop-unroll.c;h=8ea449d6bb772d60c74065a97c6a9903f7924429;hp=d25caf9ba1782b4150b305112259efbe3f1e7903;hb=6e4dbd5b8bf7191bbd1f17c1ae21281e0e9390d4;hpb=4c36ffe68d981c213d168cf07f42dcc558bc7f1b diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c index d25caf9ba17..8ea449d6bb7 100644 --- a/gcc/loop-unroll.c +++ b/gcc/loop-unroll.c @@ -1,11 +1,12 @@ /* Loop unrolling and peeling. - Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc. + Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008, 2010 + Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -14,9 +15,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA -02110-1301, USA. */ +along with GCC; see the file COPYING3. If not see +. */ #include "config.h" #include "system.h" @@ -32,7 +32,8 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA #include "output.h" #include "expr.h" #include "hashtab.h" -#include "recog.h" +#include "recog.h" +#include "target.h" /* This pass performs loop unrolling and peeling. We only perform these optimizations on innermost loops (with single exception) because @@ -77,30 +78,33 @@ struct iv_to_split rtx base_var; /* The variable on that the values in the further iterations are based. */ rtx step; /* Step of the induction variable. */ + struct iv_to_split *next; /* Next entry in walking order. */ unsigned n_loc; unsigned loc[3]; /* Location where the definition of the induction variable occurs in the insn. For example if N_LOC is 2, the expression is located at - XEXP (XEXP (single_set, loc[0]), loc[1]). */ + XEXP (XEXP (single_set, loc[0]), loc[1]). */ }; -DEF_VEC_P(rtx); -DEF_VEC_ALLOC_P(rtx,heap); - /* Information about accumulators to expand. */ struct var_to_expand { rtx insn; /* The insn in that the variable expansion occurs. */ rtx reg; /* The accumulator which is expanded. */ - VEC(rtx,heap) *var_expansions; /* The copies of the accumulator which is expanded. */ - enum rtx_code op; /* The type of the accumulation - addition, subtraction + VEC(rtx,heap) *var_expansions; /* The copies of the accumulator which is expanded. */ + struct var_to_expand *next; /* Next entry in walking order. */ + enum rtx_code op; /* The type of the accumulation - addition, subtraction or multiplication. */ int expansion_count; /* Count the number of expansions generated so far. */ int reuse_expansion; /* The expansion we intend to reuse to expand - the accumulator. If REUSE_EXPANSION is 0 reuse - the original accumulator. Else use + the accumulator. If REUSE_EXPANSION is 0 reuse + the original accumulator. Else use var_expansions[REUSE_EXPANSION - 1]. */ + unsigned accum_pos; /* The position in which the accumulator is placed in + the insn src. For example in x = x + something + accum_pos is 0 while in x = something + x accum_pos + is 1. 
*/ }; /* Information about optimization applied in @@ -109,70 +113,63 @@ struct var_to_expand struct opt_info { htab_t insns_to_split; /* A hashtable of insns to split. */ + struct iv_to_split *iv_to_split_head; /* The first iv to split. */ + struct iv_to_split **iv_to_split_tail; /* Pointer to the tail of the list. */ htab_t insns_with_var_to_expand; /* A hashtable of insns with accumulators to expand. */ + struct var_to_expand *var_to_expand_head; /* The first var to expand. */ + struct var_to_expand **var_to_expand_tail; /* Pointer to the tail of the list. */ unsigned first_new_block; /* The first basic block that was duplicated. */ basic_block loop_exit; /* The loop exit basic block. */ basic_block loop_preheader; /* The loop preheader basic block. */ }; -static void decide_unrolling_and_peeling (struct loops *, int); -static void peel_loops_completely (struct loops *, int); +static void decide_unrolling_and_peeling (int); +static void peel_loops_completely (int); static void decide_peel_simple (struct loop *, int); static void decide_peel_once_rolling (struct loop *, int); static void decide_peel_completely (struct loop *, int); static void decide_unroll_stupid (struct loop *, int); static void decide_unroll_constant_iterations (struct loop *, int); static void decide_unroll_runtime_iterations (struct loop *, int); -static void peel_loop_simple (struct loops *, struct loop *); -static void peel_loop_completely (struct loops *, struct loop *); -static void unroll_loop_stupid (struct loops *, struct loop *); -static void unroll_loop_constant_iterations (struct loops *, struct loop *); -static void unroll_loop_runtime_iterations (struct loops *, struct loop *); +static void peel_loop_simple (struct loop *); +static void peel_loop_completely (struct loop *); +static void unroll_loop_stupid (struct loop *); +static void unroll_loop_constant_iterations (struct loop *); +static void unroll_loop_runtime_iterations (struct loop *); static struct opt_info *analyze_insns_in_loop (struct loop *); static void opt_info_start_duplication (struct opt_info *); static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool); static void free_opt_info (struct opt_info *); static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx); -static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx); +static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *); static struct iv_to_split *analyze_iv_to_split_insn (rtx); static void expand_var_during_unrolling (struct var_to_expand *, rtx); -static int insert_var_expansion_initialization (void **, void *); -static int combine_var_copies_in_loop_exit (void **, void *); -static int release_var_copies (void **, void *); +static void insert_var_expansion_initialization (struct var_to_expand *, + basic_block); +static void combine_var_copies_in_loop_exit (struct var_to_expand *, + basic_block); static rtx get_expansion (struct var_to_expand *); /* Unroll and/or peel (depending on FLAGS) LOOPS. */ void -unroll_and_peel_loops (struct loops *loops, int flags) +unroll_and_peel_loops (int flags) { - struct loop *loop, *next; + struct loop *loop; bool check; + loop_iterator li; /* First perform complete loop peeling (it is almost surely a win, and affects parameters for further decision a lot). */ - peel_loops_completely (loops, flags); + peel_loops_completely (flags); /* Now decide rest of unrolling and peeling. 
*/ - decide_unrolling_and_peeling (loops, flags); - - loop = loops->tree_root; - while (loop->inner) - loop = loop->inner; + decide_unrolling_and_peeling (flags); /* Scan the loops, inner ones first. */ - while (loop != loops->tree_root) + FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) { - if (loop->next) - { - next = loop->next; - while (next->inner) - next = next->inner; - } - else - next = loop->outer; - check = true; /* And perform the appropriate transformations. */ switch (loop->lpt_decision.decision) @@ -181,16 +178,16 @@ unroll_and_peel_loops (struct loops *loops, int flags) /* Already done. */ gcc_unreachable (); case LPT_PEEL_SIMPLE: - peel_loop_simple (loops, loop); + peel_loop_simple (loop); break; case LPT_UNROLL_CONSTANT: - unroll_loop_constant_iterations (loops, loop); + unroll_loop_constant_iterations (loop); break; case LPT_UNROLL_RUNTIME: - unroll_loop_runtime_iterations (loops, loop); + unroll_loop_runtime_iterations (loop); break; case LPT_UNROLL_STUPID: - unroll_loop_stupid (loops, loop); + unroll_loop_stupid (loop); break; case LPT_NONE: check = false; @@ -202,10 +199,9 @@ unroll_and_peel_loops (struct loops *loops, int flags) { #ifdef ENABLE_CHECKING verify_dominators (CDI_DOMINATORS); - verify_loop_structure (loops); + verify_loop_structure (); #endif } - loop = next; } iv_analysis_done (); @@ -232,27 +228,16 @@ loop_exit_at_end_p (struct loop *loop) return true; } -/* Check whether to peel LOOPS (depending on FLAGS) completely and do so. */ +/* Depending on FLAGS, check whether to peel loops completely and do so. */ static void -peel_loops_completely (struct loops *loops, int flags) +peel_loops_completely (int flags) { - struct loop *loop, *next; - - loop = loops->tree_root; - while (loop->inner) - loop = loop->inner; + struct loop *loop; + loop_iterator li; - while (loop != loops->tree_root) + /* Scan the loops, the inner ones first. */ + FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) { - if (loop->next) - { - next = loop->next; - while (next->inner) - next = next->inner; - } - else - next = loop->outer; - loop->lpt_decision.decision = LPT_NONE; if (dump_file) @@ -268,48 +253,35 @@ peel_loops_completely (struct loops *loops, int flags) if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY) { - peel_loop_completely (loops, loop); + peel_loop_completely (loop); #ifdef ENABLE_CHECKING verify_dominators (CDI_DOMINATORS); - verify_loop_structure (loops); + verify_loop_structure (); #endif } - loop = next; } } -/* Decide whether unroll or peel LOOPS (depending on FLAGS) and how much. */ +/* Decide whether unroll or peel loops (depending on FLAGS) and how much. */ static void -decide_unrolling_and_peeling (struct loops *loops, int flags) +decide_unrolling_and_peeling (int flags) { - struct loop *loop = loops->tree_root, *next; - - while (loop->inner) - loop = loop->inner; + struct loop *loop; + loop_iterator li; /* Scan the loops, inner ones first. */ - while (loop != loops->tree_root) + FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) { - if (loop->next) - { - next = loop->next; - while (next->inner) - next = next->inner; - } - else - next = loop->outer; - loop->lpt_decision.decision = LPT_NONE; if (dump_file) fprintf (dump_file, "\n;; *** Considering loop %d ***\n", loop->num); /* Do not peel cold areas. 
*/ - if (!maybe_hot_bb_p (loop->header)) + if (optimize_loop_for_size_p (loop)) { if (dump_file) fprintf (dump_file, ";; Not considering loop, cold area\n"); - loop = next; continue; } @@ -319,7 +291,6 @@ decide_unrolling_and_peeling (struct loops *loops, int flags) if (dump_file) fprintf (dump_file, ";; Not considering loop, cannot duplicate\n"); - loop = next; continue; } @@ -328,7 +299,6 @@ decide_unrolling_and_peeling (struct loops *loops, int flags) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is not innermost\n"); - loop = next; continue; } @@ -345,8 +315,6 @@ decide_unrolling_and_peeling (struct loops *loops, int flags) decide_unroll_stupid (loop, flags); if (loop->lpt_decision.decision == LPT_NONE) decide_peel_simple (loop, flags); - - loop = next; } } @@ -409,7 +377,7 @@ decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED) } /* Do not peel cold areas. */ - if (!maybe_hot_bb_p (loop->header)) + if (optimize_loop_for_size_p (loop)) { if (dump_file) fprintf (dump_file, ";; Not considering loop, cold area\n"); @@ -486,38 +454,38 @@ decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED) body; i++; */ static void -peel_loop_completely (struct loops *loops, struct loop *loop) +peel_loop_completely (struct loop *loop) { sbitmap wont_exit; unsigned HOST_WIDE_INT npeel; - unsigned n_remove_edges, i; - edge *remove_edges, ein; + unsigned i; + VEC (edge, heap) *remove_edges; + edge ein; struct niter_desc *desc = get_simple_loop_desc (loop); struct opt_info *opt_info = NULL; - + npeel = desc->niter; if (npeel) { bool ok; - + wont_exit = sbitmap_alloc (npeel + 1); sbitmap_ones (wont_exit); RESET_BIT (wont_exit, 0); if (desc->noloop_assumptions) RESET_BIT (wont_exit, 1); - remove_edges = XCNEWVEC (edge, npeel); - n_remove_edges = 0; + remove_edges = NULL; if (flag_split_ivs_in_unroller) opt_info = analyze_insns_in_loop (loop); - + opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, npeel, + npeel, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | DLTHE_FLAG_COMPLETTE_PEEL | (opt_info @@ -525,7 +493,7 @@ peel_loop_completely (struct loops *loops, struct loop *loop) gcc_assert (ok); free (wont_exit); - + if (opt_info) { apply_opt_in_copies (opt_info, npeel, false, true); @@ -533,9 +501,9 @@ peel_loop_completely (struct loops *loops, struct loop *loop) } /* Remove the exit edges. */ - for (i = 0; i < n_remove_edges; i++) - remove_path (loops, remove_edges[i]); - free (remove_edges); + for (i = 0; VEC_iterate (edge, remove_edges, i, ein); i++) + remove_path (ein); + VEC_free (edge, heap, remove_edges); } ein = desc->in_edge; @@ -543,7 +511,7 @@ peel_loop_completely (struct loops *loops, struct loop *loop) /* Now remove the unreachable part of the last iteration and cancel the loop. 
*/ - remove_path (loops, ein); + remove_path (ein); if (dump_file) fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel); @@ -642,7 +610,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) loop->lpt_decision.decision = LPT_UNROLL_CONSTANT; loop->lpt_decision.times = best_unroll; - + if (dump_file) fprintf (dump_file, ";; Decided to unroll the constant times rolling loop, %d times.\n", @@ -669,19 +637,20 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) } */ static void -unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) +unroll_loop_constant_iterations (struct loop *loop) { unsigned HOST_WIDE_INT niter; unsigned exit_mod; sbitmap wont_exit; - unsigned n_remove_edges, i; - edge *remove_edges; + unsigned i; + VEC (edge, heap) *remove_edges; + edge e; unsigned max_unroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); bool exit_at_end = loop_exit_at_end_p (loop); struct opt_info *opt_info = NULL; bool ok; - + niter = desc->niter; /* Should not get here (such loop should be peeled instead). */ @@ -692,12 +661,11 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) wont_exit = sbitmap_alloc (max_unroll + 1); sbitmap_ones (wont_exit); - remove_edges = XCNEWVEC (edge, max_unroll + exit_mod + 1); - n_remove_edges = 0; - if (flag_split_ivs_in_unroller + remove_edges = NULL; + if (flag_split_ivs_in_unroller || flag_variable_expansion_in_unroller) opt_info = analyze_insns_in_loop (loop); - + if (!exit_at_end) { /* The exit is not at the end of the loop; leave exit test @@ -716,9 +684,9 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) { opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, exit_mod, + exit_mod, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | (opt_info && exit_mod > 1 ? DLTHE_RECORD_COPY_NUMBER @@ -726,8 +694,8 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) gcc_assert (ok); if (opt_info && exit_mod > 1) - apply_opt_in_copies (opt_info, exit_mod, false, false); - + apply_opt_in_copies (opt_info, exit_mod, false, false); + desc->noloop_assumptions = NULL_RTX; desc->niter -= exit_mod; desc->niter_max -= exit_mod; @@ -752,18 +720,18 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) RESET_BIT (wont_exit, 0); if (desc->noloop_assumptions) RESET_BIT (wont_exit, 1); - + opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, exit_mod + 1, + exit_mod + 1, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | (opt_info && exit_mod > 0 ? DLTHE_RECORD_COPY_NUMBER : 0)); gcc_assert (ok); - + if (opt_info && exit_mod > 0) apply_opt_in_copies (opt_info, exit_mod + 1, false, false); @@ -779,12 +747,12 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) } /* Now unroll the loop. */ - + opt_info_start_duplication (opt_info); ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), - loops, max_unroll, + max_unroll, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | (opt_info ? 
DLTHE_RECORD_COPY_NUMBER @@ -803,7 +771,7 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) { basic_block exit_block = get_bb_copy (desc->in_edge->src); /* Find a new in and out edge; they are in the last copy we have made. */ - + if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest) { desc->out_edge = EDGE_SUCC (exit_block, 0); @@ -821,9 +789,9 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) desc->niter_expr = GEN_INT (desc->niter); /* Remove the edges. */ - for (i = 0; i < n_remove_edges; i++) - remove_path (loops, remove_edges[i]); - free (remove_edges); + for (i = 0; VEC_iterate (edge, remove_edges, i, e); i++) + remove_path (e); + VEC_free (edge, heap, remove_edges); if (dump_file) fprintf (dump_file, @@ -859,6 +827,9 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES)) nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES); + if (targetm.loop_unroll_adjust) + nunroll = targetm.loop_unroll_adjust (nunroll, loop); + /* Skip big loops. */ if (nunroll <= 1) { @@ -902,7 +873,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) loop->lpt_decision.decision = LPT_UNROLL_RUNTIME; loop->lpt_decision.times = i - 1; - + if (dump_file) fprintf (dump_file, ";; Decided to unroll the runtime computable " @@ -910,6 +881,53 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) loop->lpt_decision.times); } +/* Splits edge E and inserts the sequence of instructions INSNS on it, and + returns the newly created block. If INSNS is NULL_RTX, nothing is changed + and NULL is returned instead. */ + +basic_block +split_edge_and_insert (edge e, rtx insns) +{ + basic_block bb; + + if (!insns) + return NULL; + bb = split_edge (e); + emit_insn_after (insns, BB_END (bb)); + + /* ??? We used to assume that INSNS can contain control flow insns, and + that we had to try to find sub basic blocks in BB to maintain a valid + CFG. For this purpose we used to set the BB_SUPERBLOCK flag on BB + and call break_superblocks when going out of cfglayout mode. But it + turns out that this never happens; and that if it does ever happen, + the verify_flow_info call in loop_optimizer_finalize would fail. + + There are two reasons why we expected we could have control flow insns + in INSNS. The first is when a comparison has to be done in parts, and + the second is when the number of iterations is computed for loops with + the number of iterations known at runtime. In both cases, test cases + to get control flow in INSNS appear to be impossible to construct: + + * If do_compare_rtx_and_jump needs several branches to do comparison + in a mode that needs comparison by parts, we cannot analyze the + number of iterations of the loop, and we never get to unrolling it. + + * The code in expand_divmod that was suspected to cause creation of + branching code seems to be only accessed for signed division. The + divisions used by # of iterations analysis are always unsigned. + Problems might arise on architectures that emits branching code + for some operations that may appear in the unroller (especially + for division), but we have no such architectures. + + Considering all this, it was decided that we should for now assume + that INSNS can in theory contain control flow insns, but in practice + it never does. So we don't handle the theoretical case, and should + a real failure ever show up, we have a pretty good clue for how to + fix it. 
*/ + + return bb; +} + /* Unroll LOOP for that we are able to count number of iterations in runtime LOOP->LPT_DECISION.TIMES + 1 times. The transformation does this (with some extra care for case n < 0): @@ -942,43 +960,43 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) } */ static void -unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) +unroll_loop_runtime_iterations (struct loop *loop) { rtx old_niter, niter, init_code, branch_code, tmp; unsigned i, j, p; - basic_block preheader, *body, *dom_bbs, swtch, ezc_swtch; - unsigned n_dom_bbs; + basic_block preheader, *body, swtch, ezc_swtch; + VEC (basic_block, heap) *dom_bbs; sbitmap wont_exit; int may_exit_copy; - unsigned n_peel, n_remove_edges; - edge *remove_edges, e; + unsigned n_peel; + VEC (edge, heap) *remove_edges; + edge e; bool extra_zero_check, last_may_exit; unsigned max_unroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); bool exit_at_end = loop_exit_at_end_p (loop); struct opt_info *opt_info = NULL; bool ok; - + if (flag_split_ivs_in_unroller || flag_variable_expansion_in_unroller) opt_info = analyze_insns_in_loop (loop); - + /* Remember blocks whose dominators will have to be updated. */ - dom_bbs = XCNEWVEC (basic_block, n_basic_blocks); - n_dom_bbs = 0; + dom_bbs = NULL; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { - unsigned nldom; - basic_block *ldom; + VEC (basic_block, heap) *ldom; + basic_block bb; - nldom = get_dominated_by (CDI_DOMINATORS, body[i], &ldom); - for (j = 0; j < nldom; j++) - if (!flow_bb_inside_loop_p (loop, ldom[j])) - dom_bbs[n_dom_bbs++] = ldom[j]; + ldom = get_dominated_by (CDI_DOMINATORS, body[i]); + for (j = 0; VEC_iterate (basic_block, ldom, j, bb); j++) + if (!flow_bb_inside_loop_p (loop, bb)) + VEC_safe_push (basic_block, heap, dom_bbs, bb); - free (ldom); + VEC_free (basic_block, heap, ldom); } free (body); @@ -1018,12 +1036,12 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) init_code = get_insns (); end_sequence (); + unshare_all_rtl_in_chain (init_code); /* Precondition the loop. */ - loop_split_edge_with (loop_preheader_edge (loop), init_code); + split_edge_and_insert (loop_preheader_edge (loop), init_code); - remove_edges = XCNEWVEC (edge, max_unroll + n_peel + 1); - n_remove_edges = 0; + remove_edges = NULL; wont_exit = sbitmap_alloc (max_unroll + 2); @@ -1037,15 +1055,13 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) SET_BIT (wont_exit, 1); ezc_swtch = loop_preheader_edge (loop)->src; ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, 1, - wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + 1, wont_exit, desc->out_edge, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ); gcc_assert (ok); /* Record the place where switch will be built for preconditioning. 
*/ - swtch = loop_split_edge_with (loop_preheader_edge (loop), - NULL_RTX); + swtch = split_edge (loop_preheader_edge (loop)); for (i = 0; i < n_peel; i++) { @@ -1054,9 +1070,8 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) if (i != n_peel - 1 || !last_may_exit) SET_BIT (wont_exit, 1); ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, 1, - wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + 1, wont_exit, desc->out_edge, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ); gcc_assert (ok); @@ -1064,12 +1079,16 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) j = n_peel - i - (extra_zero_check ? 0 : 1); p = REG_BR_PROB_BASE / (i + 2); - preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); + preheader = split_edge (loop_preheader_edge (loop)); branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ, block_label (preheader), p, NULL_RTX); - swtch = loop_split_edge_with (single_pred_edge (swtch), branch_code); + /* We rely on the fact that the compare and jump cannot be optimized out, + and hence the cfg we create is correct. */ + gcc_assert (branch_code != NULL_RTX); + + swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code); set_immediate_dominator (CDI_DOMINATORS, preheader, swtch); single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p; e = make_edge (swtch, preheader, @@ -1082,12 +1101,13 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) /* Add branch for zero iterations. */ p = REG_BR_PROB_BASE / (max_unroll + 1); swtch = ezc_swtch; - preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); + preheader = split_edge (loop_preheader_edge (loop)); branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ, block_label (preheader), p, NULL_RTX); + gcc_assert (branch_code != NULL_RTX); - swtch = loop_split_edge_with (single_succ_edge (swtch), branch_code); + swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code); set_immediate_dominator (CDI_DOMINATORS, preheader, swtch); single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p; e = make_edge (swtch, preheader, @@ -1096,24 +1116,24 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) } /* Recount dominators for outer blocks. */ - iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, n_dom_bbs); + iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); /* And unroll loop. */ sbitmap_ones (wont_exit); RESET_BIT (wont_exit, may_exit_copy); opt_info_start_duplication (opt_info); - + ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), - loops, max_unroll, + max_unroll, wont_exit, desc->out_edge, - remove_edges, &n_remove_edges, + &remove_edges, DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER : 0)); gcc_assert (ok); - + if (opt_info) { apply_opt_in_copies (opt_info, max_unroll, true, true); @@ -1127,7 +1147,7 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) basic_block exit_block = get_bb_copy (desc->in_edge->src); /* Find a new in and out edge; they are in the last copy we have made. */ - + if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest) { desc->out_edge = EDGE_SUCC (exit_block, 0); @@ -1141,9 +1161,9 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) } /* Remove the edges. 
*/ - for (i = 0; i < n_remove_edges; i++) - remove_path (loops, remove_edges[i]); - free (remove_edges); + for (i = 0; VEC_iterate (edge, remove_edges, i, e); i++) + remove_path (e); + VEC_free (edge, heap, remove_edges); /* We must be careful when updating the number of iterations due to preconditioning and the fact that the value must be valid at entry @@ -1167,6 +1187,8 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) ";; Unrolled loop %d times, counting # of iterations " "in runtime, %i insns\n", max_unroll, num_loop_insns (loop)); + + VEC_free (basic_block, heap, dom_bbs); } /* Decide whether to simply peel LOOP and how much. */ @@ -1248,7 +1270,7 @@ decide_peel_simple (struct loop *loop, int flags) /* Success. */ loop->lpt_decision.decision = LPT_PEEL_SIMPLE; loop->lpt_decision.times = npeel; - + if (dump_file) fprintf (dump_file, ";; Decided to simply peel the loop, %d times.\n", loop->lpt_decision.times); @@ -1269,25 +1291,24 @@ decide_peel_simple (struct loop *loop, int flags) end: ; */ static void -peel_loop_simple (struct loops *loops, struct loop *loop) +peel_loop_simple (struct loop *loop) { sbitmap wont_exit; unsigned npeel = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); struct opt_info *opt_info = NULL; bool ok; - + if (flag_split_ivs_in_unroller && npeel > 1) opt_info = analyze_insns_in_loop (loop); - + wont_exit = sbitmap_alloc (npeel + 1); sbitmap_zero (wont_exit); - + opt_info_start_duplication (opt_info); - + ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), - loops, npeel, wont_exit, - NULL, NULL, + npeel, wont_exit, NULL, NULL, DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER @@ -1295,7 +1316,7 @@ peel_loop_simple (struct loops *loops, struct loop *loop) gcc_assert (ok); free (wont_exit); - + if (opt_info) { apply_opt_in_copies (opt_info, npeel, false, false); @@ -1349,6 +1370,9 @@ decide_unroll_stupid (struct loop *loop, int flags) if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES)) nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES); + if (targetm.loop_unroll_adjust) + nunroll = targetm.loop_unroll_adjust (nunroll, loop); + /* Skip big loops. */ if (nunroll <= 1) { @@ -1394,7 +1418,7 @@ decide_unroll_stupid (struct loop *loop, int flags) loop->lpt_decision.decision = LPT_UNROLL_STUPID; loop->lpt_decision.times = i - 1; - + if (dump_file) fprintf (dump_file, ";; Decided to unroll the loop stupidly, %d times.\n", @@ -1419,32 +1443,32 @@ decide_unroll_stupid (struct loop *loop, int flags) } */ static void -unroll_loop_stupid (struct loops *loops, struct loop *loop) +unroll_loop_stupid (struct loop *loop) { sbitmap wont_exit; unsigned nunroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); struct opt_info *opt_info = NULL; bool ok; - + if (flag_split_ivs_in_unroller || flag_variable_expansion_in_unroller) opt_info = analyze_insns_in_loop (loop); - - + + wont_exit = sbitmap_alloc (nunroll + 1); sbitmap_zero (wont_exit); opt_info_start_duplication (opt_info); - + ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), - loops, nunroll, wont_exit, - NULL, NULL, NULL, + nunroll, wont_exit, + NULL, NULL, DLTHE_FLAG_UPDATE_FREQ | (opt_info ? 
DLTHE_RECORD_COPY_NUMBER : 0)); gcc_assert (ok); - + if (opt_info) { apply_opt_in_copies (opt_info, nunroll, true, true); @@ -1474,7 +1498,7 @@ unroll_loop_stupid (struct loops *loops, struct loop *loop) static hashval_t si_info_hash (const void *ivts) { - return htab_hash_pointer (((struct iv_to_split *) ivts)->insn); + return (hashval_t) INSN_UID (((const struct iv_to_split *) ivts)->insn); } /* An equality functions for information about insns to split. */ @@ -1482,8 +1506,8 @@ si_info_hash (const void *ivts) static int si_info_eq (const void *ivts1, const void *ivts2) { - const struct iv_to_split *i1 = ivts1; - const struct iv_to_split *i2 = ivts2; + const struct iv_to_split *const i1 = (const struct iv_to_split *) ivts1; + const struct iv_to_split *const i2 = (const struct iv_to_split *) ivts2; return i1->insn == i2->insn; } @@ -1493,54 +1517,88 @@ si_info_eq (const void *ivts1, const void *ivts2) static hashval_t ve_info_hash (const void *ves) { - return htab_hash_pointer (((struct var_to_expand *) ves)->insn); + return (hashval_t) INSN_UID (((const struct var_to_expand *) ves)->insn); } -/* Return true if IVTS1 and IVTS2 (which are really both of type +/* Return true if IVTS1 and IVTS2 (which are really both of type "var_to_expand *") refer to the same instruction. */ static int ve_info_eq (const void *ivts1, const void *ivts2) { - const struct var_to_expand *i1 = ivts1; - const struct var_to_expand *i2 = ivts2; - + const struct var_to_expand *const i1 = (const struct var_to_expand *) ivts1; + const struct var_to_expand *const i2 = (const struct var_to_expand *) ivts2; + return i1->insn == i2->insn; } -/* Returns true if REG is referenced in one insn in LOOP. */ +/* Returns true if REG is referenced in one nondebug insn in LOOP. + Set *DEBUG_USES to the number of debug insns that reference the + variable. */ bool -referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg) +referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg, + int *debug_uses) { basic_block *body, bb; unsigned i; int count_ref = 0; rtx insn; - - body = get_loop_body (loop); + + body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { bb = body[i]; - + FOR_BB_INSNS (bb, insn) - { - if (rtx_referenced_p (reg, insn)) - count_ref++; - } + if (!rtx_referenced_p (reg, insn)) + continue; + else if (DEBUG_INSN_P (insn)) + ++*debug_uses; + else if (++count_ref > 1) + break; } + free (body); return (count_ref == 1); } +/* Reset the DEBUG_USES debug insns in LOOP that reference REG. */ + +static void +reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses) +{ + basic_block *body, bb; + unsigned i; + rtx insn; + + body = get_loop_body (loop); + for (i = 0; debug_uses && i < loop->num_nodes; i++) + { + bb = body[i]; + + FOR_BB_INSNS (bb, insn) + if (!DEBUG_INSN_P (insn) || !rtx_referenced_p (reg, insn)) + continue; + else + { + validate_change (insn, &INSN_VAR_LOCATION_LOC (insn), + gen_rtx_UNKNOWN_VAR_LOC (), 0); + if (!--debug_uses) + break; + } + } + free (body); +} + /* Determine whether INSN contains an accumulator - which can be expanded into separate copies, + which can be expanded into separate copies, one for each copy of the LOOP body. - + for (i = 0 ; i < n; i++) sum += a[i]; - + ==> - + sum += a[i] .... i = i+1; @@ -1550,25 +1608,27 @@ referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg) sum2 += a[i]; .... - Return NULL if INSN contains no opportunity for expansion of accumulator. 
- Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant + Return NULL if INSN contains no opportunity for expansion of accumulator. + Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant information and return a pointer to it. */ static struct var_to_expand * analyze_insn_to_expand_var (struct loop *loop, rtx insn) { - rtx set, dest, src, op1; + rtx set, dest, src, op1, op2, something; struct var_to_expand *ves; enum machine_mode mode1, mode2; - + unsigned accum_pos; + int debug_uses = 0; + set = single_set (insn); if (!set) return NULL; - + dest = SET_DEST (set); src = SET_SRC (set); - + if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS && GET_CODE (src) != MULT) @@ -1586,45 +1646,78 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn) if (!have_insn_for (GET_CODE (src), GET_MODE (src))) return NULL; - if (!XEXP (src, 0)) - return NULL; - op1 = XEXP (src, 0); - + op2 = XEXP (src, 1); + if (!REG_P (dest) && !(GET_CODE (dest) == SUBREG && REG_P (SUBREG_REG (dest)))) return NULL; - - if (!rtx_equal_p (dest, op1)) - return NULL; - - if (!referenced_in_one_insn_in_loop_p (loop, dest)) + + if (rtx_equal_p (dest, op1)) + accum_pos = 0; + else if (rtx_equal_p (dest, op2)) + accum_pos = 1; + else return NULL; - - if (rtx_referenced_p (dest, XEXP (src, 1))) + + /* The method of expansion that we are using; which includes + the initialization of the expansions with zero and the summation of + the expansions at the end of the computation will yield wrong results + for (x = something - x) thus avoid using it in that case. */ + if (accum_pos == 1 + && GET_CODE (src) == MINUS) + return NULL; + + something = (accum_pos == 0) ? op2 : op1; + + if (rtx_referenced_p (dest, something)) return NULL; - - mode1 = GET_MODE (dest); - mode2 = GET_MODE (XEXP (src, 1)); - if ((FLOAT_MODE_P (mode1) - || FLOAT_MODE_P (mode2)) - && !flag_unsafe_math_optimizations) + + if (!referenced_in_one_insn_in_loop_p (loop, dest, &debug_uses)) return NULL; - + + mode1 = GET_MODE (dest); + mode2 = GET_MODE (something); + if ((FLOAT_MODE_P (mode1) + || FLOAT_MODE_P (mode2)) + && !flag_associative_math) + return NULL; + + if (dump_file) + { + fprintf (dump_file, + "\n;; Expanding Accumulator "); + print_rtl (dump_file, dest); + fprintf (dump_file, "\n"); + } + + if (debug_uses) + /* Instead of resetting the debug insns, we could replace each + debug use in the loop with the sum or product of all expanded + accummulators. Since we'll only know of all expansions at the + end, we'd have to keep track of which vars_to_expand a debug + insn in the loop references, take note of each copy of the + debug insn during unrolling, and when it's all done, compute + the sum or product of each variable and adjust the original + debug insn and each copy thereof. What a pain! */ + reset_debug_uses_in_loop (loop, dest, debug_uses); + /* Record the accumulator to expand. */ ves = XNEW (struct var_to_expand); ves->insn = insn; - ves->var_expansions = VEC_alloc (rtx, heap, 1); ves->reg = copy_rtx (dest); + ves->var_expansions = VEC_alloc (rtx, heap, 1); + ves->next = NULL; ves->op = GET_CODE (src); ves->expansion_count = 0; ves->reuse_expansion = 0; - return ves; + ves->accum_pos = accum_pos; + return ves; } /* Determine whether there is an induction variable in INSN that - we would like to split during unrolling. + we would like to split during unrolling. I.e. replace @@ -1644,7 +1737,7 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn) i = i0 + 2 ... 
- Return NULL if INSN contains no interesting IVs. Otherwise, allocate + Return NULL if INSN contains no interesting IVs. Otherwise, allocate an IV_TO_SPLIT structure, fill it with the relevant information and return a pointer to it. */ @@ -1670,7 +1763,17 @@ analyze_iv_to_split_insn (rtx insn) return NULL; ok = iv_analyze_result (insn, dest, &iv); - gcc_assert (ok); + + /* This used to be an assert under the assumption that if biv_p returns + true that iv_analyze_result must also return true. However, that + assumption is not strictly correct as evidenced by pr25569. + + Returning NULL when iv_analyze_result returns false is safe and + avoids the problems in pr25569 until the iv_analyze_* routines + can be fixed, which is apparently hard and time consuming + according to their author. */ + if (! ok) + return NULL; if (iv.step == const0_rtx || iv.mode != iv.extend_mode) @@ -1681,9 +1784,10 @@ analyze_iv_to_split_insn (rtx insn) ivts->insn = insn; ivts->base_var = NULL_RTX; ivts->step = iv.step; + ivts->next = NULL; ivts->n_loc = 1; ivts->loc[0] = 1; - + return ivts; } @@ -1696,45 +1800,52 @@ static struct opt_info * analyze_insns_in_loop (struct loop *loop) { basic_block *body, bb; - unsigned i, num_edges = 0; + unsigned i; struct opt_info *opt_info = XCNEW (struct opt_info); rtx insn; struct iv_to_split *ivts = NULL; struct var_to_expand *ves = NULL; PTR *slot1; PTR *slot2; - edge *edges = get_loop_exit_edges (loop, &num_edges); + VEC (edge, heap) *edges = get_loop_exit_edges (loop); + edge exit; bool can_apply = false; - + iv_analysis_loop_init (loop); body = get_loop_body (loop); if (flag_split_ivs_in_unroller) - opt_info->insns_to_split = htab_create (5 * loop->num_nodes, - si_info_hash, si_info_eq, free); - - /* Record the loop exit bb and loop preheader before the unrolling. */ - if (!loop_preheader_edge (loop)->src) { - loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); - opt_info->loop_preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); + opt_info->insns_to_split = htab_create (5 * loop->num_nodes, + si_info_hash, si_info_eq, free); + opt_info->iv_to_split_head = NULL; + opt_info->iv_to_split_tail = &opt_info->iv_to_split_head; } - else - opt_info->loop_preheader = loop_preheader_edge (loop)->src; - - if (num_edges == 1 - && !(edges[0]->flags & EDGE_COMPLEX)) + + /* Record the loop exit bb and loop preheader before the unrolling. 
*/ + opt_info->loop_preheader = loop_preheader_edge (loop)->src; + + if (VEC_length (edge, edges) == 1) { - opt_info->loop_exit = loop_split_edge_with (edges[0], NULL_RTX); - can_apply = true; + exit = VEC_index (edge, edges, 0); + if (!(exit->flags & EDGE_COMPLEX)) + { + opt_info->loop_exit = split_edge (exit); + can_apply = true; + } } - + if (flag_variable_expansion_in_unroller && can_apply) - opt_info->insns_with_var_to_expand = htab_create (5 * loop->num_nodes, - ve_info_hash, ve_info_eq, free); - + { + opt_info->insns_with_var_to_expand = htab_create (5 * loop->num_nodes, + ve_info_hash, + ve_info_eq, free); + opt_info->var_to_expand_head = NULL; + opt_info->var_to_expand_tail = &opt_info->var_to_expand_head; + } + for (i = 0; i < loop->num_nodes; i++) { bb = body[i]; @@ -1745,29 +1856,35 @@ analyze_insns_in_loop (struct loop *loop) { if (!INSN_P (insn)) continue; - + if (opt_info->insns_to_split) ivts = analyze_iv_to_split_insn (insn); - + if (ivts) { slot1 = htab_find_slot (opt_info->insns_to_split, ivts, INSERT); + gcc_assert (*slot1 == NULL); *slot1 = ivts; + *opt_info->iv_to_split_tail = ivts; + opt_info->iv_to_split_tail = &ivts->next; continue; } - + if (opt_info->insns_with_var_to_expand) ves = analyze_insn_to_expand_var (loop, insn); - + if (ves) { slot2 = htab_find_slot (opt_info->insns_with_var_to_expand, ves, INSERT); + gcc_assert (*slot2 == NULL); *slot2 = ves; + *opt_info->var_to_expand_tail = ves; + opt_info->var_to_expand_tail = &ves->next; } } } - - free (edges); + + VEC_free (edge, heap, edges); free (body); return opt_info; } @@ -1775,7 +1892,7 @@ analyze_insns_in_loop (struct loop *loop) /* Called just before loop duplication. Records start of duplicated area to OPT_INFO. */ -static void +static void opt_info_start_duplication (struct opt_info *opt_info) { if (opt_info) @@ -1822,18 +1939,14 @@ get_ivts_expr (rtx expr, struct iv_to_split *ivts) return ret; } -/* Allocate basic variable for the induction variable chain. Callback for - htab_traverse. */ +/* Allocate basic variable for the induction variable chain. */ -static int -allocate_basic_variable (void **slot, void *data ATTRIBUTE_UNUSED) +static void +allocate_basic_variable (struct iv_to_split *ivts) { - struct iv_to_split *ivts = *slot; rtx expr = *get_ivts_expr (single_set (ivts->insn), ivts); ivts->base_var = gen_reg_rtx (GET_MODE (expr)); - - return 1; } /* Insert initialization of basic variable of IVTS before INSN, taking @@ -1892,7 +2005,7 @@ split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta) seq = get_insns (); end_sequence (); emit_insn_before (seq, insn); - + if (validate_change (insn, loc, var, 0)) return; @@ -1910,7 +2023,7 @@ split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta) emit_move_insn (dest, src); seq = get_insns (); end_sequence (); - + emit_insn_before (seq, insn); delete_insn (insn); } @@ -1922,22 +2035,22 @@ static rtx get_expansion (struct var_to_expand *ve) { rtx reg; - + if (ve->reuse_expansion == 0) reg = ve->reg; else reg = VEC_index (rtx, ve->var_expansions, ve->reuse_expansion - 1); - + if (VEC_length (rtx, ve->var_expansions) == (unsigned) ve->reuse_expansion) ve->reuse_expansion = 0; - else + else ve->reuse_expansion++; - + return reg; } -/* Given INSN replace the uses of the accumulator recorded in VE +/* Given INSN replace the uses of the accumulator recorded in VE with a new register. 
*/ static void @@ -1945,10 +2058,10 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn) { rtx new_reg, set; bool really_new_expansion = false; - + set = single_set (insn); gcc_assert (set); - + /* Generate a new register only if the expansion limit has not been reached. Else reuse an already existing expansion. */ if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count) @@ -1960,8 +2073,8 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn) new_reg = get_expansion (ve); validate_change (insn, &SET_DEST (set), new_reg, 1); - validate_change (insn, &XEXP (SET_SRC (set), 0), new_reg, 1); - + validate_change (insn, &XEXP (SET_SRC (set), ve->accum_pos), new_reg, 1); + if (apply_change_group ()) if (really_new_expansion) { @@ -1970,27 +2083,54 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn) } } -/* Initialize the variable expansions in loop preheader. - Callbacks for htab_traverse. PLACE_P is the loop-preheader - basic block where the initialization of the expansions - should take place. */ +/* Initialize the variable expansions in loop preheader. PLACE is the + loop-preheader basic block where the initialization of the + expansions should take place. The expansions are initialized with + (-0) when the operation is plus or minus to honor sign zero. This + way we can prevent cases where the sign of the final result is + effected by the sign of the expansion. Here is an example to + demonstrate this: -static int -insert_var_expansion_initialization (void **slot, void *place_p) + for (i = 0 ; i < n; i++) + sum += something; + + ==> + + sum += something + .... + i = i+1; + sum1 += something + .... + i = i+1 + sum2 += something; + .... + + When SUM is initialized with -zero and SOMETHING is also -zero; the + final result of sum should be -zero thus the expansions sum1 and sum2 + should be initialized with -zero as well (otherwise we will get +zero + as the final result). */ + +static void +insert_var_expansion_initialization (struct var_to_expand *ve, + basic_block place) { - struct var_to_expand *ve = *slot; - basic_block place = (basic_block)place_p; rtx seq, var, zero_init, insn; unsigned i; - + enum machine_mode mode = GET_MODE (ve->reg); + bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode); + if (VEC_length (rtx, ve->var_expansions) == 0) - return 1; - + return; + start_sequence (); - if (ve->op == PLUS || ve->op == MINUS) + if (ve->op == PLUS || ve->op == MINUS) for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++) { - zero_init = CONST0_RTX (GET_MODE (var)); + if (honor_signed_zero_p) + zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode); + else + zero_init = CONST0_RTX (mode); + emit_move_insn (var, zero_init); } else if (ve->op == MULT) @@ -1999,36 +2139,31 @@ insert_var_expansion_initialization (void **slot, void *place_p) zero_init = CONST1_RTX (GET_MODE (var)); emit_move_insn (var, zero_init); } - + seq = get_insns (); end_sequence (); - + insn = BB_HEAD (place); while (!NOTE_INSN_BASIC_BLOCK_P (insn)) insn = NEXT_INSN (insn); - - emit_insn_after (seq, insn); - /* Continue traversing the hash table. */ - return 1; + + emit_insn_after (seq, insn); } -/* Combine the variable expansions at the loop exit. - Callbacks for htab_traverse. PLACE_P is the loop exit - basic block where the summation of the expansions should - take place. */ +/* Combine the variable expansions at the loop exit. PLACE is the + loop exit basic block where the summation of the expansions should + take place. 
*/ -static int -combine_var_copies_in_loop_exit (void **slot, void *place_p) +static void +combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place) { - struct var_to_expand *ve = *slot; - basic_block place = (basic_block)place_p; rtx sum = ve->reg; rtx expr, seq, var, insn; unsigned i; if (VEC_length (rtx, ve->var_expansions) == 0) - return 1; - + return; + start_sequence (); if (ve->op == PLUS || ve->op == MINUS) for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++) @@ -2042,35 +2177,32 @@ combine_var_copies_in_loop_exit (void **slot, void *place_p) sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum); } - + expr = force_operand (sum, ve->reg); if (expr != ve->reg) emit_move_insn (ve->reg, expr); seq = get_insns (); end_sequence (); - + insn = BB_HEAD (place); while (!NOTE_INSN_BASIC_BLOCK_P (insn)) insn = NEXT_INSN (insn); emit_insn_after (seq, insn); - - /* Continue traversing the hash table. */ - return 1; } -/* Apply loop optimizations in loop copies using the - data which gathered during the unrolling. Structure +/* Apply loop optimizations in loop copies using the + data which gathered during the unrolling. Structure OPT_INFO record that data. - + UNROLLING is true if we unrolled (not peeled) the loop. REWRITE_ORIGINAL_BODY is true if we should also rewrite the original body of the loop (as it should happen in complete unrolling, but not in ordinary peeling of the loop). */ static void -apply_opt_in_copies (struct opt_info *opt_info, - unsigned n_copies, bool unrolling, +apply_opt_in_copies (struct opt_info *opt_info, + unsigned n_copies, bool unrolling, bool rewrite_original_loop) { unsigned i, delta; @@ -2078,20 +2210,21 @@ apply_opt_in_copies (struct opt_info *opt_info, rtx insn, orig_insn, next; struct iv_to_split ivts_templ, *ivts; struct var_to_expand ve_templ, *ves; - + /* Sanity check -- we need to put initialization in the original loop body. */ gcc_assert (!unrolling || rewrite_original_loop); - + /* Allocate the basic variables (i0). */ if (opt_info->insns_to_split) - htab_traverse (opt_info->insns_to_split, allocate_basic_variable, NULL); - + for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next) + allocate_basic_variable (ivts); + for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++) { bb = BASIC_BLOCK (i); orig_bb = get_bb_original (bb); - + /* bb->aux holds position in copy sequence initialized by duplicate_loop_to_header_edge. */ delta = determine_split_iv_delta ((size_t)bb->aux, n_copies, @@ -2103,23 +2236,24 @@ apply_opt_in_copies (struct opt_info *opt_info, next = NEXT_INSN (insn); if (!INSN_P (insn)) continue; - + while (!INSN_P (orig_insn)) orig_insn = NEXT_INSN (orig_insn); - + ivts_templ.insn = orig_insn; ve_templ.insn = orig_insn; - + /* Apply splitting iv optimization. */ if (opt_info->insns_to_split) { - ivts = htab_find (opt_info->insns_to_split, &ivts_templ); - + ivts = (struct iv_to_split *) + htab_find (opt_info->insns_to_split, &ivts_templ); + if (ivts) { gcc_assert (GET_CODE (PATTERN (insn)) == GET_CODE (PATTERN (orig_insn))); - + if (!delta) insert_base_initialization (ivts, insn); split_iv (ivts, insn, delta); @@ -2128,9 +2262,10 @@ apply_opt_in_copies (struct opt_info *opt_info, /* Apply variable expansion optimization. 
*/ if (unrolling && opt_info->insns_with_var_to_expand) { - ves = htab_find (opt_info->insns_with_var_to_expand, &ve_templ); + ves = (struct var_to_expand *) + htab_find (opt_info->insns_with_var_to_expand, &ve_templ); if (ves) - { + { gcc_assert (GET_CODE (PATTERN (insn)) == GET_CODE (PATTERN (orig_insn))); expand_var_during_unrolling (ves, insn); @@ -2142,19 +2277,17 @@ apply_opt_in_copies (struct opt_info *opt_info, if (!rewrite_original_loop) return; - + /* Initialize the variable expansions in the loop preheader - and take care of combining them at the loop exit. */ + and take care of combining them at the loop exit. */ if (opt_info->insns_with_var_to_expand) { - htab_traverse (opt_info->insns_with_var_to_expand, - insert_var_expansion_initialization, - opt_info->loop_preheader); - htab_traverse (opt_info->insns_with_var_to_expand, - combine_var_copies_in_loop_exit, - opt_info->loop_exit); + for (ves = opt_info->var_to_expand_head; ves; ves = ves->next) + insert_var_expansion_initialization (ves, opt_info->loop_preheader); + for (ves = opt_info->var_to_expand_head; ves; ves = ves->next) + combine_var_copies_in_loop_exit (ves, opt_info->loop_exit); } - + /* Rewrite also the original loop body. Find them as originals of the blocks in the last copied iteration, i.e. those that have get_bb_copy (get_bb_original (bb)) == bb. */ @@ -2164,21 +2297,22 @@ apply_opt_in_copies (struct opt_info *opt_info, orig_bb = get_bb_original (bb); if (get_bb_copy (orig_bb) != bb) continue; - + delta = determine_split_iv_delta (0, n_copies, unrolling); for (orig_insn = BB_HEAD (orig_bb); orig_insn != NEXT_INSN (BB_END (bb)); orig_insn = next) { next = NEXT_INSN (orig_insn); - + if (!INSN_P (orig_insn)) continue; - + ivts_templ.insn = orig_insn; if (opt_info->insns_to_split) { - ivts = htab_find (opt_info->insns_to_split, &ivts_templ); + ivts = (struct iv_to_split *) + htab_find (opt_info->insns_to_split, &ivts_templ); if (ivts) { if (!delta) @@ -2187,25 +2321,11 @@ apply_opt_in_copies (struct opt_info *opt_info, continue; } } - + } } } -/* Release the data structures used for the variable expansion - optimization. Callbacks for htab_traverse. */ - -static int -release_var_copies (void **slot, void *data ATTRIBUTE_UNUSED) -{ - struct var_to_expand *ve = *slot; - - VEC_free (rtx, heap, ve->var_expansions); - - /* Continue traversing the hash table. */ - return 1; -} - /* Release OPT_INFO. */ static void @@ -2215,8 +2335,10 @@ free_opt_info (struct opt_info *opt_info) htab_delete (opt_info->insns_to_split); if (opt_info->insns_with_var_to_expand) { - htab_traverse (opt_info->insns_with_var_to_expand, - release_var_copies, NULL); + struct var_to_expand *ves; + + for (ves = opt_info->var_to_expand_head; ves; ves = ves->next) + VEC_free (rtx, heap, ves->var_expansions); htab_delete (opt_info->insns_with_var_to_expand); } free (opt_info);
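
For readers skimming the patch, the following standalone C sketch (not part of the change itself; every name in it is illustrative) shows the source-level effect of the accumulator-expansion optimization that analyze_insn_to_expand_var, insert_var_expansion_initialization and combine_var_copies_in_loop_exit implement, here for a loop unrolled twice. It also illustrates why the patch initializes the expansion copies with -0.0 when HONOR_SIGNED_ZEROS holds: adding a +0.0 copy at the loop exit would otherwise turn a -0.0 result into +0.0.

#include <stdio.h>

/* Original loop: a single accumulator, so every addition depends on
   the previous one.  */
static double
sum_original (const double *a, int n)
{
  double sum = 0.0;
  int i;

  for (i = 0; i < n; i++)
    sum += a[i];
  return sum;
}

/* After unrolling by two with the accumulator expanded: the copy
   "sum_expansion" (a hypothetical name) breaks the dependence chain,
   and the copies are summed once at the loop exit.  */
static double
sum_expanded (const double *a, int n)
{
  double sum = 0.0;
  double sum_expansion = -0.0;  /* -0.0, not +0.0, so a -0.0 result keeps
                                   its sign (the HONOR_SIGNED_ZEROS case
                                   handled by the patch).  */
  int i;

  for (i = 0; i + 1 < n; i += 2)
    {
      sum += a[i];                /* original accumulator */
      sum_expansion += a[i + 1];  /* expanded copy */
    }
  for (; i < n; i++)              /* leftover iteration when n is odd */
    sum += a[i];

  return sum + sum_expansion;     /* combine the copies at the loop exit */
}

int
main (void)
{
  double a[5] = { 1.0, 2.0, 3.0, 4.0, 5.0 };

  printf ("%g %g\n", sum_original (a, 5), sum_expanded (a, 5));
  return 0;
}

The reassociation is exact for integer accumulators but changes rounding for floating point, which is why analyze_insn_to_expand_var refuses FLOAT_MODE_P accumulators unless flag_associative_math is set (the -funsafe-math-optimizations check replaced by -fassociative-math in this patch).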