X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Flcm.c;h=b568b06b99b56532a3338606d783cd857b35a415;hb=e561fc1cb16687a153589e70e16ede8de11116af;hp=0e47507752d6fb264dd75087f1c5dd4f66fd7bde;hpb=f0af5a8826645f54448b3b82f1ab364bd8952562;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/lcm.c b/gcc/lcm.c
index 0e47507752d..b568b06b99b 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -1,5 +1,5 @@
 /* Generic partial redundancy elimination with lazy code motion support.
-   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
+   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004
    Free Software Foundation, Inc.
 
 This file is part of GCC.
@@ -102,6 +102,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
   edge e;
   basic_block *worklist, *qin, *qout, *qend;
   unsigned int qlen;
+  edge_iterator ei;
 
   /* Allocate a worklist array/queue.  Entries are only added to the
      list if they were not already on the list.  So the size is
@@ -126,7 +127,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
 
   /* Mark blocks which are predecessors of the exit block so that we
      can easily identify them below.  */
-  for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
+  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
     e->src->aux = EXIT_BLOCK_PTR;
 
   /* Iterate until the worklist is empty.  */
@@ -157,7 +158,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
       /* If the in state of this block changed, then we need
          to add the predecessors of this block to the worklist
         if they are not already on the worklist.  */
-      for (e = bb->pred; e; e = e->pred_next)
+      FOR_EACH_EDGE (e, ei, bb->preds)
        if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
          {
            *qin++ = e->src;
@@ -251,6 +252,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
   edge e;
   basic_block *worklist, *qin, *qout, *qend, bb;
   unsigned int qlen;
+  edge_iterator ei;
 
   num_edges = NUM_EDGES (edge_list);
 
@@ -280,7 +282,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
      do not want to be overly optimistic.  Consider an outgoing edge from
      the entry block.  That edge should always have a LATER value the
      same as EARLIEST for that edge.  */
-  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
+  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
     sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]);
 
   /* Add all the blocks to the worklist.  This prevents an early exit from
@@ -290,10 +292,11 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
       *qin++ = bb;
       bb->aux = bb;
     }
-  qin = worklist;
+
   /* Note that we do not use the last allocated element for our queue,
      as EXIT_BLOCK is never inserted into it.  In fact the above allocation
      of n_basic_blocks + 1 elements is not necessary.  */
+  qin = worklist;
   qend = &worklist[n_basic_blocks];
   qlen = n_basic_blocks;
 
@@ -309,11 +312,12 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
 
       /* Compute the intersection of LATERIN for each incoming edge to B.  */
       sbitmap_ones (laterin[bb->index]);
-      for (e = bb->pred; e != NULL; e = e->pred_next)
-       sbitmap_a_and_b (laterin[bb->index], laterin[bb->index], later[(size_t)e->aux]);
+      FOR_EACH_EDGE (e, ei, bb->preds)
+       sbitmap_a_and_b (laterin[bb->index], laterin[bb->index],
+                        later[(size_t)e->aux]);
 
       /* Calculate LATER for all outgoing edges.  */
-      for (e = bb->succ; e != NULL; e = e->succ_next)
+      FOR_EACH_EDGE (e, ei, bb->succs)
        if (sbitmap_union_of_diff_cg (later[(size_t) e->aux],
                                      earliest[(size_t) e->aux],
                                      laterin[e->src->index],
@@ -334,7 +338,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
      for the EXIT block.  We allocated an extra entry in the LATERIN array
      for just this purpose.  */
   sbitmap_ones (laterin[last_basic_block]);
-  for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next)
+  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
     sbitmap_a_and_b (laterin[last_basic_block],
                      laterin[last_basic_block],
                      later[(size_t) e->aux]);
@@ -354,7 +358,8 @@ compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
   basic_block bb;
 
   FOR_EACH_BB (bb)
-    sbitmap_difference (delete[bb->index], antloc[bb->index], laterin[bb->index]);
+    sbitmap_difference (delete[bb->index], antloc[bb->index],
+                        laterin[bb->index]);
 
   for (x = 0; x < NUM_EDGES (edge_list); x++)
     {
@@ -475,6 +480,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
   edge e;
   basic_block *worklist, *qin, *qout, *qend, bb;
   unsigned int qlen;
+  edge_iterator ei;
 
   /* Allocate a worklist array/queue.  Entries are only added to the
      list if they were not already on the list.  So the size is
@@ -498,7 +504,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
 
   /* Mark blocks which are successors of the entry block so that we
      can easily identify them below.  */
-  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
+  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
     e->dest->aux = ENTRY_BLOCK_PTR;
 
   /* Iterate until the worklist is empty.  */
@@ -526,11 +532,12 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
          sbitmap_intersection_of_preds (avin[bb->index], avout, bb->index);
        }
 
-      if (sbitmap_union_of_diff_cg (avout[bb->index], avloc[bb->index], avin[bb->index], kill[bb->index]))
+      if (sbitmap_union_of_diff_cg (avout[bb->index], avloc[bb->index],
+                                    avin[bb->index], kill[bb->index]))
        /* If the out state of this block changed, then we need
           to add the successors of this block to the worklist
           if they are not already on the worklist.  */
-       for (e = bb->succ; e; e = e->succ_next)
+       FOR_EACH_EDGE (e, ei, bb->succs)
          if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
            {
              *qin++ = e->dest;
@@ -600,6 +607,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
   int num_edges, i;
   edge e;
   basic_block *worklist, *tos, bb;
+  edge_iterator ei;
 
   num_edges = NUM_EDGES (edge_list);
 
@@ -620,7 +628,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
      do not want to be overly optimistic.  Consider an incoming edge to
      the exit block.  That edge should always have a NEARER value the
      same as FARTHEST for that edge.  */
-  for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
+  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
     sbitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]);
 
   /* Add all the blocks to the worklist.  This prevents an early exit
@@ -640,12 +648,12 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
 
       /* Compute the intersection of NEARER for each outgoing edge from B.  */
       sbitmap_ones (nearerout[bb->index]);
-      for (e = bb->succ; e != NULL; e = e->succ_next)
+      FOR_EACH_EDGE (e, ei, bb->succs)
        sbitmap_a_and_b (nearerout[bb->index], nearerout[bb->index],
                         nearer[(size_t) e->aux]);
 
      /* Calculate NEARER for all incoming edges.  */
-     for (e = bb->pred; e != NULL; e = e->pred_next)
+     FOR_EACH_EDGE (e, ei, bb->preds)
        if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux],
                                      farthest[(size_t) e->aux],
                                      nearerout[e->dest->index],
@@ -663,7 +671,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
      for the ENTRY block.  We allocated an extra entry in the NEAREROUT array
      for just this purpose.  */
   sbitmap_ones (nearerout[last_basic_block]);
-  for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next)
+  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
     sbitmap_a_and_b (nearerout[last_basic_block],
                      nearerout[last_basic_block],
                      nearer[(size_t) e->aux]);
@@ -683,7 +691,8 @@ compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
   basic_block bb;
 
   FOR_EACH_BB (bb)
-    sbitmap_difference (delete[bb->index], st_avloc[bb->index], nearerout[bb->index]);
+    sbitmap_difference (delete[bb->index], st_avloc[bb->index],
+                        nearerout[bb->index]);
 
   for (x = 0; x < NUM_EDGES (edge_list); x++)
     {
@@ -907,8 +916,9 @@ static void
 make_preds_opaque (basic_block b, int j)
 {
   edge e;
+  edge_iterator ei;
 
-  for (e = b->pred; e; e = e->pred_next)
+  FOR_EACH_EDGE (e, ei, b->preds)
     {
       basic_block pb = e->src;
 
@@ -927,12 +937,12 @@ reg_dies (rtx reg, HARD_REG_SET live)
 {
   int regno, nregs;
 
-  if (GET_CODE (reg) != REG)
+  if (!REG_P (reg))
     return;
 
   regno = REGNO (reg);
   if (regno < FIRST_PSEUDO_REGISTER)
-    for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0;
+    for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0;
         nregs--)
       CLEAR_HARD_REG_BIT (live, regno + nregs);
 }
@@ -948,16 +958,22 @@ reg_becomes_live (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *live)
   if (GET_CODE (reg) == SUBREG)
     reg = SUBREG_REG (reg);
 
-  if (GET_CODE (reg) != REG)
+  if (!REG_P (reg))
     return;
 
   regno = REGNO (reg);
   if (regno < FIRST_PSEUDO_REGISTER)
-    for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0;
+    for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0;
         nregs--)
      SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno + nregs);
 }
 
+/* Make sure if MODE_ENTRY is defined then MODE_EXIT is defined
+   and vice versa.  */
+#if defined (MODE_ENTRY) != defined (MODE_EXIT)
+ #error "Both MODE_ENTRY and MODE_EXIT must be defined"
+#endif
+
 /* Find all insns that need a particular mode setting, and insert
    the necessary mode switches.  Return true if we did work.  */
 
@@ -990,7 +1006,7 @@ optimize_mode_switching (FILE *file)
   /* Create the list of segments within each basic block.  If NORMAL_MODE
      is defined, allow for two extra blocks split from the entry and
      exit block.  */
-#ifdef NORMAL_MODE
+#if defined (MODE_ENTRY) && defined (MODE_EXIT)
   entry_exit_extra = 2;
 #endif
   bb_info[n_entities]
@@ -1003,23 +1019,24 @@ optimize_mode_switching (FILE *file)
   if (! n_entities)
     return 0;
 
-#ifdef NORMAL_MODE
+#if defined (MODE_ENTRY) && defined (MODE_EXIT)
   {
     /* Split the edge from the entry block and the fallthrough edge to the
       exit block, so that we can note that there NORMAL_MODE is supplied /
      required.  */
     edge eg;
-    post_entry = split_edge (ENTRY_BLOCK_PTR->succ);
+    edge_iterator ei;
+    post_entry = split_edge (EDGE_SUCC (ENTRY_BLOCK_PTR, 0));
 
    /* The only non-call predecessor at this stage is a block with a
       fallthrough edge; there can be at most one, but there could be
       none at all, e.g. when exit is called.  */
-    for (pre_exit = 0, eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next)
+    pre_exit = 0;
+    FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR->preds)
       if (eg->flags & EDGE_FALLTHRU)
        {
          regset live_at_end = eg->src->global_live_at_end;
-         if (pre_exit)
-           abort ();
+         gcc_assert (!pre_exit);
          pre_exit = split_edge (eg);
          COPY_REG_SET (pre_exit->global_live_at_start, live_at_end);
          COPY_REG_SET (pre_exit->global_live_at_end, live_at_end);
@@ -1052,8 +1069,8 @@ optimize_mode_switching (FILE *file)
 
          REG_SET_TO_HARD_REG_SET (live_now, bb->global_live_at_start);
 
-         for (insn = bb->head;
-              insn != NULL && insn != NEXT_INSN (bb->end);
+         for (insn = BB_HEAD (bb);
+              insn != NULL && insn != NEXT_INSN (BB_END (bb));
              insn = NEXT_INSN (insn))
            {
              if (INSN_P (insn))
@@ -1068,7 +1085,9 @@ optimize_mode_switching (FILE *file)
                      add_seginfo (info + bb->index, ptr);
                      RESET_BIT (transp[bb->index], j);
                    }
-
+#ifdef MODE_AFTER
+                 last_mode = MODE_AFTER (last_mode, insn);
+#endif
                  /* Update LIVE_NOW.  */
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_DEAD)
@@ -1085,13 +1104,13 @@ optimize_mode_switching (FILE *file)
          /* Check for blocks without ANY mode requirements.  */
          if (last_mode == no_mode)
            {
-             ptr = new_seginfo (no_mode, bb->end, bb->index, live_now);
+             ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
              add_seginfo (info + bb->index, ptr);
            }
        }
-#ifdef NORMAL_MODE
+#if defined (MODE_ENTRY) && defined (MODE_EXIT)
       {
-       int mode = NORMAL_MODE (e);
+       int mode = MODE_ENTRY (e);
 
        if (mode != no_mode)
          {
@@ -1109,7 +1128,7 @@ optimize_mode_switching (FILE *file)
            info[bb->index].computing = mode;
 
            if (pre_exit)
-             info[pre_exit->index].seginfo->mode = mode;
+             info[pre_exit->index].seginfo->mode = MODE_EXIT (e);
          }
       }
 #endif /* NORMAL_MODE */
@@ -1193,8 +1212,8 @@ optimize_mode_switching (FILE *file)
              if (eg->flags & EDGE_ABNORMAL)
                {
                  emited = true;
-                 if (GET_CODE (src_bb->end) == JUMP_INSN)
-                   emit_insn_before (mode_set, src_bb->end);
+                 if (JUMP_P (BB_END (src_bb)))
+                   emit_insn_before (mode_set, BB_END (src_bb));
                  /* It doesn't make sense to switch to normal mode
                     after a CALL_INSN, so we're going to abort if we
                     find one.  The cases in which a CALL_INSN may
@@ -1206,8 +1225,8 @@ optimize_mode_switching (FILE *file)
                     the call (it wouldn't make sense, anyway).  In
                     the case of EH edges, EH entry points also start
                     in normal mode, so a similar reasoning applies.  */
-                 else if (GET_CODE (src_bb->end) == INSN)
-                   emit_insn_after (mode_set, src_bb->end);
+                 else if (NONJUMP_INSN_P (BB_END (src_bb)))
+                   emit_insn_after (mode_set, BB_END (src_bb));
                  else
                    abort ();
                  bb_info[j][src_bb->index].computing = mode;
@@ -1258,7 +1277,7 @@ optimize_mode_switching (FILE *file)
                continue;
 
              emited = true;
-             if (GET_CODE (ptr->insn_ptr) == NOTE
+             if (NOTE_P (ptr->insn_ptr)
                  && (NOTE_LINE_NUMBER (ptr->insn_ptr)
                      == NOTE_INSN_BASIC_BLOCK))
                emit_insn_after (mode_set, ptr->insn_ptr);
@@ -1285,7 +1304,7 @@ optimize_mode_switching (FILE *file)
   if (need_commit)
     commit_edge_insertions ();
 
-#ifdef NORMAL_MODE
+#if defined (MODE_ENTRY) && defined (MODE_EXIT)
   cleanup_cfg (CLEANUP_NO_INSN_DEL);
 #else
   if (!need_commit && !emited)
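
The recurring change in this patch is mechanical: every CFG edge walk is
converted from the old singly linked list idiom, for (e = bb->pred; e;
e = e->pred_next), to the vector-backed FOR_EACH_EDGE idiom with an
explicit edge_iterator, which is why each affected function also gains an
"edge_iterator ei;" declaration.  The standalone C sketch below mimics the
shape of that idiom; my_edge, edge_vec, edge_iter, and FOR_EACH_EDGE_SKETCH
are hypothetical stand-ins invented for illustration, not GCC's real
definitions.

    #include <stdio.h>

    /* Hypothetical stand-ins for GCC's edge/vector types, used only to
       illustrate the iteration idiom adopted by the patch.  */
    typedef struct my_edge { int src_index, dest_index; } my_edge;

    typedef struct edge_vec
    {
      my_edge **elts;   /* vector of edge pointers */
      unsigned num;     /* number of elements in use */
    } edge_vec;

    /* Index-based iterator, analogous in shape to GCC's edge_iterator:
       the loop state lives in a separate object instead of being threaded
       through the edges via pred_next/succ_next pointers.  */
    typedef struct { unsigned index; } edge_iter;

    #define FOR_EACH_EDGE_SKETCH(E, EI, VEC) \
      for ((EI).index = 0; \
           (EI).index < (VEC).num && ((E) = (VEC).elts[(EI).index], 1); \
           (EI).index++)

    int
    main (void)
    {
      my_edge e1 = { 0, 1 }, e2 = { 0, 2 };
      my_edge *elts[] = { &e1, &e2 };
      edge_vec succs = { elts, 2 };
      my_edge *e;
      edge_iter ei;

      /* Mirrors FOR_EACH_EDGE (e, ei, bb->succs) in the diff above.  */
      FOR_EACH_EDGE_SKETCH (e, ei, succs)
        printf ("edge %d -> %d\n", e->src_index, e->dest_index);
      return 0;
    }

Once the edges live in a vector, the next-pointers previously embedded in
each edge become redundant, so call sites only need the containing vector
(bb->preds or bb->succs) plus an iterator object.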
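The patch also replaces the single NORMAL_MODE target hook with the
MODE_ENTRY/MODE_EXIT pair, and the new preprocessor check rejects targets
that define only one of them.  The check relies on defined (X) evaluating
to 1 or 0 inside a #if expression, so the inequality holds exactly when the
two macros disagree.  A minimal self-contained sketch, with hypothetical
macros FOO and BAR standing in for MODE_ENTRY and MODE_EXIT:

    #include <stdio.h>

    #define FOO 1
    #define BAR 1  /* comment out either macro and the #error fires */

    /* defined (X) is 0 or 1 in a #if expression, so the comparison is
       true exactly when one macro is defined without the other.  */
    #if defined (FOO) != defined (BAR)
     #error "Both FOO and BAR must be defined"
    #endif

    int
    main (void)
    {
      puts ("FOO and BAR are defined consistently");
      return 0;
    }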