X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Flcm.c;h=3432332a06a08db40a8311d9b131a3f7fe9050ad;hb=fceed818838afcb709ccd266c0ce9a75053977d4;hp=1db53c6a3ca2082ad3be259750020cfc851c32cc;hpb=4c5da23833f4604c04fb829abf1a39ab6976e7b2;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/lcm.c b/gcc/lcm.c index 1db53c6a3ca..3432332a06a 100644 --- a/gcc/lcm.c +++ b/gcc/lcm.c @@ -1,5 +1,6 @@ /* Generic partial redundancy elimination with lazy code motion support. - Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc. + Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004 + Free Software Foundation, Inc. This file is part of GCC. @@ -51,6 +52,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "config.h" #include "system.h" +#include "coretypes.h" +#include "tm.h" #include "rtl.h" #include "regs.h" #include "hard-reg-set.h" @@ -59,39 +62,31 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "insn-config.h" #include "recog.h" #include "basic-block.h" +#include "output.h" #include "tm_p.h" +#include "function.h" /* We want target macros for the mode switching code to be able to refer to instruction attribute values. */ #include "insn-attr.h" /* Edge based LCM routines. */ -static void compute_antinout_edge PARAMS ((sbitmap *, sbitmap *, - sbitmap *, sbitmap *)); -static void compute_earliest PARAMS ((struct edge_list *, int, - sbitmap *, sbitmap *, - sbitmap *, sbitmap *, - sbitmap *)); -static void compute_laterin PARAMS ((struct edge_list *, sbitmap *, - sbitmap *, sbitmap *, - sbitmap *)); -static void compute_insert_delete PARAMS ((struct edge_list *edge_list, - sbitmap *, sbitmap *, - sbitmap *, sbitmap *, - sbitmap *)); +static void compute_antinout_edge (sbitmap *, sbitmap *, sbitmap *, sbitmap *); +static void compute_earliest (struct edge_list *, int, sbitmap *, sbitmap *, + sbitmap *, sbitmap *, sbitmap *); +static void compute_laterin (struct edge_list *, sbitmap *, sbitmap *, + sbitmap *, sbitmap *); +static void compute_insert_delete (struct edge_list *edge_list, sbitmap *, + sbitmap *, sbitmap *, sbitmap *, sbitmap *); /* Edge based LCM routines on a reverse flowgraph. */ -static void compute_farthest PARAMS ((struct edge_list *, int, - sbitmap *, sbitmap *, - sbitmap*, sbitmap *, - sbitmap *)); -static void compute_nearerout PARAMS ((struct edge_list *, sbitmap *, - sbitmap *, sbitmap *, - sbitmap *)); -static void compute_rev_insert_delete PARAMS ((struct edge_list *edge_list, - sbitmap *, sbitmap *, - sbitmap *, sbitmap *, - sbitmap *)); +static void compute_farthest (struct edge_list *, int, sbitmap *, sbitmap *, + sbitmap*, sbitmap *, sbitmap *); +static void compute_nearerout (struct edge_list *, sbitmap *, sbitmap *, + sbitmap *, sbitmap *); +static void compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *, + sbitmap *, sbitmap *, sbitmap *, + sbitmap *); /* Edge based lcm routines. */ @@ -100,11 +95,8 @@ static void compute_rev_insert_delete PARAMS ((struct edge_list *edge_list, Other than that, its pretty much identical to compute_antinout. */ static void -compute_antinout_edge (antloc, transp, antin, antout) - sbitmap *antloc; - sbitmap *transp; - sbitmap *antin; - sbitmap *antout; +compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, + sbitmap *antout) { basic_block bb; edge e; @@ -114,8 +106,7 @@ compute_antinout_edge (antloc, transp, antin, antout) /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. 
So the size is bounded by the number of basic blocks. */ - qin = qout = worklist - = (basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks); + qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); /* We want a maximal solution, so make an optimistic initialization of ANTIN. */ @@ -123,15 +114,15 @@ compute_antinout_edge (antloc, transp, antin, antout) /* Put every block on the worklist; this is necessary because of the optimistic initialization of ANTIN above. */ - FOR_ALL_BB_REVERSE (bb) + FOR_EACH_BB_REVERSE (bb) { *qin++ = bb; bb->aux = bb; } qin = worklist; - qend = &worklist[num_basic_blocks]; - qlen = num_basic_blocks; + qend = &worklist[n_basic_blocks]; + qlen = n_basic_blocks; /* Mark blocks which are predecessors of the exit block so that we can easily identify them below. */ @@ -142,27 +133,27 @@ compute_antinout_edge (antloc, transp, antin, antout) while (qlen) { /* Take the first entry off the worklist. */ - basic_block bb = *qout++; + bb = *qout++; qlen--; if (qout >= qend) - qout = worklist; + qout = worklist; if (bb->aux == EXIT_BLOCK_PTR) /* Do not clear the aux field for blocks which are predecessors of the EXIT block. That way we never add then to the worklist again. */ - sbitmap_zero (antout[bb->sindex]); + sbitmap_zero (antout[bb->index]); else { /* Clear the aux field of this block so that it can be added to the worklist again if necessary. */ bb->aux = NULL; - sbitmap_intersection_of_succs (antout[bb->sindex], antin, bb->sindex); + sbitmap_intersection_of_succs (antout[bb->index], antin, bb->index); } - if (sbitmap_a_or_b_and_c_cg (antin[bb->sindex], antloc[bb->sindex], - transp[bb->sindex], antout[bb->sindex])) + if (sbitmap_a_or_b_and_c_cg (antin[bb->index], antloc[bb->index], + transp[bb->index], antout[bb->index])) /* If the in state of this block changed, then we need to add the predecessors of this block to the worklist if they are not already on the worklist. */ @@ -173,7 +164,7 @@ compute_antinout_edge (antloc, transp, antin, antout) e->src->aux = e; qlen++; if (qin >= qend) - qin = worklist; + qin = worklist; } } @@ -185,10 +176,9 @@ compute_antinout_edge (antloc, transp, antin, antout) /* Compute the earliest vector for edge based lcm. */ static void -compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest) - struct edge_list *edge_list; - int n_exprs; - sbitmap *antin, *antout, *avout, *kill, *earliest; +compute_earliest (struct edge_list *edge_list, int n_exprs, sbitmap *antin, + sbitmap *antout, sbitmap *avout, sbitmap *kill, + sbitmap *earliest) { sbitmap difference, temp_bitmap; int x, num_edges; @@ -204,22 +194,18 @@ compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest) pred = INDEX_EDGE_PRED_BB (edge_list, x); succ = INDEX_EDGE_SUCC_BB (edge_list, x); if (pred == ENTRY_BLOCK_PTR) - sbitmap_copy (earliest[x], antin[succ->sindex]); + sbitmap_copy (earliest[x], antin[succ->index]); else - { - /* We refer to the EXIT_BLOCK index, instead of testing for - EXIT_BLOCK_PTR, so that EXIT_BLOCK_PTR's index can be - changed so as to pretend it's a regular block, so that - its antin can be taken into account. 
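The worklist loop in compute_antinout_edge above solves a standard backward dataflow fixpoint: ANTOUT[b] is the intersection of ANTIN over b's successors (the empty set for predecessors of the exit block), and ANTIN[b] = ANTLOC[b] | (TRANSP[b] & ANTOUT[b]). A minimal standalone sketch of the same iteration over word-sized bitsets follows; the diamond CFG, the masks, and all names are illustrative only, and the real code drives the updates with the bounded worklist rather than full sweeps.

#include <stdio.h>

#define NBLOCKS 4

/* succs[b] lists the successors of block b; -1 terminates the list.
   Diamond CFG: 0 -> {1,2}, 1 -> 3, 2 -> 3, 3 -> exit.  */
static const int succs[NBLOCKS][2] = {
  { 1, 2 }, { 3, -1 }, { 3, -1 }, { -1, -1 }
};

int
main (void)
{
  unsigned antloc[NBLOCKS] = { 0x0, 0x1, 0x1, 0x2 };
  unsigned transp[NBLOCKS] = { 0x3, 0x3, 0x2, 0x3 };
  unsigned antin[NBLOCKS], antout[NBLOCKS];
  int b, s, changed;

  /* Optimistic (maximal) initialization, as in compute_antinout_edge.  */
  for (b = 0; b < NBLOCKS; b++)
    antin[b] = ~0u;

  do
    {
      changed = 0;
      /* Reverse order converges fastest for a backward problem,
         matching FOR_EACH_BB_REVERSE above.  */
      for (b = NBLOCKS - 1; b >= 0; b--)
        {
          unsigned in, out;

          /* ANTOUT[b]: intersection over successors; the empty set
             when b feeds the (virtual) exit block.  */
          if (succs[b][0] < 0)
            out = 0;
          else
            for (out = ~0u, s = 0; s < 2 && succs[b][s] >= 0; s++)
              out &= antin[succs[b][s]];
          antout[b] = out;

          /* ANTIN[b] = ANTLOC[b] | (TRANSP[b] & ANTOUT[b]).  */
          in = antloc[b] | (transp[b] & antout[b]);
          if (in != antin[b])
            antin[b] = in, changed = 1;
        }
    }
  while (changed);

  for (b = 0; b < NBLOCKS; b++)
    printf ("bb %d: antin = %#x, antout = %#x\n", b, antin[b], antout[b]);
  return 0;
}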
*/ - if (succ->sindex == EXIT_BLOCK) + { + if (succ == EXIT_BLOCK_PTR) sbitmap_zero (earliest[x]); else { - sbitmap_difference (difference, antin[succ->sindex], - avout[pred->sindex]); - sbitmap_not (temp_bitmap, antout[pred->sindex]); + sbitmap_difference (difference, antin[succ->index], + avout[pred->index]); + sbitmap_not (temp_bitmap, antout[pred->index]); sbitmap_a_and_b_or_c (earliest[x], difference, - kill[pred->sindex], temp_bitmap); + kill[pred->index], temp_bitmap); } } } @@ -258,9 +244,8 @@ compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest) to compute it. */ static void -compute_laterin (edge_list, earliest, antloc, later, laterin) - struct edge_list *edge_list; - sbitmap *earliest, *antloc, *later, *laterin; +compute_laterin (struct edge_list *edge_list, sbitmap *earliest, + sbitmap *antloc, sbitmap *later, sbitmap *laterin) { int num_edges, i; edge e; @@ -273,7 +258,7 @@ compute_laterin (edge_list, earliest, antloc, later, laterin) list if they were not already on the list. So the size is bounded by the number of basic blocks. */ qin = qout = worklist - = (basic_block *) xmalloc (sizeof (basic_block) * (num_basic_blocks + 1)); + = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1)); /* Initialize a mapping from each edge to its index. */ for (i = 0; i < num_edges; i++) @@ -300,17 +285,18 @@ compute_laterin (edge_list, earliest, antloc, later, laterin) /* Add all the blocks to the worklist. This prevents an early exit from the loop given our optimistic initialization of LATER above. */ - FOR_ALL_BB (bb) + FOR_EACH_BB (bb) { *qin++ = bb; bb->aux = bb; } - qin = worklist; + /* Note that we do not use the last allocated element for our queue, as EXIT_BLOCK is never inserted into it. In fact the above allocation - of num_basic_blocks + 1 elements is not encessary. */ - qend = &worklist[num_basic_blocks]; - qlen = num_basic_blocks; + of n_basic_blocks + 1 elements is not necessary. */ + qin = worklist; + qend = &worklist[n_basic_blocks]; + qlen = n_basic_blocks; /* Iterate until the worklist is empty. */ while (qlen) @@ -320,19 +306,20 @@ compute_laterin (edge_list, earliest, antloc, later, laterin) bb->aux = NULL; qlen--; if (qout >= qend) - qout = worklist; + qout = worklist; /* Compute the intersection of LATERIN for each incoming edge to B. */ - sbitmap_ones (laterin[bb->sindex]); + sbitmap_ones (laterin[bb->index]); for (e = bb->pred; e != NULL; e = e->pred_next) - sbitmap_a_and_b (laterin[bb->sindex], laterin[bb->sindex], later[(size_t)e->aux]); + sbitmap_a_and_b (laterin[bb->index], laterin[bb->index], + later[(size_t)e->aux]); /* Calculate LATER for all outgoing edges. */ for (e = bb->succ; e != NULL; e = e->succ_next) if (sbitmap_union_of_diff_cg (later[(size_t) e->aux], - earliest[(size_t) e->aux], - laterin[e->src->sindex], - antloc[e->src->sindex]) + earliest[(size_t) e->aux], + laterin[e->src->index], + antloc[e->src->index]) /* If LATER for an outgoing edge was changed, then we need to add the target of the outgoing edge to the worklist. */ && e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0) @@ -361,16 +348,16 @@ compute_laterin (edge_list, earliest, antloc, later, laterin) /* Compute the insertion and deletion points for edge based LCM. 
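Restated over plain word-sized bitsets, the per-edge computations above collapse into short boolean expressions. The helpers below are a sketch only; operands are assumed to be supplied by the caller rather than read out of the sbitmap vectors.

/* EARLIEST for edge (pred, succ), the sbitmap_a_and_b_or_c call in
   compute_earliest:
   (ANTIN[succ] - AVOUT[pred]) & (KILL[pred] | ~ANTOUT[pred]).  */
static unsigned
earliest_for_edge (unsigned antin_succ, unsigned avout_pred,
                   unsigned antout_pred, unsigned kill_pred)
{
  return (antin_succ & ~avout_pred) & (kill_pred | ~antout_pred);
}

/* LATER for edge (pred, succ), the sbitmap_union_of_diff_cg call in
   compute_laterin: EARLIEST[e] | (LATERIN[pred] - ANTLOC[pred]).
   LATERIN itself starts at all-ones and is intersected with LATER
   over each incoming edge.  */
static unsigned
later_for_edge (unsigned earliest_e, unsigned laterin_pred,
                unsigned antloc_pred)
{
  return earliest_e | (laterin_pred & ~antloc_pred);
}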
*/ static void -compute_insert_delete (edge_list, antloc, later, laterin, - insert, delete) - struct edge_list *edge_list; - sbitmap *antloc, *later, *laterin, *insert, *delete; +compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc, + sbitmap *later, sbitmap *laterin, sbitmap *insert, + sbitmap *delete) { int x; basic_block bb; - FOR_ALL_BB (bb) - sbitmap_difference (delete[bb->sindex], antloc[bb->sindex], laterin[bb->sindex]); + FOR_EACH_BB (bb) + sbitmap_difference (delete[bb->index], antloc[bb->index], + laterin[bb->index]); for (x = 0; x < NUM_EDGES (edge_list); x++) { @@ -379,7 +366,7 @@ compute_insert_delete (edge_list, antloc, later, laterin, if (b == EXIT_BLOCK_PTR) sbitmap_difference (insert[x], later[x], laterin[last_basic_block]); else - sbitmap_difference (insert[x], later[x], laterin[b->sindex]); + sbitmap_difference (insert[x], later[x], laterin[b->index]); } } @@ -388,15 +375,9 @@ compute_insert_delete (edge_list, antloc, later, laterin, map the insert vector to what edge an expression should be inserted on. */ struct edge_list * -pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete) - FILE *file ATTRIBUTE_UNUSED; - int n_exprs; - sbitmap *transp; - sbitmap *avloc; - sbitmap *antloc; - sbitmap *kill; - sbitmap **insert; - sbitmap **delete; +pre_edge_lcm (FILE *file ATTRIBUTE_UNUSED, int n_exprs, sbitmap *transp, + sbitmap *avloc, sbitmap *antloc, sbitmap *kill, + sbitmap **insert, sbitmap **delete) { sbitmap *antin, *antout, *earliest; sbitmap *avin, *avout; @@ -491,8 +472,8 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete) Return the number of passes we performed to iterate to a solution. */ void -compute_available (avloc, kill, avout, avin) - sbitmap *avloc, *kill, *avout, *avin; +compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, + sbitmap *avin) { edge e; basic_block *worklist, *qin, *qout, *qend, bb; @@ -501,23 +482,22 @@ compute_available (avloc, kill, avout, avin) /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is bounded by the number of basic blocks. */ - qin = qout = worklist - = (basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks); + qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); /* We want a maximal solution. */ sbitmap_vector_ones (avout, last_basic_block); /* Put every block on the worklist; this is necessary because of the optimistic initialization of AVOUT above. */ - FOR_ALL_BB (bb) + FOR_EACH_BB (bb) { *qin++ = bb; bb->aux = bb; } qin = worklist; - qend = &worklist[num_basic_blocks]; - qlen = num_basic_blocks; + qend = &worklist[n_basic_blocks]; + qlen = n_basic_blocks; /* Mark blocks which are successors of the entry block so that we can easily identify them below. */ @@ -528,11 +508,11 @@ compute_available (avloc, kill, avout, avin) while (qlen) { /* Take the first entry off the worklist. */ - basic_block bb = *qout++; + bb = *qout++; qlen--; if (qout >= qend) - qout = worklist; + qout = worklist; /* If one of the predecessor blocks is the ENTRY block, then the intersection of avouts is the null set. We can identify such blocks @@ -540,17 +520,17 @@ compute_available (avloc, kill, avout, avin) if (bb->aux == ENTRY_BLOCK_PTR) /* Do not clear the aux field for blocks which are successors of the ENTRY block. That way we never add then to the worklist again. 
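compute_available is the forward dual of compute_antinout_edge: AVIN[b] is the intersection of AVOUT over b's predecessors (the empty set once ENTRY is among them), and the sbitmap_union_of_diff_cg call updates AVOUT[b] = AVLOC[b] | (AVIN[b] & ~KILL[b]). As a bitset sketch with caller-supplied operands:

/* AVOUT[b] = AVLOC[b] | (AVIN[b] - KILL[b]); a sketch of the
   sbitmap_union_of_diff_cg update in compute_available.  */
static unsigned
avout_for_block (unsigned avloc_b, unsigned avin_b, unsigned kill_b)
{
  return avloc_b | (avin_b & ~kill_b);
}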
*/ - sbitmap_zero (avin[bb->sindex]); + sbitmap_zero (avin[bb->index]); else { /* Clear the aux field of this block so that it can be added to the worklist again if necessary. */ bb->aux = NULL; - sbitmap_intersection_of_preds (avin[bb->sindex], avout, bb->sindex); + sbitmap_intersection_of_preds (avin[bb->index], avout, bb->index); } - if (sbitmap_union_of_diff_cg (avout[bb->sindex], avloc[bb->sindex], - avin[bb->sindex], kill[bb->sindex])) + if (sbitmap_union_of_diff_cg (avout[bb->index], avloc[bb->index], + avin[bb->index], kill[bb->index])) /* If the out state of this block changed, then we need to add the successors of this block to the worklist if they are not already on the worklist. */ @@ -562,7 +542,7 @@ compute_available (avloc, kill, avout, avin) qlen++; if (qin >= qend) - qin = worklist; + qin = worklist; } } @@ -574,11 +554,9 @@ compute_available (avloc, kill, avout, avin) /* Compute the farthest vector for edge based lcm. */ static void -compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin, - kill, farthest) - struct edge_list *edge_list; - int n_exprs; - sbitmap *st_avout, *st_avin, *st_antin, *kill, *farthest; +compute_farthest (struct edge_list *edge_list, int n_exprs, + sbitmap *st_avout, sbitmap *st_avin, sbitmap *st_antin, + sbitmap *kill, sbitmap *farthest) { sbitmap difference, temp_bitmap; int x, num_edges; @@ -594,18 +572,18 @@ compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin, pred = INDEX_EDGE_PRED_BB (edge_list, x); succ = INDEX_EDGE_SUCC_BB (edge_list, x); if (succ == EXIT_BLOCK_PTR) - sbitmap_copy (farthest[x], st_avout[pred->sindex]); + sbitmap_copy (farthest[x], st_avout[pred->index]); else { if (pred == ENTRY_BLOCK_PTR) sbitmap_zero (farthest[x]); else { - sbitmap_difference (difference, st_avout[pred->sindex], - st_antin[succ->sindex]); - sbitmap_not (temp_bitmap, st_avin[succ->sindex]); + sbitmap_difference (difference, st_avout[pred->index], + st_antin[succ->index]); + sbitmap_not (temp_bitmap, st_avin[succ->index]); sbitmap_a_and_b_or_c (farthest[x], difference, - kill[succ->sindex], temp_bitmap); + kill[succ->index], temp_bitmap); } } } @@ -620,9 +598,8 @@ compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin, implementation can be found before compute_laterin. */ static void -compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout) - struct edge_list *edge_list; - sbitmap *farthest, *st_avloc, *nearer, *nearerout; +compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, + sbitmap *st_avloc, sbitmap *nearer, sbitmap *nearerout) { int num_edges, i; edge e; @@ -633,8 +610,7 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout) /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is bounded by the number of basic blocks. */ - tos = worklist - = (basic_block *) xmalloc (sizeof (basic_block) * (num_basic_blocks + 1)); + tos = worklist = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1)); /* Initialize NEARER for each edge and build a mapping from an edge to its index. */ @@ -653,7 +629,7 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout) /* Add all the blocks to the worklist. This prevents an early exit from the loop given our optimistic initialization of NEARER. 
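compute_farthest above is compute_earliest transposed onto the reverse flowgraph; its sbitmap_a_and_b_or_c call reduces to the following bitset expression (a sketch, with all operands assumed given by the caller):

/* FARTHEST for edge (pred, succ):
   (ST_AVOUT[pred] - ST_ANTIN[succ]) & (KILL[succ] | ~ST_AVIN[succ]).  */
static unsigned
farthest_for_edge (unsigned st_avout_pred, unsigned st_antin_succ,
                   unsigned st_avin_succ, unsigned kill_succ)
{
  return (st_avout_pred & ~st_antin_succ)
         & (kill_succ | ~st_avin_succ);
}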
*/ - FOR_ALL_BB (bb) + FOR_EACH_BB (bb) { *tos++ = bb; bb->aux = bb; @@ -667,17 +643,17 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout) bb->aux = NULL; /* Compute the intersection of NEARER for each outgoing edge from B. */ - sbitmap_ones (nearerout[bb->sindex]); + sbitmap_ones (nearerout[bb->index]); for (e = bb->succ; e != NULL; e = e->succ_next) - sbitmap_a_and_b (nearerout[bb->sindex], nearerout[bb->sindex], + sbitmap_a_and_b (nearerout[bb->index], nearerout[bb->index], nearer[(size_t) e->aux]); /* Calculate NEARER for all incoming edges. */ for (e = bb->pred; e != NULL; e = e->pred_next) if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux], - farthest[(size_t) e->aux], - nearerout[e->dest->sindex], - st_avloc[e->dest->sindex]) + farthest[(size_t) e->aux], + nearerout[e->dest->index], + st_avloc[e->dest->index]) /* If NEARER for an incoming edge was changed, then we need to add the source of the incoming edge to the worklist. */ && e->src != ENTRY_BLOCK_PTR && e->src->aux == 0) @@ -703,17 +679,16 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout) /* Compute the insertion and deletion points for edge based LCM. */ static void -compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout, - insert, delete) - struct edge_list *edge_list; - sbitmap *st_avloc, *nearer, *nearerout, *insert, *delete; +compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc, + sbitmap *nearer, sbitmap *nearerout, + sbitmap *insert, sbitmap *delete) { int x; basic_block bb; - FOR_ALL_BB (bb) - sbitmap_difference (delete[bb->sindex], st_avloc[bb->sindex], - nearerout[bb->sindex]); + FOR_EACH_BB (bb) + sbitmap_difference (delete[bb->index], st_avloc[bb->index], + nearerout[bb->index]); for (x = 0; x < NUM_EDGES (edge_list); x++) { @@ -721,7 +696,7 @@ compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout, if (b == ENTRY_BLOCK_PTR) sbitmap_difference (insert[x], nearer[x], nearerout[last_basic_block]); else - sbitmap_difference (insert[x], nearer[x], nearerout[b->sindex]); + sbitmap_difference (insert[x], nearer[x], nearerout[b->index]); } } @@ -731,16 +706,9 @@ compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout, an expression should be inserted on. 
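The reverse-graph insertion and deletion rules computed by compute_rev_insert_delete above also have compact bitset forms: DELETE is indexed by basic block, INSERT by edge. A sketch, not the sbitmap code:

/* DELETE[b] = ST_AVLOC[b] - NEAREROUT[b].  */
static unsigned
rev_delete_for_block (unsigned st_avloc_b, unsigned nearerout_b)
{
  return st_avloc_b & ~nearerout_b;
}

/* INSERT[e] = NEARER[e] - NEAREROUT[pred of e].  */
static unsigned
rev_insert_for_edge (unsigned nearer_e, unsigned nearerout_pred)
{
  return nearer_e & ~nearerout_pred;
}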
*/ struct edge_list * -pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill, - insert, delete) - FILE *file ATTRIBUTE_UNUSED; - int n_exprs; - sbitmap *transp; - sbitmap *st_avloc; - sbitmap *st_antloc; - sbitmap *kill; - sbitmap **insert; - sbitmap **delete; +pre_edge_rev_lcm (FILE *file ATTRIBUTE_UNUSED, int n_exprs, sbitmap *transp, + sbitmap *st_avloc, sbitmap *st_antloc, sbitmap *kill, + sbitmap **insert, sbitmap **delete) { sbitmap *st_antin, *st_antout; sbitmap *st_avout, *st_avin, *farthest; @@ -751,8 +719,8 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill, edge_list = create_edge_list (); num_edges = NUM_EDGES (edge_list); - st_antin = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs); - st_antout = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs); + st_antin = sbitmap_vector_alloc (last_basic_block, n_exprs); + st_antout = sbitmap_vector_alloc (last_basic_block, n_exprs); sbitmap_vector_zero (st_antin, last_basic_block); sbitmap_vector_zero (st_antout, last_basic_block); compute_antinout_edge (st_antloc, transp, st_antin, st_antout); @@ -852,9 +820,9 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill, The LCM algorithm is then run over the flow graph to determine where to place the sets to the highest-priority value in respect of first the first - insn in any one block. Any adjustments required to the transparancy + insn in any one block. Any adjustments required to the transparency vectors are made, then the next iteration starts for the next-lower - priority mode, till for each entity all modes are exhasted. + priority mode, till for each entity all modes are exhausted. More details are located in the code for optimize_mode_switching(). */ @@ -889,11 +857,11 @@ static sbitmap *comp; static sbitmap *delete; static sbitmap *insert; -static struct seginfo * new_seginfo PARAMS ((int, rtx, int, HARD_REG_SET)); -static void add_seginfo PARAMS ((struct bb_info *, struct seginfo *)); -static void reg_dies PARAMS ((rtx, HARD_REG_SET)); -static void reg_becomes_live PARAMS ((rtx, rtx, void *)); -static void make_preds_opaque PARAMS ((basic_block, int)); +static struct seginfo * new_seginfo (int, rtx, int, HARD_REG_SET); +static void add_seginfo (struct bb_info *, struct seginfo *); +static void reg_dies (rtx, HARD_REG_SET); +static void reg_becomes_live (rtx, rtx, void *); +static void make_preds_opaque (basic_block, int); #endif #ifdef OPTIMIZE_MODE_SWITCHING @@ -902,11 +870,7 @@ static void make_preds_opaque PARAMS ((basic_block, int)); with the MODE, INSN, and basic block BB parameters. */ static struct seginfo * -new_seginfo (mode, insn, bb, regs_live) - int mode; - rtx insn; - int bb; - HARD_REG_SET regs_live; +new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live) { struct seginfo *ptr; ptr = xmalloc (sizeof (struct seginfo)); @@ -923,9 +887,7 @@ new_seginfo (mode, insn, bb, regs_live) INFO is the structure to be linked in. */ static void -add_seginfo (head, info) - struct bb_info *head; - struct seginfo *info; +add_seginfo (struct bb_info *head, struct seginfo *info) { struct seginfo *ptr; @@ -935,7 +897,7 @@ add_seginfo (head, info) { ptr = head->seginfo; while (ptr->next != NULL) - ptr = ptr->next; + ptr = ptr->next; ptr->next = info; } } @@ -947,9 +909,7 @@ add_seginfo (head, info) we are currently handling mode-switching for. 
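new_seginfo and add_seginfo above maintain one ordered segment list per basic block. The struct definitions live earlier in lcm.c and are not part of this hunk; the sketch below infers just the fields used here (the names are therefore illustrative) and shows the tail-append that keeps segments in insn order.

struct seginfo_sketch
{
  int mode;                     /* Mode required from this point on.  */
  void *insn_ptr;               /* First insn needing that mode (an rtx
                                   in the real structure).  */
  struct seginfo_sketch *next;  /* Next segment in the same block.  */
};

/* Append INFO at the tail of *HEAD, as add_seginfo does.  */
static void
append_seg (struct seginfo_sketch **head, struct seginfo_sketch *info)
{
  while (*head != NULL)
    head = &(*head)->next;
  *head = info;
}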
*/ static void -make_preds_opaque (b, j) - basic_block b; - int j; +make_preds_opaque (basic_block b, int j) { edge e; @@ -957,10 +917,10 @@ make_preds_opaque (b, j) { basic_block pb = e->src; - if (e->aux || ! TEST_BIT (transp[pb->sindex], j)) + if (e->aux || ! TEST_BIT (transp[pb->index], j)) continue; - RESET_BIT (transp[pb->sindex], j); + RESET_BIT (transp[pb->index], j); make_preds_opaque (pb, j); } } @@ -968,18 +928,16 @@ make_preds_opaque (b, j) /* Record in LIVE that register REG died. */ static void -reg_dies (reg, live) - rtx reg; - HARD_REG_SET live; +reg_dies (rtx reg, HARD_REG_SET live) { int regno, nregs; - if (GET_CODE (reg) != REG) + if (!REG_P (reg)) return; regno = REGNO (reg); if (regno < FIRST_PSEUDO_REGISTER) - for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0; + for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0; nregs--) CLEAR_HARD_REG_BIT (live, regno + nregs); } @@ -988,32 +946,34 @@ reg_dies (reg, live) This is called via note_stores. */ static void -reg_becomes_live (reg, setter, live) - rtx reg; - rtx setter ATTRIBUTE_UNUSED; - void *live; +reg_becomes_live (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *live) { int regno, nregs; if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); - if (GET_CODE (reg) != REG) + if (!REG_P (reg)) return; regno = REGNO (reg); if (regno < FIRST_PSEUDO_REGISTER) - for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0; + for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0; nregs--) SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno + nregs); } +/* Make sure if MODE_ENTRY is defined the MODE_EXIT is defined + and vice versa. */ +#if defined (MODE_ENTRY) != defined (MODE_EXIT) + #error "Both MODE_ENTRY and MODE_EXIT must be defined" +#endif + /* Find all insns that need a particular mode setting, and insert the necessary mode switches. Return true if we did work. */ int -optimize_mode_switching (file) - FILE *file; +optimize_mode_switching (FILE *file) { rtx insn; int e; @@ -1029,43 +989,53 @@ optimize_mode_switching (file) int n_entities; int max_num_modes = 0; bool emited = false; + basic_block post_entry ATTRIBUTE_UNUSED, pre_exit ATTRIBUTE_UNUSED; clear_bb_flags (); -#ifdef NORMAL_MODE - /* Increment last_basic_block before allocating bb_info. */ - last_basic_block++; -#endif for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--) if (OPTIMIZE_MODE_SWITCHING (e)) { - /* Create the list of segments within each basic block. */ + int entry_exit_extra = 0; + + /* Create the list of segments within each basic block. + If NORMAL_MODE is defined, allow for two extra + blocks split from the entry and exit block. */ +#if defined (MODE_ENTRY) && defined (MODE_EXIT) + entry_exit_extra = 2; +#endif bb_info[n_entities] - = (struct bb_info *) xcalloc (last_basic_block, sizeof **bb_info); + = xcalloc (last_basic_block + entry_exit_extra, sizeof **bb_info); entity_map[n_entities++] = e; if (num_modes[e] > max_num_modes) max_num_modes = num_modes[e]; } -#ifdef NORMAL_MODE - /* Decrement it back in case we return below. */ - last_basic_block--; -#endif - if (! n_entities) return 0; -#ifdef NORMAL_MODE - /* We're going to pretend the EXIT_BLOCK is a regular basic block, - so that switching back to normal mode when entering the - EXIT_BLOCK isn't optimized away. We do this by incrementing the - basic block count, growing the VARRAY of basic_block_info and - appending the EXIT_BLOCK_PTR to it. 
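reg_becomes_live above has exactly the callback signature note_stores expects, and reg_dies processes REG_DEAD notes; the insn scan that wires them up is unchanged by this patch and so does not appear in these hunks. Roughly, as a fragment where `insn' and `live_now' are assumed from that surrounding code:

/* Record every hard register written by INSN as live.  note_stores
   walks the SETs and CLOBBERs of the pattern and invokes the
   callback once per destination.  */
note_stores (PATTERN (insn), reg_becomes_live, &live_now);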
*/ - last_basic_block++; - if (VARRAY_SIZE (basic_block_info) < last_basic_block) - VARRAY_GROW (basic_block_info, last_basic_block); - BASIC_BLOCK (last_basic_block - 1) = EXIT_BLOCK_PTR; - EXIT_BLOCK_PTR->sindex = last_basic_blocks; +#if defined (MODE_ENTRY) && defined (MODE_EXIT) + { + /* Split the edge from the entry block and the fallthrough edge to the + exit block, so that we can note that there NORMAL_MODE is supplied / + required. */ + edge eg; + post_entry = split_edge (ENTRY_BLOCK_PTR->succ); + /* The only non-call predecessor at this stage is a block with a + fallthrough edge; there can be at most one, but there could be + none at all, e.g. when exit is called. */ + for (pre_exit = 0, eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next) + if (eg->flags & EDGE_FALLTHRU) + { + regset live_at_end = eg->src->global_live_at_end; + + if (pre_exit) + abort (); + pre_exit = split_edge (eg); + COPY_REG_SET (pre_exit->global_live_at_start, live_at_end); + COPY_REG_SET (pre_exit->global_live_at_end, live_at_end); + } + } #endif /* Create the bitmap vectors. */ @@ -1085,7 +1055,7 @@ optimize_mode_switching (file) /* Determine what the first use (if any) need for a mode of entity E is. This will be the mode that is anticipatable for this block. Also compute the initial transparency settings. */ - FOR_ALL_BB (bb) + FOR_EACH_BB (bb) { struct seginfo *ptr; int last_mode = no_mode; @@ -1093,8 +1063,8 @@ optimize_mode_switching (file) REG_SET_TO_HARD_REG_SET (live_now, bb->global_live_at_start); - for (insn = bb->head; - insn != NULL && insn != NEXT_INSN (bb->end); + for (insn = BB_HEAD (bb); + insn != NULL && insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (INSN_P (insn)) @@ -1105,11 +1075,13 @@ optimize_mode_switching (file) if (mode != no_mode && mode != last_mode) { last_mode = mode; - ptr = new_seginfo (mode, insn, bb->sindex, live_now); - add_seginfo (info + bb->sindex, ptr); - RESET_BIT (transp[bb->sindex], j); + ptr = new_seginfo (mode, insn, bb->index, live_now); + add_seginfo (info + bb->index, ptr); + RESET_BIT (transp[bb->index], j); } - +#ifdef MODE_AFTER + last_mode = MODE_AFTER (last_mode, insn); +#endif /* Update LIVE_NOW. */ for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD) @@ -1122,50 +1094,35 @@ optimize_mode_switching (file) } } - info[bb->sindex].computing = last_mode; + info[bb->index].computing = last_mode; /* Check for blocks without ANY mode requirements. */ if (last_mode == no_mode) { - ptr = new_seginfo (no_mode, insn, bb->sindex, live_now); - add_seginfo (info + bb->sindex, ptr); + ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now); + add_seginfo (info + bb->index, ptr); } } -#ifdef NORMAL_MODE +#if defined (MODE_ENTRY) && defined (MODE_EXIT) { - int mode = NORMAL_MODE (e); + int mode = MODE_ENTRY (e); if (mode != no_mode) { - edge eg; + bb = post_entry; - for (eg = ENTRY_BLOCK_PTR->succ; eg; eg = eg->succ_next) - { - bb = eg->dest; - - /* By always making this nontransparent, we save - an extra check in make_preds_opaque. We also - need this to avoid confusing pre_edge_lcm when - antic is cleared but transp and comp are set. */ - RESET_BIT (transp[bb->sindex], j); - - /* If the block already has MODE, pretend it - has none (because we don't need to set it), - but retain whatever mode it computes. */ - if (info[bb->sindex].seginfo->mode == mode) - info[bb->sindex].seginfo->mode = no_mode; - - /* Insert a fake computing definition of MODE into entry - blocks which compute no mode. 
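The scan above is driven entirely by target macros: MODE_NEEDED asks what mode an insn requires, MODE_AFTER (new in this patch) lets an insn change the mode behind the optimizer's back, and MODE_ENTRY/MODE_EXIT describe the mode on function entry and exit. A hypothetical target fragment for a single rounding-mode entity might look as follows; every FPMODE_* name, fp_mode_needed, and fp_emit_mode_set are invented for illustration, while the macro interfaces themselves are the ones this file consumes.

/* In the target's .h file; one entity, two real modes plus a
   "no mode needed" sentinel.  */
#define FPMODE_ROUND_NEAREST 0
#define FPMODE_ROUND_ZERO    1
#define FPMODE_NONE          2

#define NUM_MODES_FOR_MODE_SWITCHING { FPMODE_NONE }
#define OPTIMIZE_MODE_SWITCHING(ENTITY) 1
#define MODE_NEEDED(ENTITY, INSN) fp_mode_needed (INSN)
#define MODE_AFTER(MODE, INSN) (MODE)   /* No insn clobbers the mode.  */
#define MODE_ENTRY(ENTITY) FPMODE_ROUND_NEAREST
#define MODE_EXIT(ENTITY) FPMODE_ROUND_NEAREST
#define MODE_PRIORITY_TO_MODE(ENTITY, N) (N)
#define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
  fp_emit_mode_set (MODE)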
This represents the mode on - entry. */ - else if (info[bb->sindex].computing == no_mode) - { - info[bb->sindex].computing = mode; - info[bb->sindex].seginfo->mode = no_mode; - } - } + /* By always making this nontransparent, we save + an extra check in make_preds_opaque. We also + need this to avoid confusing pre_edge_lcm when + antic is cleared but transp and comp are set. */ + RESET_BIT (transp[bb->index], j); + + /* Insert a fake computing definition of MODE into entry + blocks which compute no mode. This represents the mode on + entry. */ + info[bb->index].computing = mode; - bb = EXIT_BLOCK_PTR; - info[bb->sindex].seginfo->mode = mode; + if (pre_exit) + info[pre_exit->index].seginfo->mode = MODE_EXIT (e); } } #endif /* NORMAL_MODE */ @@ -1184,21 +1141,21 @@ optimize_mode_switching (file) int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i); struct bb_info *info = bb_info[j]; - FOR_ALL_BB (bb) + FOR_EACH_BB (bb) { - if (info[bb->sindex].seginfo->mode == m) - SET_BIT (antic[bb->sindex], j); + if (info[bb->index].seginfo->mode == m) + SET_BIT (antic[bb->index], j); - if (info[bb->sindex].computing == m) - SET_BIT (comp[bb->sindex], j); + if (info[bb->index].computing == m) + SET_BIT (comp[bb->index], j); } } /* Calculate the optimal locations for the placement mode switches to modes with priority I. */ - FOR_ALL_BB_REVERSE (bb) - sbitmap_not (kill[bb->sindex], transp[bb->sindex]); + FOR_EACH_BB (bb) + sbitmap_not (kill[bb->index], transp[bb->index]); edge_list = pre_edge_lcm (file, 1, transp, comp, antic, kill, &insert, &delete); @@ -1237,12 +1194,11 @@ optimize_mode_switching (file) start_sequence (); EMIT_MODE_SET (entity_map[j], mode, live_at_edge); - mode_set = gen_sequence (); + mode_set = get_insns (); end_sequence (); /* Do not bother to insert empty sequence. */ - if (GET_CODE (mode_set) == SEQUENCE - && !XVECLEN (mode_set, 0)) + if (mode_set == NULL_RTX) continue; /* If this is an abnormal edge, we'll insert at the end @@ -1250,8 +1206,8 @@ optimize_mode_switching (file) if (eg->flags & EDGE_ABNORMAL) { emited = true; - if (GET_CODE (src_bb->end) == JUMP_INSN) - emit_insn_before (mode_set, src_bb->end); + if (JUMP_P (BB_END (src_bb))) + emit_insn_before (mode_set, BB_END (src_bb)); /* It doesn't make sense to switch to normal mode after a CALL_INSN, so we're going to abort if we find one. The cases in which a CALL_INSN may @@ -1263,12 +1219,12 @@ optimize_mode_switching (file) the call (it wouldn't make sense, anyway). In the case of EH edges, EH entry points also start in normal mode, so a similar reasoning applies. */ - else if (GET_CODE (src_bb->end) == INSN) - emit_insn_after (mode_set, src_bb->end); + else if (NONJUMP_INSN_P (BB_END (src_bb))) + emit_insn_after (mode_set, BB_END (src_bb)); else abort (); - bb_info[j][src_bb->sindex].computing = mode; - RESET_BIT (transp[src_bb->sindex], j); + bb_info[j][src_bb->index].computing = mode; + RESET_BIT (transp[src_bb->index], j); } else { @@ -1277,12 +1233,12 @@ optimize_mode_switching (file) } } - FOR_ALL_BB_REVERSE (bb) - if (TEST_BIT (delete[bb->sindex], j)) + FOR_EACH_BB_REVERSE (bb) + if (TEST_BIT (delete[bb->index], j)) { make_preds_opaque (bb, j); /* Cancel the 'deleted' mode set. */ - bb_info[j][bb->sindex].seginfo->mode = no_mode; + bb_info[j][bb->index].seginfo->mode = no_mode; } } @@ -1290,67 +1246,15 @@ optimize_mode_switching (file) free_edge_list (edge_list); } -#ifdef NORMAL_MODE - /* Restore the special status of EXIT_BLOCK. 
*/ - last_basic_block--; - VARRAY_POP (basic_block_info); - EXIT_BLOCK_PTR->sindex = EXIT_BLOCK; -#endif - /* Now output the remaining mode sets in all the segments. */ for (j = n_entities - 1; j >= 0; j--) { int no_mode = num_modes[entity_map[j]]; -#ifdef NORMAL_MODE - if (bb_info[j][last_basic_block].seginfo->mode != no_mode) - { - edge eg; - struct seginfo *ptr = bb_info[j][last_basic_block].seginfo; - - for (eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next) - { - rtx mode_set; - - if (bb_info[j][eg->src->sindex].computing == ptr->mode) - continue; - - start_sequence (); - EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live); - mode_set = gen_sequence (); - end_sequence (); - - /* Do not bother to insert empty sequence. */ - if (GET_CODE (mode_set) == SEQUENCE - && !XVECLEN (mode_set, 0)) - continue; - - /* If this is an abnormal edge, we'll insert at the end of the - previous block. */ - if (eg->flags & EDGE_ABNORMAL) - { - emited = true; - if (GET_CODE (eg->src->end) == JUMP_INSN) - emit_insn_before (mode_set, eg->src->end); - else if (GET_CODE (eg->src->end) == INSN) - emit_insn_after (mode_set, eg->src->end); - else - abort (); - } - else - { - need_commit = 1; - insert_insn_on_edge (mode_set, eg); - } - } - - } -#endif - - FOR_ALL_BB_REVERSE (bb) + FOR_EACH_BB_REVERSE (bb) { struct seginfo *ptr, *next; - for (ptr = bb_info[j][bb->sindex].seginfo; ptr; ptr = next) + for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next) { next = ptr->next; if (ptr->mode != no_mode) @@ -1359,16 +1263,15 @@ optimize_mode_switching (file) start_sequence (); EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live); - mode_set = gen_sequence (); + mode_set = get_insns (); end_sequence (); /* Do not bother to insert empty sequence. */ - if (GET_CODE (mode_set) == SEQUENCE - && !XVECLEN (mode_set, 0)) + if (mode_set == NULL_RTX) continue; emited = true; - if (GET_CODE (ptr->insn_ptr) == NOTE + if (NOTE_P (ptr->insn_ptr) && (NOTE_LINE_NUMBER (ptr->insn_ptr) == NOTE_INSN_BASIC_BLOCK)) emit_insn_after (mode_set, ptr->insn_ptr); @@ -1395,8 +1298,12 @@ optimize_mode_switching (file) if (need_commit) commit_edge_insertions (); +#if defined (MODE_ENTRY) && defined (MODE_EXIT) + cleanup_cfg (CLEANUP_NO_INSN_DEL); +#else if (!need_commit && !emited) return 0; +#endif max_regno = max_reg_num (); allocate_reg_info (max_regno, FALSE, FALSE);
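Taken together, the exported entry points form a small API: a client computes its local properties, hands them to pre_edge_lcm (or pre_edge_rev_lcm), then walks the per-edge INSERT and per-block DELETE results. A hedged sketch of that calling sequence, with the local analyses and the actual insn emission elided:

static void
run_lcm_sketch (FILE *dump, int n_exprs, sbitmap *transp,
                sbitmap *avloc, sbitmap *antloc, sbitmap *kill)
{
  sbitmap *insert, *delete;
  struct edge_list *edge_list;
  int e;

  edge_list = pre_edge_lcm (dump, n_exprs, transp, avloc, antloc,
                            kill, &insert, &delete);

  for (e = 0; e < NUM_EDGES (edge_list); e++)
    {
      /* Expressions set in insert[e] get a new computation on edge e;
         insert_insn_on_edge + commit_edge_insertions would
         materialize it.  Bits in delete[] mark now-redundant
         occurrences, indexed by basic block.  */
    }

  sbitmap_vector_free (insert);
  sbitmap_vector_free (delete);
  free_edge_list (edge_list);
}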