/* Generic partial redundancy elimination with lazy code motion support.
- Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
This file is part of GCC.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
/* These routines are meant to be used by various optimization
passes which can be modeled as lazy code motion problems.
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
+#include "output.h"
#include "tm_p.h"
-#include "df.h"
+#include "function.h"
/* We want target macros for the mode switching code to be able to refer
to instruction attribute values. */
#include "insn-attr.h"
/* Edge based LCM routines. */
-static void compute_antinout_edge PARAMS ((sbitmap *, sbitmap *,
- sbitmap *, sbitmap *));
-static void compute_earliest PARAMS ((struct edge_list *, int,
- sbitmap *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
-static void compute_laterin PARAMS ((struct edge_list *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
-static void compute_insert_delete PARAMS ((struct edge_list *edge_list,
- sbitmap *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
+static void compute_antinout_edge (sbitmap *, sbitmap *, sbitmap *, sbitmap *);
+static void compute_earliest (struct edge_list *, int, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *);
+static void compute_laterin (struct edge_list *, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *);
+static void compute_insert_delete (struct edge_list *edge_list, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *, sbitmap *);
/* Edge based LCM routines on a reverse flowgraph. */
-static void compute_farthest PARAMS ((struct edge_list *, int,
- sbitmap *, sbitmap *,
- sbitmap*, sbitmap *,
- sbitmap *));
-static void compute_nearerout PARAMS ((struct edge_list *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
-static void compute_rev_insert_delete PARAMS ((struct edge_list *edge_list,
- sbitmap *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
-
-static void available_transfer_function PARAMS ((int, int *, sbitmap, sbitmap,
- sbitmap, sbitmap, void *));
+static void compute_farthest (struct edge_list *, int, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *);
+static void compute_nearerout (struct edge_list *, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *);
+static void compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *,
+ sbitmap *);
\f
/* Edge based lcm routines. */
+
/* Compute expression anticipatability at entrance and exit of each block.
This is done based on the flow graph, and not on the pred-succ lists.
Other than that, it's pretty much identical to compute_antinout. */
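+
+/* For reference, a sketch of the fixed point iterated below, using the
+   bitmap names from this file (an illustrative note, not authoritative
+   documentation):
+
+     ANTOUT(bb) = intersection over successors S of ANTIN(S)
+                  (the empty set when BB is a predecessor of EXIT)
+     ANTIN(bb)  = ANTLOC(bb) | (TRANSP(bb) & ANTOUT(bb))  */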
static void
-compute_antinout_edge (antloc, transp, antin, antout)
- sbitmap *antloc;
- sbitmap *transp;
- sbitmap *antin;
- sbitmap *antout;
+compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
+ sbitmap *antout)
{
- int bb;
+ basic_block bb;
edge e;
basic_block *worklist, *qin, *qout, *qend;
unsigned int qlen;
+ edge_iterator ei;
+
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- qin = qout = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
+ qin = qout = worklist = XNEWVEC (basic_block, n_basic_blocks);
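+
+  /* (An illustrative note: QIN and QOUT act as head and tail of a circular
+     queue laid over WORKLIST; each wraps back to the start of the array on
+     reaching QEND, and QLEN tracks how many blocks are currently queued.)  */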
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
- sbitmap_vector_ones (antin, n_basic_blocks);
+ sbitmap_vector_ones (antin, last_basic_block);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of ANTIN above. */
- for (bb = n_basic_blocks - 1; bb >= 0; bb--)
+ FOR_EACH_BB_REVERSE (bb)
{
- *qin++ = BASIC_BLOCK (bb);
- BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
+ *qin++ = bb;
+ bb->aux = bb;
}
qin = worklist;
- qend = &worklist[n_basic_blocks];
- qlen = n_basic_blocks;
+ qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
/* Mark blocks which are predecessors of the exit block so that we
can easily identify them below. */
- for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
e->src->aux = EXIT_BLOCK_PTR;
/* Iterate until the worklist is empty. */
while (qlen)
{
/* Take the first entry off the worklist. */
- basic_block b = *qout++;
- bb = b->index;
+ bb = *qout++;
qlen--;
+
if (qout >= qend)
- qout = worklist;
+ qout = worklist;
- if (b->aux == EXIT_BLOCK_PTR)
+ if (bb->aux == EXIT_BLOCK_PTR)
/* Do not clear the aux field for blocks which are predecessors of
the EXIT block. That way we never add them to the worklist
again. */
- sbitmap_zero (antout[bb]);
+ sbitmap_zero (antout[bb->index]);
else
{
/* Clear the aux field of this block so that it can be added to
the worklist again if necessary. */
- b->aux = NULL;
- sbitmap_intersection_of_succs (antout[bb], antin, bb);
+ bb->aux = NULL;
+ sbitmap_intersection_of_succs (antout[bb->index], antin, bb->index);
}
- if (sbitmap_a_or_b_and_c (antin[bb], antloc[bb], transp[bb], antout[bb]))
+ if (sbitmap_a_or_b_and_c_cg (antin[bb->index], antloc[bb->index],
+ transp[bb->index], antout[bb->index]))
/* If the in state of this block changed, then we need
to add the predecessors of this block to the worklist
if they are not already on the worklist. */
- for (e = b->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, bb->preds)
if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
{
*qin++ = e->src;
e->src->aux = e;
qlen++;
if (qin >= qend)
- qin = worklist;
+ qin = worklist;
}
}
/* Compute the earliest vector for edge based lcm. */
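+
+/* A sketch of the per-edge relation computed below for an edge (PRED, SUCC),
+   assuming the usual LCM formulation (illustrative only, not authoritative):
+
+     EARLIEST(e) = ANTIN(SUCC) & ~AVOUT(PRED) & (KILL(PRED) | ~ANTOUT(PRED))
+
+   with edges leaving ENTRY and entering EXIT handled as special cases in
+   the code.  */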
static void
-compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest)
- struct edge_list *edge_list;
- int n_exprs;
- sbitmap *antin, *antout, *avout, *kill, *earliest;
+compute_earliest (struct edge_list *edge_list, int n_exprs, sbitmap *antin,
+ sbitmap *antout, sbitmap *avout, sbitmap *kill,
+ sbitmap *earliest)
{
sbitmap difference, temp_bitmap;
int x, num_edges;
if (pred == ENTRY_BLOCK_PTR)
sbitmap_copy (earliest[x], antin[succ->index]);
else
- {
- /* We refer to the EXIT_BLOCK index, instead of testing for
- EXIT_BLOCK_PTR, so that EXIT_BLOCK_PTR's index can be
- changed so as to pretend it's a regular block, so that
- its antin can be taken into account. */
- if (succ->index == EXIT_BLOCK)
+ {
+ if (succ == EXIT_BLOCK_PTR)
sbitmap_zero (earliest[x]);
else
{
}
}
- free (temp_bitmap);
- free (difference);
+ sbitmap_free (temp_bitmap);
+ sbitmap_free (difference);
}
/* later(p,s) is dependent on the calculation of laterin(p).
to compute it. */
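+
+/* A sketch of the relations iterated below (illustrative only):
+
+     LATERIN(bb) = intersection over incoming edges E of LATER(E)
+     LATER(e)    = EARLIEST(e) | (LATERIN(src(e)) & ~ANTLOC(src(e)))
+
+   LATERIN for the EXIT block is accumulated separately in the extra
+   element of the LATERIN vector.  */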
static void
-compute_laterin (edge_list, earliest, antloc, later, laterin)
- struct edge_list *edge_list;
- sbitmap *earliest, *antloc, *later, *laterin;
+compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
+ sbitmap *antloc, sbitmap *later, sbitmap *laterin)
{
- int bb, num_edges, i;
+ int num_edges, i;
edge e;
- basic_block *worklist, *qin, *qout, *qend;
+ basic_block *worklist, *qin, *qout, *qend, bb;
unsigned int qlen;
+ edge_iterator ei;
num_edges = NUM_EDGES (edge_list);
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
+ = XNEWVEC (basic_block, n_basic_blocks);
/* Initialize a mapping from each edge to its index. */
for (i = 0; i < num_edges; i++)
do not want to be overly optimistic. Consider an outgoing edge from
the entry block. That edge should always have a LATER value the
same as EARLIEST for that edge. */
- for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]);
/* Add all the blocks to the worklist. This prevents an early exit from
the loop given our optimistic initialization of LATER above. */
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
{
- basic_block b = BASIC_BLOCK (bb);
- *qin++ = b;
- b->aux = b;
+ *qin++ = bb;
+ bb->aux = bb;
}
- qin = worklist;
+
/* Note that we do not use the last allocated element for our queue,
- as EXIT_BLOCK is never inserted into it. In fact the above allocation
- of n_basic_blocks + 1 elements is not encessary. */
- qend = &worklist[n_basic_blocks];
- qlen = n_basic_blocks;
+ as EXIT_BLOCK is never inserted into it. */
+ qin = worklist;
+ qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
/* Iterate until the worklist is empty. */
while (qlen)
{
/* Take the first entry off the worklist. */
- basic_block b = *qout++;
- b->aux = NULL;
+ bb = *qout++;
+ bb->aux = NULL;
qlen--;
if (qout >= qend)
- qout = worklist;
+ qout = worklist;
/* Compute the intersection of LATERIN for each incoming edge to B. */
- bb = b->index;
- sbitmap_ones (laterin[bb]);
- for (e = b->pred; e != NULL; e = e->pred_next)
- sbitmap_a_and_b (laterin[bb], laterin[bb], later[(size_t)e->aux]);
+ sbitmap_ones (laterin[bb->index]);
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ sbitmap_a_and_b (laterin[bb->index], laterin[bb->index],
+ later[(size_t)e->aux]);
/* Calculate LATER for all outgoing edges. */
- for (e = b->succ; e != NULL; e = e->succ_next)
- if (sbitmap_union_of_diff (later[(size_t) e->aux],
- earliest[(size_t) e->aux],
- laterin[e->src->index],
- antloc[e->src->index])
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (sbitmap_union_of_diff_cg (later[(size_t) e->aux],
+ earliest[(size_t) e->aux],
+ laterin[e->src->index],
+ antloc[e->src->index])
/* If LATER for an outgoing edge was changed, then we need
to add the target of the outgoing edge to the worklist. */
&& e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0)
/* Computation of insertion and deletion points requires computing LATERIN
for the EXIT block. We allocated an extra entry in the LATERIN array
for just this purpose. */
- sbitmap_ones (laterin[n_basic_blocks]);
- for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next)
- sbitmap_a_and_b (laterin[n_basic_blocks],
- laterin[n_basic_blocks],
+ sbitmap_ones (laterin[last_basic_block]);
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ sbitmap_a_and_b (laterin[last_basic_block],
+ laterin[last_basic_block],
later[(size_t) e->aux]);
clear_aux_for_edges ();
/* Compute the insertion and deletion points for edge based LCM. */
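+
+/* Roughly, the sets computed below are (illustrative only):
+
+     DELETE(bb) = ANTLOC(bb) & ~LATERIN(bb)
+     INSERT(e)  = LATER(e) & ~LATERIN(dest(e))
+
+   where LATERIN of the EXIT block lives in the extra vector element.  */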
static void
-compute_insert_delete (edge_list, antloc, later, laterin,
- insert, delete)
- struct edge_list *edge_list;
- sbitmap *antloc, *later, *laterin, *insert, *delete;
+compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
+ sbitmap *later, sbitmap *laterin, sbitmap *insert,
+ sbitmap *delete)
{
int x;
+ basic_block bb;
- for (x = 0; x < n_basic_blocks; x++)
- sbitmap_difference (delete[x], antloc[x], laterin[x]);
+ FOR_EACH_BB (bb)
+ sbitmap_difference (delete[bb->index], antloc[bb->index],
+ laterin[bb->index]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
{
basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);
if (b == EXIT_BLOCK_PTR)
- sbitmap_difference (insert[x], later[x], laterin[n_basic_blocks]);
+ sbitmap_difference (insert[x], later[x], laterin[last_basic_block]);
else
sbitmap_difference (insert[x], later[x], laterin[b->index]);
}
map the insert vector to what edge an expression should be inserted on. */
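+
+/* A hypothetical caller might use this routine roughly as follows (a sketch
+   only; the variable names are illustrative, not part of this interface):
+
+     sbitmap *insert_map, *delete_map;
+     struct edge_list *edge_list
+       = pre_edge_lcm (n_exprs, transp, avloc, antloc, kill,
+                       &insert_map, &delete_map);
+
+     for (x = 0; x < NUM_EDGES (edge_list); x++)
+       if (TEST_BIT (insert_map[x], expr_index))
+         ... insert the expression on INDEX_EDGE (edge_list, x) ...
+     FOR_EACH_BB (bb)
+       if (TEST_BIT (delete_map[bb->index], expr_index))
+         ... remove the now-redundant computation in BB ...
+
+   followed by freeing the two vectors and the edge list.  */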
struct edge_list *
-pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
- FILE *file ATTRIBUTE_UNUSED;
- int n_exprs;
- sbitmap *transp;
- sbitmap *avloc;
- sbitmap *antloc;
- sbitmap *kill;
- sbitmap **insert;
- sbitmap **delete;
+pre_edge_lcm (int n_exprs, sbitmap *transp,
+ sbitmap *avloc, sbitmap *antloc, sbitmap *kill,
+ sbitmap **insert, sbitmap **delete)
{
sbitmap *antin, *antout, *earliest;
sbitmap *avin, *avout;
num_edges = NUM_EDGES (edge_list);
#ifdef LCM_DEBUG_INFO
- if (file)
+ if (dump_file)
{
- fprintf (file, "Edge List:\n");
- verify_edge_list (file, edge_list);
- print_edge_list (file, edge_list);
- dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
- dump_sbitmap_vector (file, "antloc", "", antloc, n_basic_blocks);
- dump_sbitmap_vector (file, "avloc", "", avloc, n_basic_blocks);
- dump_sbitmap_vector (file, "kill", "", kill, n_basic_blocks);
+ fprintf (dump_file, "Edge List:\n");
+ verify_edge_list (dump_file, edge_list);
+ print_edge_list (dump_file, edge_list);
+ dump_sbitmap_vector (dump_file, "transp", "", transp, last_basic_block);
+ dump_sbitmap_vector (dump_file, "antloc", "", antloc, last_basic_block);
+ dump_sbitmap_vector (dump_file, "avloc", "", avloc, last_basic_block);
+ dump_sbitmap_vector (dump_file, "kill", "", kill, last_basic_block);
}
#endif
/* Compute global availability. */
- avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_available (avloc, kill, avout, avin);
sbitmap_vector_free (avin);
/* Compute global anticipatability. */
- antin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- antout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ antin = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ antout = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_antinout_edge (antloc, transp, antin, antout);
#ifdef LCM_DEBUG_INFO
- if (file)
+ if (dump_file)
{
- dump_sbitmap_vector (file, "antin", "", antin, n_basic_blocks);
- dump_sbitmap_vector (file, "antout", "", antout, n_basic_blocks);
+ dump_sbitmap_vector (dump_file, "antin", "", antin, last_basic_block);
+ dump_sbitmap_vector (dump_file, "antout", "", antout, last_basic_block);
}
#endif
compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest);
#ifdef LCM_DEBUG_INFO
- if (file)
- dump_sbitmap_vector (file, "earliest", "", earliest, num_edges);
+ if (dump_file)
+ dump_sbitmap_vector (dump_file, "earliest", "", earliest, num_edges);
#endif
sbitmap_vector_free (antout);
later = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the exit block in the laterin vector. */
- laterin = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
+ laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
compute_laterin (edge_list, earliest, antloc, later, laterin);
#ifdef LCM_DEBUG_INFO
- if (file)
+ if (dump_file)
{
- dump_sbitmap_vector (file, "laterin", "", laterin, n_basic_blocks + 1);
- dump_sbitmap_vector (file, "later", "", later, num_edges);
+ dump_sbitmap_vector (dump_file, "laterin", "", laterin, last_basic_block + 1);
+ dump_sbitmap_vector (dump_file, "later", "", later, num_edges);
}
#endif
sbitmap_vector_free (earliest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
- *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ *delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete);
sbitmap_vector_free (laterin);
sbitmap_vector_free (later);
#ifdef LCM_DEBUG_INFO
- if (file)
+ if (dump_file)
{
- dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
- dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
- n_basic_blocks);
+ dump_sbitmap_vector (dump_file, "pre_insert_map", "", *insert, num_edges);
+ dump_sbitmap_vector (dump_file, "pre_delete_map", "", *delete,
+ last_basic_block);
}
#endif
return edge_list;
}
-/* Availability transfer function */
-static void
-available_transfer_function (bb, changed, in, out, gen, kill, data)
- int bb ATTRIBUTE_UNUSED;
- int *changed;
- sbitmap in,out,gen,kill;
- void *data ATTRIBUTE_UNUSED;
-{
- *changed = sbitmap_union_of_diff (out, gen, in, kill);
-}
-/* Compute the AVIN and AVOUT vectors from the AVLOC and KILL
- vectors. */
+
+/* Compute the AVIN and AVOUT vectors from the AVLOC and KILL vectors,
+   iterating the worklist algorithm below until a fixed point is reached.  */
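+
+/* A sketch of the equations iterated to a fixed point below (illustrative
+   only):
+
+     AVIN(bb)  = intersection over predecessors P of AVOUT(P)
+                 (the empty set when ENTRY is a predecessor of BB)
+     AVOUT(bb) = AVLOC(bb) | (AVIN(bb) & ~KILL(bb))  */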
+
void
-compute_available (avloc, kill, avout, avin)
- sbitmap *avloc;
- sbitmap *kill;
- sbitmap *avout;
- sbitmap *avin;
+compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
+ sbitmap *avin)
{
- int *dfs_order;
- int *rc_order;
- bitmap blocks;
- int *inverse_rc_map;
- int i;
- dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
- rc_order = xmalloc (sizeof (int) * n_basic_blocks);
- inverse_rc_map = xmalloc (sizeof (int) * n_basic_blocks);
- flow_depth_first_order_compute (dfs_order, rc_order);
- blocks = BITMAP_XMALLOC ();
- for (i = 0; i < n_basic_blocks; i ++)
- {
- inverse_rc_map[rc_order[i]] = i;
- bitmap_set_bit (blocks, i);
- }
- sbitmap_vector_ones (avout, n_basic_blocks);
- iterative_dataflow_sbitmap (avin, avout, avloc, kill, blocks,
- FORWARD, INTERSECTION,
- available_transfer_function, inverse_rc_map, 0);
- BITMAP_XFREE (blocks);
- free (dfs_order);
- free (rc_order);
- free (inverse_rc_map);
+ edge e;
+ basic_block *worklist, *qin, *qout, *qend, bb;
+ unsigned int qlen;
+ edge_iterator ei;
+
+ /* Allocate a worklist array/queue. Entries are only added to the
+ list if they were not already on the list. So the size is
+ bounded by the number of basic blocks. */
+ qin = qout = worklist =
+ XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
+
+ /* We want a maximal solution. */
+ sbitmap_vector_ones (avout, last_basic_block);
+
+ /* Put every block on the worklist; this is necessary because of the
+ optimistic initialization of AVOUT above. */
+ FOR_EACH_BB (bb)
+ {
+ *qin++ = bb;
+ bb->aux = bb;
+ }
+
+ qin = worklist;
+ qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+
+ /* Mark blocks which are successors of the entry block so that we
+ can easily identify them below. */
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ e->dest->aux = ENTRY_BLOCK_PTR;
+
+ /* Iterate until the worklist is empty. */
+ while (qlen)
+ {
+ /* Take the first entry off the worklist. */
+ bb = *qout++;
+ qlen--;
+
+ if (qout >= qend)
+ qout = worklist;
+
+ /* If one of the predecessor blocks is the ENTRY block, then the
+ intersection of avouts is the null set. We can identify such blocks
+ by the special value in the AUX field in the block structure. */
+ if (bb->aux == ENTRY_BLOCK_PTR)
+ /* Do not clear the aux field for blocks which are successors of the
+ ENTRY block. That way we never add them to the worklist again. */
+ sbitmap_zero (avin[bb->index]);
+ else
+ {
+ /* Clear the aux field of this block so that it can be added to
+ the worklist again if necessary. */
+ bb->aux = NULL;
+ sbitmap_intersection_of_preds (avin[bb->index], avout, bb->index);
+ }
+
+ if (sbitmap_union_of_diff_cg (avout[bb->index], avloc[bb->index],
+ avin[bb->index], kill[bb->index]))
+ /* If the out state of this block changed, then we need
+ to add the successors of this block to the worklist
+ if they are not already on the worklist. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
+ {
+ *qin++ = e->dest;
+ e->dest->aux = e;
+ qlen++;
+
+ if (qin >= qend)
+ qin = worklist;
+ }
+ }
+
+ clear_aux_for_edges ();
+ clear_aux_for_blocks ();
+ free (worklist);
}
/* Compute the farthest vector for edge based lcm. */
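+
+/* A sketch of the per-edge relation computed below for an edge (PRED, SUCC),
+   the reverse-graph analogue of EARLIEST (illustrative only):
+
+     FARTHEST(e) = ST_AVOUT(PRED) & ~ST_ANTIN(SUCC)
+                   & (KILL(SUCC) | ~ST_AVIN(SUCC))
+
+   with edges leaving ENTRY and entering EXIT handled as special cases.  */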
static void
-compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin,
- kill, farthest)
- struct edge_list *edge_list;
- int n_exprs;
- sbitmap *st_avout, *st_avin, *st_antin, *kill, *farthest;
+compute_farthest (struct edge_list *edge_list, int n_exprs,
+ sbitmap *st_avout, sbitmap *st_avin, sbitmap *st_antin,
+ sbitmap *kill, sbitmap *farthest)
{
sbitmap difference, temp_bitmap;
int x, num_edges;
}
}
- free (temp_bitmap);
- free (difference);
+ sbitmap_free (temp_bitmap);
+ sbitmap_free (difference);
}
/* Compute nearer and nearerout vectors for edge based lcm.
implementation can be found before compute_laterin. */
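+
+/* A sketch of the relations iterated below, mirroring LATER/LATERIN on the
+   reverse graph (illustrative only):
+
+     NEAREROUT(bb) = intersection over outgoing edges E of NEARER(E)
+     NEARER(e)     = FARTHEST(e) | (NEAREROUT(dest(e)) & ~ST_AVLOC(dest(e)))
+
+   NEAREROUT for the ENTRY block is accumulated in the extra element of the
+   NEAREROUT vector.  */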
static void
-compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
- struct edge_list *edge_list;
- sbitmap *farthest, *st_avloc, *nearer, *nearerout;
+compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
+ sbitmap *st_avloc, sbitmap *nearer, sbitmap *nearerout)
{
- int bb, num_edges, i;
+ int num_edges, i;
edge e;
- basic_block *worklist, *tos;
+ basic_block *worklist, *tos, bb;
+ edge_iterator ei;
num_edges = NUM_EDGES (edge_list);
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- tos = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
/* Initialize NEARER for each edge and build a mapping from an edge to
its index. */
do not want to be overly optimistic. Consider an incoming edge to
the exit block. That edge should always have a NEARER value the
same as FARTHEST for that edge. */
- for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
sbitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]);
/* Add all the blocks to the worklist. This prevents an early exit
from the loop given our optimistic initialization of NEARER. */
- for (bb = 0; bb < n_basic_blocks; bb++)
+ FOR_EACH_BB (bb)
{
- basic_block b = BASIC_BLOCK (bb);
- *tos++ = b;
- b->aux = b;
+ *tos++ = bb;
+ bb->aux = bb;
}
/* Iterate until the worklist is empty. */
while (tos != worklist)
{
/* Take the first entry off the worklist. */
- basic_block b = *--tos;
- b->aux = NULL;
+ bb = *--tos;
+ bb->aux = NULL;
/* Compute the intersection of NEARER for each outgoing edge from B. */
- bb = b->index;
- sbitmap_ones (nearerout[bb]);
- for (e = b->succ; e != NULL; e = e->succ_next)
- sbitmap_a_and_b (nearerout[bb], nearerout[bb],
+ sbitmap_ones (nearerout[bb->index]);
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ sbitmap_a_and_b (nearerout[bb->index], nearerout[bb->index],
nearer[(size_t) e->aux]);
/* Calculate NEARER for all incoming edges. */
- for (e = b->pred; e != NULL; e = e->pred_next)
- if (sbitmap_union_of_diff (nearer[(size_t) e->aux],
- farthest[(size_t) e->aux],
- nearerout[e->dest->index],
- st_avloc[e->dest->index])
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux],
+ farthest[(size_t) e->aux],
+ nearerout[e->dest->index],
+ st_avloc[e->dest->index])
/* If NEARER for an incoming edge was changed, then we need
to add the source of the incoming edge to the worklist. */
&& e->src != ENTRY_BLOCK_PTR && e->src->aux == 0)
/* Computation of insertion and deletion points requires computing NEAREROUT
for the ENTRY block. We allocated an extra entry in the NEAREROUT array
for just this purpose. */
- sbitmap_ones (nearerout[n_basic_blocks]);
- for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next)
- sbitmap_a_and_b (nearerout[n_basic_blocks],
- nearerout[n_basic_blocks],
+ sbitmap_ones (nearerout[last_basic_block]);
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ sbitmap_a_and_b (nearerout[last_basic_block],
+ nearerout[last_basic_block],
nearer[(size_t) e->aux]);
clear_aux_for_edges ();
/* Compute the insertion and deletion points for edge based LCM on the
   reverse flowgraph. */
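+
+/* Roughly, the sets computed below are (illustrative only):
+
+     DELETE(bb) = ST_AVLOC(bb) & ~NEAREROUT(bb)
+     INSERT(e)  = NEARER(e) & ~NEAREROUT(src(e))
+
+   where NEAREROUT of the ENTRY block lives in the extra vector element.  */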
static void
-compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
- insert, delete)
- struct edge_list *edge_list;
- sbitmap *st_avloc, *nearer, *nearerout, *insert, *delete;
+compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
+ sbitmap *nearer, sbitmap *nearerout,
+ sbitmap *insert, sbitmap *delete)
{
int x;
+ basic_block bb;
- for (x = 0; x < n_basic_blocks; x++)
- sbitmap_difference (delete[x], st_avloc[x], nearerout[x]);
+ FOR_EACH_BB (bb)
+ sbitmap_difference (delete[bb->index], st_avloc[bb->index],
+ nearerout[bb->index]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
{
basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);
if (b == ENTRY_BLOCK_PTR)
- sbitmap_difference (insert[x], nearer[x], nearerout[n_basic_blocks]);
+ sbitmap_difference (insert[x], nearer[x], nearerout[last_basic_block]);
else
sbitmap_difference (insert[x], nearer[x], nearerout[b->index]);
}
an expression should be inserted on. */
struct edge_list *
-pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
- insert, delete)
- FILE *file ATTRIBUTE_UNUSED;
- int n_exprs;
- sbitmap *transp;
- sbitmap *st_avloc;
- sbitmap *st_antloc;
- sbitmap *kill;
- sbitmap **insert;
- sbitmap **delete;
+pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
+ sbitmap *st_avloc, sbitmap *st_antloc, sbitmap *kill,
+ sbitmap **insert, sbitmap **delete)
{
sbitmap *st_antin, *st_antout;
sbitmap *st_avout, *st_avin, *farthest;
edge_list = create_edge_list ();
num_edges = NUM_EDGES (edge_list);
- st_antin = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- st_antout = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- sbitmap_vector_zero (st_antin, n_basic_blocks);
- sbitmap_vector_zero (st_antout, n_basic_blocks);
+ st_antin = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ st_antout = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ sbitmap_vector_zero (st_antin, last_basic_block);
+ sbitmap_vector_zero (st_antout, last_basic_block);
compute_antinout_edge (st_antloc, transp, st_antin, st_antout);
/* Compute global anticipatability. */
- st_avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- st_avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ st_avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ st_avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_available (st_avloc, kill, st_avout, st_avin);
#ifdef LCM_DEBUG_INFO
- if (file)
+ if (dump_file)
{
- fprintf (file, "Edge List:\n");
- verify_edge_list (file, edge_list);
- print_edge_list (file, edge_list);
- dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
- dump_sbitmap_vector (file, "st_avloc", "", st_avloc, n_basic_blocks);
- dump_sbitmap_vector (file, "st_antloc", "", st_antloc, n_basic_blocks);
- dump_sbitmap_vector (file, "st_antin", "", st_antin, n_basic_blocks);
- dump_sbitmap_vector (file, "st_antout", "", st_antout, n_basic_blocks);
- dump_sbitmap_vector (file, "st_kill", "", kill, n_basic_blocks);
+ fprintf (dump_file, "Edge List:\n");
+ verify_edge_list (dump_file, edge_list);
+ print_edge_list (dump_file, edge_list);
+ dump_sbitmap_vector (dump_file, "transp", "", transp, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_avloc", "", st_avloc, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_antloc", "", st_antloc, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_antin", "", st_antin, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_antout", "", st_antout, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_kill", "", kill, last_basic_block);
}
#endif
#ifdef LCM_DEBUG_INFO
- if (file)
+ if (dump_file)
{
- dump_sbitmap_vector (file, "st_avout", "", st_avout, n_basic_blocks);
- dump_sbitmap_vector (file, "st_avin", "", st_avin, n_basic_blocks);
+ dump_sbitmap_vector (dump_file, "st_avout", "", st_avout, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_avin", "", st_avin, last_basic_block);
}
#endif
kill, farthest);
#ifdef LCM_DEBUG_INFO
- if (file)
- dump_sbitmap_vector (file, "farthest", "", farthest, num_edges);
+ if (dump_file)
+ dump_sbitmap_vector (dump_file, "farthest", "", farthest, num_edges);
#endif
sbitmap_vector_free (st_antin);
nearer = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the entry block. */
- nearerout = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
+ nearerout = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout);
#ifdef LCM_DEBUG_INFO
- if (file)
+ if (dump_file)
{
- dump_sbitmap_vector (file, "nearerout", "", nearerout,
- n_basic_blocks + 1);
- dump_sbitmap_vector (file, "nearer", "", nearer, num_edges);
+ dump_sbitmap_vector (dump_file, "nearerout", "", nearerout,
+ last_basic_block + 1);
+ dump_sbitmap_vector (dump_file, "nearer", "", nearer, num_edges);
}
#endif
sbitmap_vector_free (farthest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
- *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ *delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
*insert, *delete);
sbitmap_vector_free (nearer);
#ifdef LCM_DEBUG_INFO
- if (file)
+ if (dump_file)
{
- dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
- dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
- n_basic_blocks);
+ dump_sbitmap_vector (dump_file, "pre_insert_map", "", *insert, num_edges);
+ dump_sbitmap_vector (dump_file, "pre_delete_map", "", *delete,
+ last_basic_block);
}
#endif
return edge_list;
}
-/* Mode switching:
-
- The algorithm for setting the modes consists of scanning the insn list
- and finding all the insns which require a specific mode. Each insn gets
- a unique struct seginfo element. These structures are inserted into a list
- for each basic block. For each entity, there is an array of bb_info over
- the flow graph basic blocks (local var 'bb_info'), and contains a list
- of all insns within that basic block, in the order they are encountered.
-
- For each entity, any basic block WITHOUT any insns requiring a specific
- mode are given a single entry, without a mode. (Each basic block
- in the flow graph must have at least one entry in the segment table.)
-
- The LCM algorithm is then run over the flow graph to determine where to
- place the sets to the highest-priority value in respect of first the first
- insn in any one block. Any adjustments required to the transparancy
- vectors are made, then the next iteration starts for the next-lower
- priority mode, till for each entity all modes are exhasted.
-
- More details are located in the code for optimize_mode_switching(). */
-
-/* This structure contains the information for each insn which requires
- either single or double mode to be set.
- MODE is the mode this insn must be executed in.
- INSN_PTR is the insn to be executed (may be the note that marks the
- beginning of a basic block).
- BBNUM is the flow graph basic block this insn occurs in.
- NEXT is the next insn in the same basic block. */
-struct seginfo
-{
- int mode;
- rtx insn_ptr;
- int bbnum;
- struct seginfo *next;
- HARD_REG_SET regs_live;
-};
-
-struct lcm_bb_info
-{
- struct seginfo *seginfo;
- int computing;
-};
-
-/* These bitmaps are used for the LCM algorithm. */
-
-#ifdef OPTIMIZE_MODE_SWITCHING
-static sbitmap *antic;
-static sbitmap *transp;
-static sbitmap *comp;
-static sbitmap *delete;
-static sbitmap *insert;
-
-static struct seginfo * new_seginfo PARAMS ((int, rtx, int, HARD_REG_SET));
-static void add_seginfo PARAMS ((struct lcm_bb_info *, struct seginfo *));
-static void reg_dies PARAMS ((rtx, HARD_REG_SET));
-static void reg_becomes_live PARAMS ((rtx, rtx, void *));
-static void make_preds_opaque PARAMS ((basic_block, int));
-#endif
-\f
-#ifdef OPTIMIZE_MODE_SWITCHING
-
-/* This function will allocate a new BBINFO structure, initialized
- with the MODE, INSN, and basic block BB parameters. */
-
-static struct seginfo *
-new_seginfo (mode, insn, bb, regs_live)
- int mode;
- rtx insn;
- int bb;
- HARD_REG_SET regs_live;
-{
- struct seginfo *ptr;
- ptr = xmalloc (sizeof (struct seginfo));
- ptr->mode = mode;
- ptr->insn_ptr = insn;
- ptr->bbnum = bb;
- ptr->next = NULL;
- COPY_HARD_REG_SET (ptr->regs_live, regs_live);
- return ptr;
-}
-
-/* Add a seginfo element to the end of a list.
- HEAD is a pointer to the list beginning.
- INFO is the structure to be linked in. */
-
-static void
-add_seginfo (head, info)
- struct lcm_bb_info *head;
- struct seginfo *info;
-{
- struct seginfo *ptr;
-
- if (head->seginfo == NULL)
- head->seginfo = info;
- else
- {
- ptr = head->seginfo;
- while (ptr->next != NULL)
- ptr = ptr->next;
- ptr->next = info;
- }
-}
-
-/* Make all predecessors of basic block B opaque, recursively, till we hit
- some that are already non-transparent, or an edge where aux is set; that
- denotes that a mode set is to be done on that edge.
- J is the bit number in the bitmaps that corresponds to the entity that
- we are currently handling mode-switching for. */
-
-static void
-make_preds_opaque (b, j)
- basic_block b;
- int j;
-{
- edge e;
-
- for (e = b->pred; e; e = e->pred_next)
- {
- basic_block pb = e->src;
-
- if (e->aux || ! TEST_BIT (transp[pb->index], j))
- continue;
-
- RESET_BIT (transp[pb->index], j);
- make_preds_opaque (pb, j);
- }
-}
-
-/* Record in LIVE that register REG died. */
-
-static void
-reg_dies (reg, live)
- rtx reg;
- HARD_REG_SET live;
-{
- int regno, nregs;
-
- if (GET_CODE (reg) != REG)
- return;
-
- regno = REGNO (reg);
- if (regno < FIRST_PSEUDO_REGISTER)
- for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0;
- nregs--)
- CLEAR_HARD_REG_BIT (live, regno + nregs);
-}
-
-/* Record in LIVE that register REG became live.
- This is called via note_stores. */
-
-static void
-reg_becomes_live (reg, setter, live)
- rtx reg;
- rtx setter ATTRIBUTE_UNUSED;
- void *live;
-{
- int regno, nregs;
-
- if (GET_CODE (reg) == SUBREG)
- reg = SUBREG_REG (reg);
-
- if (GET_CODE (reg) != REG)
- return;
-
- regno = REGNO (reg);
- if (regno < FIRST_PSEUDO_REGISTER)
- for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0;
- nregs--)
- SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno + nregs);
-}
-
-/* Find all insns that need a particular mode setting, and insert the
- necessary mode switches. Return true if we did work. */
-
-int
-optimize_mode_switching (file)
- FILE *file;
-{
- rtx insn;
- int bb, e;
- int need_commit = 0;
- sbitmap *kill;
- struct edge_list *edge_list;
- static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
-#define N_ENTITIES (sizeof num_modes / sizeof (int))
- int entity_map[N_ENTITIES];
- struct lcm_bb_info *bb_info[N_ENTITIES];
- int i, j;
- int n_entities;
- int max_num_modes = 0;
- bool emited = false;
-
-#ifdef NORMAL_MODE
- /* Increment n_basic_blocks before allocating bb_info. */
- n_basic_blocks++;
-#endif
-
- for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
- if (OPTIMIZE_MODE_SWITCHING (e))
- {
- /* Create the list of segments within each basic block. */
- bb_info[n_entities]
- = (struct lcm_bb_info *) xcalloc (n_basic_blocks, sizeof **bb_info);
- entity_map[n_entities++] = e;
- if (num_modes[e] > max_num_modes)
- max_num_modes = num_modes[e];
- }
-
-#ifdef NORMAL_MODE
- /* Decrement it back in case we return below. */
- n_basic_blocks--;
-#endif
-
- if (! n_entities)
- return 0;
-
-#ifdef NORMAL_MODE
- /* We're going to pretend the EXIT_BLOCK is a regular basic block,
- so that switching back to normal mode when entering the
- EXIT_BLOCK isn't optimized away. We do this by incrementing the
- basic block count, growing the VARRAY of basic_block_info and
- appending the EXIT_BLOCK_PTR to it. */
- n_basic_blocks++;
- if (VARRAY_SIZE (basic_block_info) < n_basic_blocks)
- VARRAY_GROW (basic_block_info, n_basic_blocks);
- BASIC_BLOCK (n_basic_blocks - 1) = EXIT_BLOCK_PTR;
- EXIT_BLOCK_PTR->index = n_basic_blocks - 1;
-#endif
-
- /* Create the bitmap vectors. */
-
- antic = sbitmap_vector_alloc (n_basic_blocks, n_entities);
- transp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
- comp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
-
- sbitmap_vector_ones (transp, n_basic_blocks);
-
- for (j = n_entities - 1; j >= 0; j--)
- {
- int e = entity_map[j];
- int no_mode = num_modes[e];
- struct lcm_bb_info *info = bb_info[j];
-
- /* Determine what the first use (if any) need for a mode of entity E is.
- This will be the mode that is anticipatable for this block.
- Also compute the initial transparency settings. */
- for (bb = 0 ; bb < n_basic_blocks; bb++)
- {
- struct seginfo *ptr;
- int last_mode = no_mode;
- HARD_REG_SET live_now;
-
- REG_SET_TO_HARD_REG_SET (live_now,
- BASIC_BLOCK (bb)->global_live_at_start);
- for (insn = BLOCK_HEAD (bb);
- insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
- insn = NEXT_INSN (insn))
- {
- if (INSN_P (insn))
- {
- int mode = MODE_NEEDED (e, insn);
- rtx link;
-
- if (mode != no_mode && mode != last_mode)
- {
- last_mode = mode;
- ptr = new_seginfo (mode, insn, bb, live_now);
- add_seginfo (info + bb, ptr);
- RESET_BIT (transp[bb], j);
- }
-
- /* Update LIVE_NOW. */
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) == REG_DEAD)
- reg_dies (XEXP (link, 0), live_now);
-
- note_stores (PATTERN (insn), reg_becomes_live, &live_now);
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) == REG_UNUSED)
- reg_dies (XEXP (link, 0), live_now);
- }
- }
-
- info[bb].computing = last_mode;
- /* Check for blocks without ANY mode requirements. */
- if (last_mode == no_mode)
- {
- ptr = new_seginfo (no_mode, insn, bb, live_now);
- add_seginfo (info + bb, ptr);
- }
- }
-#ifdef NORMAL_MODE
- {
- int mode = NORMAL_MODE (e);
-
- if (mode != no_mode)
- {
- edge eg;
-
- for (eg = ENTRY_BLOCK_PTR->succ; eg; eg = eg->succ_next)
- {
- bb = eg->dest->index;
-
- /* By always making this nontransparent, we save
- an extra check in make_preds_opaque. We also
- need this to avoid confusing pre_edge_lcm when
- antic is cleared but transp and comp are set. */
- RESET_BIT (transp[bb], j);
-
- /* If the block already has MODE, pretend it
- has none (because we don't need to set it),
- but retain whatever mode it computes. */
- if (info[bb].seginfo->mode == mode)
- info[bb].seginfo->mode = no_mode;
-
- /* Insert a fake computing definition of MODE into entry
- blocks which compute no mode. This represents the mode on
- entry. */
- else if (info[bb].computing == no_mode)
- {
- info[bb].computing = mode;
- info[bb].seginfo->mode = no_mode;
- }
- }
-
- bb = n_basic_blocks - 1;
- info[bb].seginfo->mode = mode;
- }
- }
-#endif /* NORMAL_MODE */
- }
-
- kill = sbitmap_vector_alloc (n_basic_blocks, n_entities);
- for (i = 0; i < max_num_modes; i++)
- {
- int current_mode[N_ENTITIES];
-
- /* Set the anticipatable and computing arrays. */
- sbitmap_vector_zero (antic, n_basic_blocks);
- sbitmap_vector_zero (comp, n_basic_blocks);
- for (j = n_entities - 1; j >= 0; j--)
- {
- int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
- struct lcm_bb_info *info = bb_info[j];
-
- for (bb = 0 ; bb < n_basic_blocks; bb++)
- {
- if (info[bb].seginfo->mode == m)
- SET_BIT (antic[bb], j);
-
- if (info[bb].computing == m)
- SET_BIT (comp[bb], j);
- }
- }
-
- /* Calculate the optimal locations for the
- placement mode switches to modes with priority I. */
-
- for (bb = n_basic_blocks - 1; bb >= 0; bb--)
- sbitmap_not (kill[bb], transp[bb]);
- edge_list = pre_edge_lcm (file, 1, transp, comp, antic,
- kill, &insert, &delete);
-
- for (j = n_entities - 1; j >= 0; j--)
- {
- /* Insert all mode sets that have been inserted by lcm. */
- int no_mode = num_modes[entity_map[j]];
-
- /* Wherever we have moved a mode setting upwards in the flow graph,
- the blocks between the new setting site and the now redundant
- computation ceases to be transparent for any lower-priority
- mode of the same entity. First set the aux field of each
- insertion site edge non-transparent, then propagate the new
- non-transparency from the redundant computation upwards till
- we hit an insertion site or an already non-transparent block. */
- for (e = NUM_EDGES (edge_list) - 1; e >= 0; e--)
- {
- edge eg = INDEX_EDGE (edge_list, e);
- int mode;
- basic_block src_bb;
- HARD_REG_SET live_at_edge;
- rtx mode_set;
-
- eg->aux = 0;
-
- if (! TEST_BIT (insert[e], j))
- continue;
-
- eg->aux = (void *)1;
-
- mode = current_mode[j];
- src_bb = eg->src;
-
- REG_SET_TO_HARD_REG_SET (live_at_edge,
- src_bb->global_live_at_end);
-
- start_sequence ();
- EMIT_MODE_SET (entity_map[j], mode, live_at_edge);
- mode_set = gen_sequence ();
- end_sequence ();
-
- /* Do not bother to insert empty sequence. */
- if (GET_CODE (mode_set) == SEQUENCE
- && !XVECLEN (mode_set, 0))
- continue;
-
- /* If this is an abnormal edge, we'll insert at the end
- of the previous block. */
- if (eg->flags & EDGE_ABNORMAL)
- {
- emited = true;
- if (GET_CODE (src_bb->end) == JUMP_INSN)
- emit_insn_before (mode_set, src_bb->end);
- /* It doesn't make sense to switch to normal mode
- after a CALL_INSN, so we're going to abort if we
- find one. The cases in which a CALL_INSN may
- have an abnormal edge are sibcalls and EH edges.
- In the case of sibcalls, the dest basic-block is
- the EXIT_BLOCK, that runs in normal mode; it is
- assumed that a sibcall insn requires normal mode
- itself, so no mode switch would be required after
- the call (it wouldn't make sense, anyway). In
- the case of EH edges, EH entry points also start
- in normal mode, so a similar reasoning applies. */
- else if (GET_CODE (src_bb->end) == INSN)
- emit_insn_after (mode_set, src_bb->end);
- else
- abort ();
- bb_info[j][src_bb->index].computing = mode;
- RESET_BIT (transp[src_bb->index], j);
- }
- else
- {
- need_commit = 1;
- insert_insn_on_edge (mode_set, eg);
- }
- }
-
- for (bb = n_basic_blocks - 1; bb >= 0; bb--)
- if (TEST_BIT (delete[bb], j))
- {
- make_preds_opaque (BASIC_BLOCK (bb), j);
- /* Cancel the 'deleted' mode set. */
- bb_info[j][bb].seginfo->mode = no_mode;
- }
- }
-
- clear_aux_for_edges ();
- free_edge_list (edge_list);
- }
-
-#ifdef NORMAL_MODE
- /* Restore the special status of EXIT_BLOCK. */
- n_basic_blocks--;
- VARRAY_POP (basic_block_info);
- EXIT_BLOCK_PTR->index = EXIT_BLOCK;
-#endif
-
- /* Now output the remaining mode sets in all the segments. */
- for (j = n_entities - 1; j >= 0; j--)
- {
- int no_mode = num_modes[entity_map[j]];
-
-#ifdef NORMAL_MODE
- if (bb_info[j][n_basic_blocks].seginfo->mode != no_mode)
- {
- edge eg;
- struct seginfo *ptr = bb_info[j][n_basic_blocks].seginfo;
-
- for (eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next)
- {
- rtx mode_set;
-
- if (bb_info[j][eg->src->index].computing == ptr->mode)
- continue;
-
- start_sequence ();
- EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
- mode_set = gen_sequence ();
- end_sequence ();
-
- /* Do not bother to insert empty sequence. */
- if (GET_CODE (mode_set) == SEQUENCE
- && !XVECLEN (mode_set, 0))
- continue;
-
- /* If this is an abnormal edge, we'll insert at the end of the
- previous block. */
- if (eg->flags & EDGE_ABNORMAL)
- {
- emited = true;
- if (GET_CODE (eg->src->end) == JUMP_INSN)
- emit_insn_before (mode_set, eg->src->end);
- else if (GET_CODE (eg->src->end) == INSN)
- emit_insn_after (mode_set, eg->src->end);
- else
- abort ();
- }
- else
- {
- need_commit = 1;
- insert_insn_on_edge (mode_set, eg);
- }
- }
-
- }
-#endif
-
- for (bb = n_basic_blocks - 1; bb >= 0; bb--)
- {
- struct seginfo *ptr, *next;
- for (ptr = bb_info[j][bb].seginfo; ptr; ptr = next)
- {
- next = ptr->next;
- if (ptr->mode != no_mode)
- {
- rtx mode_set;
-
- start_sequence ();
- EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
- mode_set = gen_sequence ();
- end_sequence ();
-
- /* Do not bother to insert empty sequence. */
- if (GET_CODE (mode_set) == SEQUENCE
- && !XVECLEN (mode_set, 0))
- continue;
-
- emited = true;
- if (GET_CODE (ptr->insn_ptr) == NOTE
- && (NOTE_LINE_NUMBER (ptr->insn_ptr)
- == NOTE_INSN_BASIC_BLOCK))
- emit_insn_after (mode_set, ptr->insn_ptr);
- else
- emit_insn_before (mode_set, ptr->insn_ptr);
- }
-
- free (ptr);
- }
- }
-
- free (bb_info[j]);
- }
-
- /* Finished. Free up all the things we've allocated. */
-
- sbitmap_vector_free (kill);
- sbitmap_vector_free (antic);
- sbitmap_vector_free (transp);
- sbitmap_vector_free (comp);
- sbitmap_vector_free (delete);
- sbitmap_vector_free (insert);
-
- if (need_commit)
- commit_edge_insertions ();
-
- if (!need_commit && !emited)
- return 0;
-
- /* Ideally we'd figure out what blocks were affected and start from
- there, but this is enormously complicated by commit_edge_insertions,
- which would screw up any indices we'd collected, and also need to
- be involved in the update. Bail and recompute global life info for
- everything. */
-
- allocate_reg_life_data ();
- update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
- (PROP_DEATH_NOTES | PROP_KILL_DEAD_CODE
- | PROP_SCAN_DEAD_CODE | PROP_REG_INFO));
-
- return 1;
-}
-#endif /* OPTIMIZE_MODE_SWITCHING */