/* Generic partial redundancy elimination with lazy code motion support.
- Copyright (C) 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004
+ Free Software Foundation, Inc.
This file is part of GCC.
#include "insn-attr.h"
/* Edge based LCM routines. */
-static void compute_antinout_edge PARAMS ((sbitmap *, sbitmap *,
- sbitmap *, sbitmap *));
-static void compute_earliest PARAMS ((struct edge_list *, int,
- sbitmap *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
-static void compute_laterin PARAMS ((struct edge_list *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
-static void compute_insert_delete PARAMS ((struct edge_list *edge_list,
- sbitmap *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
+static void compute_antinout_edge (sbitmap *, sbitmap *, sbitmap *, sbitmap *);
+static void compute_earliest (struct edge_list *, int, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *);
+static void compute_laterin (struct edge_list *, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *);
+static void compute_insert_delete (struct edge_list *edge_list, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *, sbitmap *);
/* Edge based LCM routines on a reverse flowgraph. */
-static void compute_farthest PARAMS ((struct edge_list *, int,
- sbitmap *, sbitmap *,
- sbitmap*, sbitmap *,
- sbitmap *));
-static void compute_nearerout PARAMS ((struct edge_list *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
-static void compute_rev_insert_delete PARAMS ((struct edge_list *edge_list,
- sbitmap *, sbitmap *,
- sbitmap *, sbitmap *,
- sbitmap *));
+static void compute_farthest (struct edge_list *, int, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *);
+static void compute_nearerout (struct edge_list *, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *);
+static void compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *,
+ sbitmap *);
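For context on the conversion above: PARAMS comes from include/ansidecl.h and exists only so that pre-ISO compilers could still parse these declarations.  Roughly (a sketch of the idea, not the exact ansidecl.h text):

/* Sketch of the PARAMS compatibility macro; see include/ansidecl.h for
   the real definition and its guard conditions.  */
#ifdef __STDC__
#define PARAMS(args) args	/* ISO compilers get the full prototype.  */
#else
#define PARAMS(args) ()		/* K&R compilers get an empty parameter list.  */
#endif

With K&R compilers no longer supported, dropping the macro is purely mechanical.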
\f
/* Edge based lcm routines. */
Other than that, it's pretty much identical to compute_antinout. */
static void
-compute_antinout_edge (antloc, transp, antin, antout)
- sbitmap *antloc;
- sbitmap *transp;
- sbitmap *antin;
- sbitmap *antout;
+compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
+ sbitmap *antout)
{
basic_block bb;
edge e;
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- qin = qout = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
+ qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks);
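The (basic_block *) cast can be dropped because a void * result such as xmalloc's converts implicitly to any object pointer type in ISO C.  A standalone illustration (not part of the patch):

#include <stdlib.h>

/* In C the cast on a malloc-style result is redundant; the void *
   return value converts implicitly to the destination pointer type.  */
int *
make_buffer (size_t n)
{
  int *p = malloc (n * sizeof *p);	/* no cast needed in C */
  return p;
}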
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
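For reference, the fixed point compute_antinout_edge iterates to is the standard backward-anticipatability solution, stated here as a sketch rather than the exact sbitmap calls in the body:

/* For each block B, with the exit block's ANTOUT forced empty:

     ANTOUT[B] = intersection over all successors S of ANTIN[S]
     ANTIN[B]  = ANTLOC[B] | (ANTOUT[B] & TRANSP[B])

   The optimistic all-ones initialization of ANTIN mentioned above lets
   the intersections converge downward to the maximal fixed point.  */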
/* Compute the earliest vector for edge based lcm. */
static void
-compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest)
- struct edge_list *edge_list;
- int n_exprs;
- sbitmap *antin, *antout, *avout, *kill, *earliest;
+compute_earliest (struct edge_list *edge_list, int n_exprs, sbitmap *antin,
+ sbitmap *antout, sbitmap *avout, sbitmap *kill,
+ sbitmap *earliest)
{
sbitmap difference, temp_bitmap;
int x, num_edges;
to compute it. */
static void
-compute_laterin (edge_list, earliest, antloc, later, laterin)
- struct edge_list *edge_list;
- sbitmap *earliest, *antloc, *later, *laterin;
+compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
+ sbitmap *antloc, sbitmap *later, sbitmap *laterin)
{
int num_edges, i;
edge e;
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
+ = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
/* Initialize a mapping from each edge to its index. */
for (i = 0; i < num_edges; i++)
/* Compute the insertion and deletion points for edge based LCM. */
static void
-compute_insert_delete (edge_list, antloc, later, laterin,
- insert, delete)
- struct edge_list *edge_list;
- sbitmap *antloc, *later, *laterin, *insert, *delete;
+compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
+ sbitmap *later, sbitmap *laterin, sbitmap *insert,
+ sbitmap *delete)
{
int x;
basic_block bb;
map the insert vector to what edge an expression should be inserted on. */
struct edge_list *
-pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
- FILE *file ATTRIBUTE_UNUSED;
- int n_exprs;
- sbitmap *transp;
- sbitmap *avloc;
- sbitmap *antloc;
- sbitmap *kill;
- sbitmap **insert;
- sbitmap **delete;
+pre_edge_lcm (FILE *file ATTRIBUTE_UNUSED, int n_exprs, sbitmap *transp,
+ sbitmap *avloc, sbitmap *antloc, sbitmap *kill,
+ sbitmap **insert, sbitmap **delete)
{
sbitmap *antin, *antout, *earliest;
sbitmap *avin, *avout;
Return the number of passes we performed to iterate to a solution. */
void
-compute_available (avloc, kill, avout, avin)
- sbitmap *avloc, *kill, *avout, *avin;
+compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
+ sbitmap *avin)
{
edge e;
basic_block *worklist, *qin, *qout, *qend, bb;
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- qin = qout = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
+ qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks);
/* We want a maximal solution. */
sbitmap_vector_ones (avout, last_basic_block);
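The forward-availability problem solved by compute_available is the mirror image; again as a sketch, not the literal sbitmap calls:

/* For each block B, with the entry block's AVIN forced empty:

     AVIN[B]  = intersection over all predecessors P of AVOUT[P]
     AVOUT[B] = AVLOC[B] | (AVIN[B] & ~KILL[B])

   Seeding AVOUT to all ones above makes the iteration converge to the
   maximal solution here as well.  */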
/* Compute the farthest vector for edge based lcm. */
static void
-compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin,
- kill, farthest)
- struct edge_list *edge_list;
- int n_exprs;
- sbitmap *st_avout, *st_avin, *st_antin, *kill, *farthest;
+compute_farthest (struct edge_list *edge_list, int n_exprs,
+ sbitmap *st_avout, sbitmap *st_avin, sbitmap *st_antin,
+ sbitmap *kill, sbitmap *farthest)
{
sbitmap difference, temp_bitmap;
int x, num_edges;
implementation can be found before compute_laterin. */
static void
-compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
- struct edge_list *edge_list;
- sbitmap *farthest, *st_avloc, *nearer, *nearerout;
+compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
+ sbitmap *st_avloc, sbitmap *nearer, sbitmap *nearerout)
{
int num_edges, i;
edge e;
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- tos = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
+ tos = worklist = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
/* Initialize NEARER for each edge and build a mapping from an edge to
its index. */
/* Compute the insertion and deletion points for edge based LCM. */
static void
-compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
- insert, delete)
- struct edge_list *edge_list;
- sbitmap *st_avloc, *nearer, *nearerout, *insert, *delete;
+compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
+ sbitmap *nearer, sbitmap *nearerout,
+ sbitmap *insert, sbitmap *delete)
{
int x;
basic_block bb;
an expression should be inserted on. */
struct edge_list *
-pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
- insert, delete)
- FILE *file ATTRIBUTE_UNUSED;
- int n_exprs;
- sbitmap *transp;
- sbitmap *st_avloc;
- sbitmap *st_antloc;
- sbitmap *kill;
- sbitmap **insert;
- sbitmap **delete;
+pre_edge_rev_lcm (FILE *file ATTRIBUTE_UNUSED, int n_exprs, sbitmap *transp,
+ sbitmap *st_avloc, sbitmap *st_antloc, sbitmap *kill,
+ sbitmap **insert, sbitmap **delete)
{
sbitmap *st_antin, *st_antout;
sbitmap *st_avout, *st_avin, *farthest;
edge_list = create_edge_list ();
num_edges = NUM_EDGES (edge_list);
- st_antin = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs);
- st_antout = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs);
+ st_antin = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ st_antout = sbitmap_vector_alloc (last_basic_block, n_exprs);
sbitmap_vector_zero (st_antin, last_basic_block);
sbitmap_vector_zero (st_antout, last_basic_block);
compute_antinout_edge (st_antloc, transp, st_antin, st_antout);
static sbitmap *delete;
static sbitmap *insert;
-static struct seginfo * new_seginfo PARAMS ((int, rtx, int, HARD_REG_SET));
-static void add_seginfo PARAMS ((struct bb_info *, struct seginfo *));
-static void reg_dies PARAMS ((rtx, HARD_REG_SET));
-static void reg_becomes_live PARAMS ((rtx, rtx, void *));
-static void make_preds_opaque PARAMS ((basic_block, int));
+static struct seginfo * new_seginfo (int, rtx, int, HARD_REG_SET);
+static void add_seginfo (struct bb_info *, struct seginfo *);
+static void reg_dies (rtx, HARD_REG_SET);
+static void reg_becomes_live (rtx, rtx, void *);
+static void make_preds_opaque (basic_block, int);
#endif
\f
#ifdef OPTIMIZE_MODE_SWITCHING
with the MODE, INSN, and basic block BB parameters. */
static struct seginfo *
-new_seginfo (mode, insn, bb, regs_live)
- int mode;
- rtx insn;
- int bb;
- HARD_REG_SET regs_live;
+new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live)
{
struct seginfo *ptr;
ptr = xmalloc (sizeof (struct seginfo));
INFO is the structure to be linked in. */
static void
-add_seginfo (head, info)
- struct bb_info *head;
- struct seginfo *info;
+add_seginfo (struct bb_info *head, struct seginfo *info)
{
struct seginfo *ptr;
we are currently handling mode-switching for. */
static void
-make_preds_opaque (b, j)
- basic_block b;
- int j;
+make_preds_opaque (basic_block b, int j)
{
edge e;
/* Record in LIVE that register REG died. */
static void
-reg_dies (reg, live)
- rtx reg;
- HARD_REG_SET live;
+reg_dies (rtx reg, HARD_REG_SET live)
{
int regno, nregs;
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return;
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
- for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0;
+ for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0;
nregs--)
CLEAR_HARD_REG_BIT (live, regno + nregs);
}
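The hard_regno_nregs array that replaces the HARD_REGNO_NREGS macro here is a table precomputed per (register, mode) pair.  A hedged sketch of how such a table could be filled once at startup; the function name is invented for illustration and this is not the actual GCC initialization code:

/* Illustrative one-time initialization of a precomputed register-size
   table; GCC sets up the real hard_regno_nregs elsewhere.  */
static void
init_hard_regno_nregs_table (void)
{
  int regno, mode;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
      hard_regno_nregs[regno][mode]
	= HARD_REGNO_NREGS (regno, (enum machine_mode) mode);
}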
This is called via note_stores. */
static void
-reg_becomes_live (reg, setter, live)
- rtx reg;
- rtx setter ATTRIBUTE_UNUSED;
- void *live;
+reg_becomes_live (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *live)
{
int regno, nregs;
if (GET_CODE (reg) == SUBREG)
reg = SUBREG_REG (reg);
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
return;
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
- for (nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1; nregs >= 0;
+ for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0;
nregs--)
SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno + nregs);
}
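The GET_CODE comparisons replaced here and below use the rtl.h predicate macros, which are essentially shorthand of this form (see rtl.h for the authoritative definitions):

/* Paraphrased predicate macros from rtl.h.  */
#define REG_P(X)           (GET_CODE (X) == REG)
#define NOTE_P(X)          (GET_CODE (X) == NOTE)
#define JUMP_P(X)          (GET_CODE (X) == JUMP_INSN)
#define NONJUMP_INSN_P(X)  (GET_CODE (X) == INSN)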
+/* Make sure that if MODE_ENTRY is defined, MODE_EXIT is defined too,
+   and vice versa.  */
+#if defined (MODE_ENTRY) != defined (MODE_EXIT)
+ #error "Both MODE_ENTRY and MODE_EXIT must be defined"
+#endif
+
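The guard works because defined (X) evaluates to 1 or 0 inside a preprocessor conditional, so the comparison fires exactly when one macro is defined without the other.  A standalone illustration using hypothetical placeholder macros FOO and BAR:

/* Both-or-neither guard pattern; FOO and BAR are placeholders, not GCC
   macros.  Defining only one of them trips the #error.  */
#define FOO 1
#define BAR 1
#if defined (FOO) != defined (BAR)
# error "FOO and BAR must be defined together"
#endif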
/* Find all insns that need a particular mode setting, and insert the
necessary mode switches. Return true if we did work. */
int
-optimize_mode_switching (file)
- FILE *file;
+optimize_mode_switching (FILE *file)
{
rtx insn;
int e;
/* Create the list of segments within each basic block.
-   If NORMAL_MODE is defined, allow for two extra
-   blocks split from the entry and exit block. */
+   If MODE_ENTRY and MODE_EXIT are defined, allow for two extra
+   blocks split from the entry and exit block.  */
-#ifdef NORMAL_MODE
+#if defined (MODE_ENTRY) && defined (MODE_EXIT)
entry_exit_extra = 2;
#endif
bb_info[n_entities]
- = (struct bb_info *) xcalloc (last_basic_block + entry_exit_extra,
- sizeof **bb_info);
+ = xcalloc (last_basic_block + entry_exit_extra, sizeof **bb_info);
entity_map[n_entities++] = e;
if (num_modes[e] > max_num_modes)
max_num_modes = num_modes[e];
if (! n_entities)
return 0;
-#ifdef NORMAL_MODE
+#if defined (MODE_ENTRY) && defined (MODE_EXIT)
{
/* Split the edge from the entry block and the fallthrough edge to the
exit block, so that we can note that there NORMAL_MODE is supplied /
REG_SET_TO_HARD_REG_SET (live_now,
bb->global_live_at_start);
- for (insn = bb->head;
- insn != NULL && insn != NEXT_INSN (bb->end);
+ for (insn = BB_HEAD (bb);
+ insn != NULL && insn != NEXT_INSN (BB_END (bb));
insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
add_seginfo (info + bb->index, ptr);
RESET_BIT (transp[bb->index], j);
}
-
+#ifdef MODE_AFTER
+ last_mode = MODE_AFTER (last_mode, insn);
+#endif
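MODE_AFTER lets a target report the mode an entity is left in after INSN executes, and the newly added hook above is consulted once per insn during the scan.  A hypothetical target definition, purely illustrative (the two-argument form follows the usage above; the body is invented):

/* Hypothetical port whose call insns always reset the entity to mode 0;
   all other insns leave the tracked mode unchanged.  */
#define MODE_AFTER(MODE, INSN) \
  (GET_CODE (INSN) == CALL_INSN ? 0 : (MODE))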
/* Update LIVE_NOW. */
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
/* Check for blocks without ANY mode requirements. */
if (last_mode == no_mode)
{
- ptr = new_seginfo (no_mode, bb->end, bb->index, live_now);
+ ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
add_seginfo (info + bb->index, ptr);
}
}
-#ifdef NORMAL_MODE
+#if defined (MODE_ENTRY) && defined (MODE_EXIT)
{
- int mode = NORMAL_MODE (e);
+ int mode = MODE_ENTRY (e);
if (mode != no_mode)
{
info[bb->index].computing = mode;
if (pre_exit)
- info[pre_exit->index].seginfo->mode = mode;
+ info[pre_exit->index].seginfo->mode = MODE_EXIT (e);
}
}
-#endif /* NORMAL_MODE */
+#endif /* MODE_ENTRY && MODE_EXIT */
if (eg->flags & EDGE_ABNORMAL)
{
emited = true;
- if (GET_CODE (src_bb->end) == JUMP_INSN)
- emit_insn_before (mode_set, src_bb->end);
+ if (JUMP_P (BB_END (src_bb)))
+ emit_insn_before (mode_set, BB_END (src_bb));
/* It doesn't make sense to switch to normal mode
after a CALL_INSN, so we're going to abort if we
find one. The cases in which a CALL_INSN may
the call (it wouldn't make sense, anyway). In
the case of EH edges, EH entry points also start
in normal mode, so a similar reasoning applies. */
- else if (GET_CODE (src_bb->end) == INSN)
- emit_insn_after (mode_set, src_bb->end);
+ else if (NONJUMP_INSN_P (BB_END (src_bb)))
+ emit_insn_after (mode_set, BB_END (src_bb));
else
abort ();
bb_info[j][src_bb->index].computing = mode;
continue;
emited = true;
- if (GET_CODE (ptr->insn_ptr) == NOTE
+ if (NOTE_P (ptr->insn_ptr)
&& (NOTE_LINE_NUMBER (ptr->insn_ptr)
== NOTE_INSN_BASIC_BLOCK))
emit_insn_after (mode_set, ptr->insn_ptr);
if (need_commit)
commit_edge_insertions ();
-#ifdef NORMAL_MODE
+#if defined (MODE_ENTRY) && defined (MODE_EXIT)
cleanup_cfg (CLEANUP_NO_INSN_DEL);
#else
if (!need_commit && !emited)