/* Dataflow support routines.
- Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
mhayes@redhat.com)
df_init simply creates a poor man's object (df) that needs to be
passed to all the dataflow routines. df_finish destroys this
-object and frees up any allocated memory.
+object and frees up any allocated memory. DF_ALL says to analyze
+everything.
df_analyse performs the following:
If the insns are in SSA form then the reg-def and use-def lists
should only contain the single defining ref.
+
TODO:
1) Incremental dataflow analysis.
or deleted refs. Currently the global dataflow information is
recomputed from scratch but this could be propagated more efficiently.
-2) Improved global data flow computation using depth first search.
-
-3) Reduced memory requirements.
+2) Reduced memory requirements.
We could operate a pool of ref structures. When a ref is deleted it
gets returned to the pool (say by linking on to a chain of free refs).
periodically squeeze the def and use tables and associated bitmaps and
renumber the def and use ids.
-4) Ordering of reg-def and reg-use lists.
+3) Ordering of reg-def and reg-use lists.
Should the first entry in the def list be the first def (within a BB)?
Similarly, should the first entry in the use list be the last use
(within a BB)?
-5) Working with a sub-CFG.
+4) Working with a sub-CFG.
-Often the whole CFG does not need to be analysed, for example,
+Often the whole CFG does not need to be analyzed, for example,
when optimising a loop, only certain registers are of interest.
Perhaps there should be a bitmap argument to df_analyse to specify
- which registers should be analysed? */
+which registers should be analyzed?
+
+
+NOTES:
-#define HANDLE_SUBREG
+Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
+both a use and a def. These are both marked read/write to show that they
+are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
+will generate a use of reg 42 followed by a def of reg 42 (both marked
+read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
+generates a use of reg 41 then a def of reg 41 (both marked read/write),
+even though reg 41 is decremented before it is used for the memory
+address in this second example.
+
+A set to a REG inside a ZERO_EXTRACT, SIGN_EXTRACT, or SUBREG invokes
+a read-modify-write operation. We generate both a use and a def
+and again mark them read/write.
+*/
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "regs.h"
-#include "obstack.h"
+#include "alloc-pool.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "sbitmap.h"
#include "df.h"
#include "fibheap.h"
-#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
-do { \
- unsigned int node_; \
- EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, \
- {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
-
-#define FOR_EACH_BB_IN_BITMAP_REV(BITMAP, MIN, BB, CODE) \
-do { \
- unsigned int node_; \
- EXECUTE_IF_SET_IN_BITMAP_REV (BITMAP, node_, \
- {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
-
-#define FOR_EACH_BB_IN_SBITMAP(BITMAP, MIN, BB, CODE) \
-do { \
- unsigned int node_; \
- EXECUTE_IF_SET_IN_SBITMAP (BITMAP, MIN, node_, \
- {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
-
-#define obstack_chunk_alloc xmalloc
-#define obstack_chunk_free free
-
-static struct obstack df_ref_obstack;
+#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
+ do \
+ { \
+ unsigned int node_; \
+ EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, \
+ {(BB) = BASIC_BLOCK (node_); CODE;}); \
+ } \
+ while (0)
+
+static alloc_pool df_ref_pool;
+static alloc_pool df_link_pool;
static struct df *ddf;
static void df_reg_table_realloc PARAMS((struct df *, int));
-#if 0
-static void df_def_table_realloc PARAMS((struct df *, int));
-#endif
-static void df_insn_table_realloc PARAMS((struct df *, int));
+static void df_insn_table_realloc PARAMS((struct df *, unsigned int));
static void df_bitmaps_alloc PARAMS((struct df *, int));
static void df_bitmaps_free PARAMS((struct df *, int));
static void df_free PARAMS((struct df *));
enum df_confluence_op,
transfer_function_sbitmap,
sbitmap, sbitmap, void *));
-static inline bool read_modify_subreg_p PARAMS ((rtx));
\f
/* Local memory allocation/deallocation routines. */
-/* Increase the insn info table by SIZE more elements. */
+/* Increase the insn info table to have space for at least SIZE + 1
+ elements. */
static void
df_insn_table_realloc (df, size)
struct df *df;
- int size;
+ unsigned int size;
{
- /* Make table 25 percent larger by default. */
- if (! size)
- size = df->insn_size / 4;
+ size++;
+ if (size <= df->insn_size)
+ return;
- size += df->insn_size;
+ /* Make the table a little larger than requested, so we do not need
+ to enlarge it so often. */
+ size += df->insn_size / 4;
df->insns = (struct insn_info *)
xrealloc (df->insns, size * sizeof (struct insn_info));
size = df->reg_size / 4;
size += df->reg_size;
+ if (size < max_reg_num ())
+ size = max_reg_num ();
df->regs = (struct reg_info *)
xrealloc (df->regs, size * sizeof (struct reg_info));
}
-#if 0
-/* Not currently used. */
-static void
-df_def_table_realloc (df, size)
- struct df *df;
- int size;
-{
- int i;
- struct ref *refs;
-
- /* Make table 25 percent larger by default. */
- if (! size)
- size = df->def_size / 4;
-
- df->def_size += size;
- df->defs = xrealloc (df->defs,
- df->def_size * sizeof (*df->defs));
-
- /* Allocate a new block of memory and link into list of blocks
- that will need to be freed later. */
-
- refs = xmalloc (size * sizeof (*refs));
-
- /* Link all the new refs together, overloading the chain field. */
- for (i = 0; i < size - 1; i++)
- refs[i].chain = (struct df_link *)(refs + i + 1);
- refs[size - 1].chain = 0;
-}
-#endif
-
-
-
/* Allocate bitmaps for each basic block. */
static void
df_bitmaps_alloc (df, flags)
basic_block bb;
/* Free the bitmaps if they need resizing. */
- if ((flags & DF_LR) && df->n_regs < (unsigned int)max_reg_num ())
+ if ((flags & DF_LR) && df->n_regs < (unsigned int) max_reg_num ())
dflags |= DF_LR | DF_RU;
if ((flags & DF_RU) && df->n_uses < df->use_id)
dflags |= DF_RU;
}
-/* Allocate and initialise dataflow memory. */
+/* Allocate and initialize dataflow memory. */
static void
df_alloc (df, n_regs)
struct df *df;
int n_insns;
basic_block bb;
- gcc_obstack_init (&df_ref_obstack);
+ df_link_pool = create_alloc_pool ("df_link pool", sizeof (struct df_link),
+ 100);
+ df_ref_pool = create_alloc_pool ("df_ref pool", sizeof (struct ref), 100);
/* Perhaps we should use LUIDs to save memory for the insn_refs
table. This is only a small saving; a few pointers. */
BITMAP_XFREE (df->all_blocks);
df->all_blocks = 0;
- obstack_free (&df_ref_obstack, NULL);
+ free_alloc_pool (df_ref_pool);
+ free_alloc_pool (df_link_pool);
+
}
\f
/* Local miscellaneous routines. */
{
struct df_link *link;
- link = (struct df_link *) obstack_alloc (&df_ref_obstack,
- sizeof (*link));
+ link = pool_alloc (df_link_pool);
link->next = next;
link->ref = ref;
return link;
enum df_ref_flags ref_flags;
{
struct ref *this_ref;
- unsigned int uid;
- this_ref = (struct ref *) obstack_alloc (&df_ref_obstack,
- sizeof (*this_ref));
+ this_ref = pool_alloc (df_ref_pool);
DF_REF_REG (this_ref) = reg;
DF_REF_LOC (this_ref) = loc;
DF_REF_INSN (this_ref) = insn;
DF_REF_CHAIN (this_ref) = 0;
DF_REF_TYPE (this_ref) = ref_type;
DF_REF_FLAGS (this_ref) = ref_flags;
- uid = INSN_UID (insn);
if (ref_type == DF_REF_REG_DEF)
{
{
loc = &SUBREG_REG (reg);
reg = *loc;
+ ref_flags |= DF_REF_STRIPPED;
}
regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
if (! (df->flags & DF_HARD_REGS))
return;
- /* GET_MODE (reg) is correct here. We don't want to go into a SUBREG
+ /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG
for the mode, because we only want to add references to regs, which
- are really referenced. E.g. a (subreg:SI (reg:DI 0) 0) does _not_
+ are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_
reference the whole reg 0 in DI mode (which would also include
reg 1, at least, if 0 and 1 are SImode registers). */
- endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ endregno = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ if (GET_CODE (reg) == SUBREG)
+ regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
+ SUBREG_BYTE (reg), GET_MODE (reg));
+ endregno += regno;
for (i = regno; i < endregno; i++)
df_ref_record_1 (df, regno_reg_rtx[i],
}
}
-/* Writes to SUBREG of inndermode wider than word and outermode shorter than
- word are read-modify-write. */
-static inline bool
+/* Return nonzero if writes to paradoxical SUBREGs, or SUBREGs which
+ are too narrow, are read-modify-write. */
+bool
read_modify_subreg_p (x)
rtx x;
{
+ unsigned int isize, osize;
if (GET_CODE (x) != SUBREG)
return false;
- if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) <= UNITS_PER_WORD)
- return false;
- if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
- return false;
- return true;
+ isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
+ osize = GET_MODE_SIZE (GET_MODE (x));
+ /* Paradoxical subreg writes do not leave a trace of the old content. */
+ return (isize > osize && isize > UNITS_PER_WORD);
}
+
/* Process all the registers defined in the rtx, X. */
static void
df_def_record_1 (df, x, bb, insn)
basic_block bb;
rtx insn;
{
- rtx *loc = &SET_DEST (x);
- rtx dst = *loc;
+ rtx *loc;
+ rtx dst;
enum df_ref_flags flags = 0;
+ /* We may recursively call ourselves on EXPR_LIST when dealing with a PARALLEL
+ construct. */
+ if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER)
+ loc = &XEXP (x, 0);
+ else
+ loc = &SET_DEST (x);
+ dst = *loc;
+
/* Some targets place small structures in registers for
return values of functions. */
if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
int i;
for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
- df_def_record_1 (df, XVECEXP (dst, 0, i), bb, insn);
+ {
+ rtx temp = XVECEXP (dst, 0, i);
+ if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER
+ || GET_CODE (temp) == SET)
+ df_def_record_1 (df, temp, bb, insn);
+ }
return;
}
- /* May be, we should flag the use of strict_low_part somehow. Might be
- handy for the reg allocator. */
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (GET_CODE (dst) == SUBREG
+ && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
+ GET_MODE (SUBREG_REG (dst))))
+ flags |= DF_REF_MODE_CHANGE;
+#endif
+
+ /* Maybe, we should flag the use of STRICT_LOW_PART somehow. It might
+ be handy for the reg allocator. */
while (GET_CODE (dst) == STRICT_LOW_PART
|| GET_CODE (dst) == ZERO_EXTRACT
|| GET_CODE (dst) == SIGN_EXTRACT
- || read_modify_subreg_p (dst))
+ || ((df->flags & DF_FOR_REGALLOC) == 0
+ && read_modify_subreg_p (dst)))
{
- /* Strict low part always contains SUBREG, but we don't want to make
+ /* Strict low part always contains SUBREG, but we do not want to make
it appear outside, as whole register is always considered. */
if (GET_CODE (dst) == STRICT_LOW_PART)
{
loc = &XEXP (dst, 0);
dst = *loc;
}
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (GET_CODE (dst) == SUBREG
+ && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
+ GET_MODE (SUBREG_REG (dst))))
+ flags |= DF_REF_MODE_CHANGE;
+#endif
loc = &XEXP (dst, 0);
dst = *loc;
flags |= DF_REF_READ_WRITE;
}
- if (GET_CODE (dst) == REG
- || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
- df_ref_record (df, dst, loc, insn, DF_REF_REG_DEF, flags);
+ if (GET_CODE (dst) == REG
+ || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
+ df_ref_record (df, dst, loc, insn, DF_REF_REG_DEF, flags);
}
case CONST_DOUBLE:
case CONST_VECTOR:
case PC:
+ case CC0:
case ADDR_VEC:
case ADDR_DIFF_VEC:
return;
case SUBREG:
/* While we're here, optimize this case. */
- /* In case the SUBREG is not of a register, don't optimize. */
+ /* In case the SUBREG is not of a REG, do not optimize. */
if (GET_CODE (SUBREG_REG (x)) != REG)
{
loc = &SUBREG_REG (x);
df_uses_record (df, loc, ref_type, bb, insn, flags);
return;
}
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (x),
+ GET_MODE (SUBREG_REG (x))))
+ flags |= DF_REF_MODE_CHANGE;
+#endif
/* ... Fall through ... */
case REG:
- /* See a register (or subreg) other than being set. */
+ /* See a REG (or SUBREG) other than being set. */
df_ref_record (df, x, loc, insn, ref_type, flags);
return;
switch (GET_CODE (dst))
{
+ enum df_ref_flags use_flags;
case SUBREG:
- if (read_modify_subreg_p (dst))
+ if ((df->flags & DF_FOR_REGALLOC) == 0
+ && read_modify_subreg_p (dst))
{
+ use_flags = DF_REF_READ_WRITE;
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
+ GET_MODE (SUBREG_REG (dst))))
+ use_flags |= DF_REF_MODE_CHANGE;
+#endif
df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
- insn, DF_REF_READ_WRITE);
+ insn, use_flags);
break;
}
/* ... FALLTHRU ... */
case REG:
+ case PARALLEL:
case PC:
- break;
+ case CC0:
+ break;
case MEM:
df_uses_record (df, &XEXP (dst, 0),
DF_REF_REG_MEM_STORE,
bb, insn, 0);
break;
case STRICT_LOW_PART:
- /* A strict_low_part uses the whole reg not only the subreg. */
+ /* A strict_low_part uses the whole REG and not just the SUBREG. */
dst = XEXP (dst, 0);
if (GET_CODE (dst) != SUBREG)
abort ();
+ use_flags = DF_REF_READ_WRITE;
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
+ GET_MODE (SUBREG_REG (dst))))
+ use_flags |= DF_REF_MODE_CHANGE;
+#endif
df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
- insn, DF_REF_READ_WRITE);
+ insn, use_flags);
break;
case ZERO_EXTRACT:
case SIGN_EXTRACT:
{
switch (REG_NOTE_KIND (note))
{
- case REG_EQUIV:
- case REG_EQUAL:
- df_uses_record (df, &XEXP (note, 0), DF_REF_REG_USE,
- bb, insn, 0);
- default:
- break;
+ case REG_EQUIV:
+ case REG_EQUAL:
+ df_uses_record (df, &XEXP (note, 0), DF_REF_REG_USE,
+ bb, insn, 0);
+ default:
+ break;
}
}
df_uses_record (df, &PATTERN (insn),
DF_REF_REG_USE, bb, insn, 0);
-
if (GET_CODE (insn) == CALL_INSN)
{
rtx note;
struct ref *def = link->ref;
unsigned int dregno = DF_REF_REGNO (def);
+ /* Do not add refs to the chain twice, i.e., only add new
+ refs. XXX the same could be done by testing if the
+ current insn is a modified (or a new) one. This would be
+ faster. */
+ if (DF_REF_ID (def) < df->def_id_save)
+ continue;
+
df->regs[dregno].defs
= df_link_create (def, df->regs[dregno].defs);
}
{
rtx insn;
- /* Scan in forward order so that the last uses appear at the
- start of the chain. */
+ /* Scan in forward order so that the last uses appear at the start
+ of the chain. */
for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
insn = NEXT_INSN (insn))
struct ref *use = link->ref;
unsigned int uregno = DF_REF_REGNO (use);
+ /* Do not add refs to the chain twice, i.e., only add new
+ refs. XXX the same could be done by testing if the
+ current insn is a modified (or a new) one. This would be
+ faster. */
+ if (DF_REF_ID (use) < df->use_id_save)
+ continue;
+
df->regs[uregno].uses
= df_link_create (use, df->regs[uregno].uses);
}
}
- /* For each def in insn...record the last def of each reg. */
+ /* For each def in insn... record the last def of each reg. */
for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
{
struct ref *def = def_link->ref;
{
*changed = bitmap_union_of_diff (out, gen, in, kill);
}
+
+
static void
df_ru_transfer_function (bb, changed, in, out, gen, kill, data)
int bb ATTRIBUTE_UNUSED;
*changed = bitmap_union_of_diff (in, gen, out, kill);
}
+
static void
df_lr_transfer_function (bb, changed, in, out, use, def, data)
int bb ATTRIBUTE_UNUSED;
return total;
}
+
/* Perform dataflow analysis using existing DF structure for blocks
within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */
static void
aflags |= DF_LR;
if (! blocks)
- blocks = df->all_blocks;
+ blocks = df->all_blocks;
df->flags = flags;
if (update)
df_reg_use_chain_create (df, blocks);
}
- df->dfs_order = xmalloc (sizeof(int) * n_basic_blocks);
- df->rc_order = xmalloc (sizeof(int) * n_basic_blocks);
- df->rts_order = xmalloc (sizeof(int) * n_basic_blocks);
- df->inverse_dfs_map = xmalloc (sizeof(int) * last_basic_block);
- df->inverse_rc_map = xmalloc (sizeof(int) * last_basic_block);
- df->inverse_rts_map = xmalloc (sizeof(int) * last_basic_block);
+ df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
+ df->rc_order = xmalloc (sizeof (int) * n_basic_blocks);
+ df->rts_order = xmalloc (sizeof (int) * n_basic_blocks);
+ df->inverse_dfs_map = xmalloc (sizeof (int) * last_basic_block);
+ df->inverse_rc_map = xmalloc (sizeof (int) * last_basic_block);
+ df->inverse_rts_map = xmalloc (sizeof (int) * last_basic_block);
flow_depth_first_order_compute (df->dfs_order, df->rc_order);
flow_reverse_top_sort_order_compute (df->rts_order);
- for (i = 0; i < n_basic_blocks; i ++)
- {
- df->inverse_dfs_map[df->dfs_order[i]] = i;
- df->inverse_rc_map[df->rc_order[i]] = i;
- df->inverse_rts_map[df->rts_order[i]] = i;
- }
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ df->inverse_dfs_map[df->dfs_order[i]] = i;
+ df->inverse_rc_map[df->rc_order[i]] = i;
+ df->inverse_rts_map[df->rts_order[i]] = i;
+ }
if (aflags & DF_RD)
{
/* Compute the sets of gens and kills for the defs of each bb. */
kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
}
iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
- FORWARD, UNION, df_rd_transfer_function,
+ DF_FORWARD, DF_UNION, df_rd_transfer_function,
df->inverse_rc_map, NULL);
free (in);
free (out);
kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
}
iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
- BACKWARD, UNION, df_ru_transfer_function,
+ DF_BACKWARD, DF_UNION, df_ru_transfer_function,
df->inverse_rts_map, NULL);
free (in);
free (out);
/* Free up bitmaps that are no longer required. */
if (dflags)
- df_bitmaps_free (df, dflags);
+ df_bitmaps_free (df, dflags);
if (aflags & DF_LR)
{
def[bb->index] = DF_BB_INFO (df, bb)->lr_def;
}
iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
- BACKWARD, UNION, df_lr_transfer_function,
+ DF_BACKWARD, DF_UNION, df_lr_transfer_function,
df->inverse_rts_map, NULL);
free (in);
free (out);
{
df_reg_info_compute (df, df->all_blocks);
}
+
free (df->dfs_order);
free (df->rc_order);
free (df->rts_order);
}
-/* Initialise dataflow analysis. */
+/* Initialize dataflow analysis. */
struct df *
df_init ()
{
rtx insn;
int count = 0;
- /* While we have to scan the chain of insns for this BB, we don't
+ /* While we have to scan the chain of insns for this BB, we do not
need to allocate and queue a long chain of BB/INSN pairs. Using
a bitmap for insns_modified saves memory and avoids queuing
duplicates. */
/* Scan the insn for refs. */
df_insn_refs_record (df, bb, insn);
-
- bitmap_clear_bit (df->insns_modified, uid);
count++;
}
if (insn == bb->end)
basic_block bb;
int count = 0;
- if ((unsigned int)max_reg_num () >= df->reg_size)
+ if ((unsigned int) max_reg_num () >= df->reg_size)
df_reg_table_realloc (df, 0);
df_refs_queue (df);
}
-/* Return non-zero if any of the requested blocks in the bitmap
+/* Return nonzero if any of the requested blocks in the bitmap
BLOCKS have been modified. */
static int
df_modified_p (df, blocks)
}
-/* Analyse dataflow info for the basic blocks specified by the bitmap
+/* Analyze dataflow info for the basic blocks specified by the bitmap
BLOCKS, or for the whole CFG if BLOCKS is zero, or just for the
modified blocks if BLOCKS is -1. */
int
/* Recompute everything from scratch. */
df_free (df);
}
- /* Allocate and initialise data structures. */
+ /* Allocate and initialize data structures. */
df_alloc (df, max_reg_num ());
df_analyse_1 (df, 0, flags, 0);
update = 1;
df_analyse_1 (df, blocks, flags, 1);
bitmap_zero (df->bbs_modified);
+ bitmap_zero (df->insns_modified);
}
}
return update;
unsigned int uid;
uid = INSN_UID (insn);
-
if (uid >= df->insn_size)
- df_insn_table_realloc (df, 0);
+ df_insn_table_realloc (df, uid);
bitmap_set_bit (df->bbs_modified, bb->index);
bitmap_set_bit (df->insns_modified, uid);
uid = INSN_UID (insn);
if (uid >= df->insn_size)
- df_insn_table_realloc (df, 0);
+ df_insn_table_realloc (df, uid);
df_insn_modify (df, bb, insn);
}
-/* Return non-zero if all DF dominates all the uses within the bitmap
+/* Return nonzero if the def DEF dominates all the uses within the bitmap
BLOCKS. */
static int
df_def_dominates_uses_p (df, def, blocks)
}
-/* Return non-zero if all the defs of INSN within BB dominates
+/* Return nonzero if all the defs of INSN within BB dominate
all the corresponding uses. */
int
df_insn_dominates_uses_p (df, bb, insn, blocks)
}
-/* Return non-zero if REG used in multiple basic blocks. */
+/* Return nonzero if REG is used in multiple basic blocks. */
int
df_reg_global_p (df, reg)
struct df *df;
}
-/* Return non-zero if REG live at start of BB. */
+/* Return nonzero if REG is live at start of BB. */
int
df_bb_reg_live_start_p (df, bb, reg)
struct df *df ATTRIBUTE_UNUSED;
}
-/* Return non-zero if REG live at end of BB. */
+/* Return nonzero if REG is live at end of BB. */
int
df_bb_reg_live_end_p (df, bb, reg)
struct df *df ATTRIBUTE_UNUSED;
fprintf (file, "}");
}
+
+/* Dump a chain of refs with the associated regno. */
static void
df_chain_dump_regno (link, file)
struct df_link *link;
fprintf (file, "}");
}
+
/* Dump dataflow info. */
void
df_dump (df, flags, file)
&& (reg_info[j].n_uses || reg_info[j].n_defs))
|| ((flags & DF_RD_CHAIN) && reg_info[j].defs)
|| ((flags & DF_RU_CHAIN) && reg_info[j].uses))
- {
- fprintf (file, "reg %d", j);
- if ((flags & DF_RD_CHAIN) && (flags & DF_RU_CHAIN))
- {
- basic_block bb = df_regno_bb (df, j);
+ {
+ fprintf (file, "reg %d", j);
+ if ((flags & DF_RD_CHAIN) && (flags & DF_RU_CHAIN))
+ {
+ basic_block bb = df_regno_bb (df, j);
- if (bb)
- fprintf (file, " bb %d", bb->index);
- else
- fprintf (file, " bb ?");
- }
- if (flags & DF_REG_INFO)
- {
- fprintf (file, " life %d", reg_info[j].lifetime);
- }
+ if (bb)
+ fprintf (file, " bb %d", bb->index);
+ else
+ fprintf (file, " bb ?");
+ }
+ if (flags & DF_REG_INFO)
+ {
+ fprintf (file, " life %d", reg_info[j].lifetime);
+ }
- if ((flags & DF_REG_INFO) || (flags & DF_RD_CHAIN))
- {
- fprintf (file, " defs ");
- if (flags & DF_REG_INFO)
- fprintf (file, "%d ", reg_info[j].n_defs);
- if (flags & DF_RD_CHAIN)
- df_chain_dump (reg_info[j].defs, file);
- }
+ if ((flags & DF_REG_INFO) || (flags & DF_RD_CHAIN))
+ {
+ fprintf (file, " defs ");
+ if (flags & DF_REG_INFO)
+ fprintf (file, "%d ", reg_info[j].n_defs);
+ if (flags & DF_RD_CHAIN)
+ df_chain_dump (reg_info[j].defs, file);
+ }
- if ((flags & DF_REG_INFO) || (flags & DF_RU_CHAIN))
- {
- fprintf (file, " uses ");
- if (flags & DF_REG_INFO)
- fprintf (file, "%d ", reg_info[j].n_uses);
- if (flags & DF_RU_CHAIN)
- df_chain_dump (reg_info[j].uses, file);
- }
+ if ((flags & DF_REG_INFO) || (flags & DF_RU_CHAIN))
+ {
+ fprintf (file, " uses ");
+ if (flags & DF_REG_INFO)
+ fprintf (file, "%d ", reg_info[j].n_uses);
+ if (flags & DF_RU_CHAIN)
+ df_chain_dump (reg_info[j].uses, file);
+ }
- fprintf (file, "\n");
- }
+ fprintf (file, "\n");
+ }
}
}
fprintf (file, "\n");
if (df->insns[uid].defs)
bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
- else if (df->insns[uid].uses)
+ else if (df->insns[uid].uses)
bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
else
bbi = -1;
fprintf (file, "\n");
}
+
void
df_insn_debug_regno (df, insn, file)
struct df *df;
if (df->insns[uid].defs)
bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
- else if (df->insns[uid].uses)
+ else if (df->insns[uid].uses)
bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
else
bbi = -1;
fprintf (file, "\n");
}
+
static void
df_regno_debug (df, regno, file)
struct df *df;
df_chain_dump (DF_REF_CHAIN (ref), file);
fprintf (file, "\n");
}
-
+\f
+/* Functions for debugging from GDB. */
void
debug_df_insn (insn)
df_chain_dump (link, stderr);
fputc ('\n', stderr);
}
+\f
/* Hybrid search algorithm from "Implementation Techniques for
Efficient Data-Flow Analysis of Large Programs". */
int changed;
int i = block->index;
edge e;
- basic_block bb= block;
+ basic_block bb = block;
+
SET_BIT (visited, block->index);
if (TEST_BIT (pending, block->index))
{
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
- /* Calculate <conf_op> of predecessor_outs */
+ /* Calculate <conf_op> of predecessor_outs. */
bitmap_zero (in[i]);
for (e = bb->pred; e != 0; e = e->pred_next)
{
continue;
switch (conf_op)
{
- case UNION:
+ case DF_UNION:
bitmap_a_or_b (in[i], in[i], out[e->src->index]);
break;
- case INTERSECTION:
+ case DF_INTERSECTION:
bitmap_a_and_b (in[i], in[i], out[e->src->index]);
break;
}
}
else
{
- /* Calculate <conf_op> of successor ins */
- bitmap_zero(out[i]);
+ /* Calculate <conf_op> of successor ins. */
+ bitmap_zero (out[i]);
for (e = bb->succ; e != 0; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR)
continue;
switch (conf_op)
{
- case UNION:
+ case DF_UNION:
bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
break;
- case INTERSECTION:
+ case DF_INTERSECTION:
bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
break;
}
RESET_BIT (pending, i);
if (changed)
{
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
}
}
}
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
int changed;
int i = block->index;
edge e;
- basic_block bb= block;
+ basic_block bb = block;
+
SET_BIT (visited, block->index);
if (TEST_BIT (pending, block->index))
{
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
- /* Calculate <conf_op> of predecessor_outs */
+ /* Calculate <conf_op> of predecessor_outs. */
sbitmap_zero (in[i]);
for (e = bb->pred; e != 0; e = e->pred_next)
{
continue;
switch (conf_op)
{
- case UNION:
+ case DF_UNION:
sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
break;
- case INTERSECTION:
+ case DF_INTERSECTION:
sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
break;
}
}
else
{
- /* Calculate <conf_op> of successor ins */
- sbitmap_zero(out[i]);
+ /* Calculate <conf_op> of successor ins. */
+ sbitmap_zero (out[i]);
for (e = bb->succ; e != 0; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR)
continue;
switch (conf_op)
{
- case UNION:
+ case DF_UNION:
sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
break;
- case INTERSECTION:
+ case DF_INTERSECTION:
sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
break;
}
}
}
- /* Common part */
+ /* Common part. */
(*transfun)(i, &changed, in[i], out[i], gen[i], kill[i], data);
RESET_BIT (pending, i);
if (changed)
{
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
}
}
}
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
}
-
-
/* gen = GEN set.
kill = KILL set.
in, out = Filled in by function.
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
+
pending = sbitmap_alloc (last_basic_block);
visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
+
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
{
fibheap_insert (worklist, order[i], (void *) (size_t) i);
SET_BIT (pending, i);
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
sbitmap_copy (out[i], gen[i]);
else
sbitmap_copy (in[i], gen[i]);
});
+
while (sbitmap_first_set_bit (pending) != -1)
{
while (!fibheap_empty (worklist))
hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
conf_op, transfun, visited, pending, data);
}
+
if (sbitmap_first_set_bit (pending) != -1)
{
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
break;
}
}
+
sbitmap_free (pending);
sbitmap_free (visited);
fibheap_delete (worklist);
}
+
/* Exactly the same as iterative_dataflow_sbitmap, except it works on
- bitmaps instead */
+ bitmaps instead. */
void
iterative_dataflow_bitmap (in, out, gen, kill, blocks,
dir, conf_op, transfun, order, data)
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
+
pending = sbitmap_alloc (last_basic_block);
visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
+
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
{
fibheap_insert (worklist, order[i], (void *) (size_t) i);
SET_BIT (pending, i);
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
bitmap_copy (out[i], gen[i]);
else
bitmap_copy (in[i], gen[i]);
});
+
while (sbitmap_first_set_bit (pending) != -1)
{
while (!fibheap_empty (worklist))
hybrid_search_bitmap (bb, in, out, gen, kill, dir,
conf_op, transfun, visited, pending, data);
}
+
if (sbitmap_first_set_bit (pending) != -1)
{
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,