/* Dataflow support routines.
- Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
mhayes@redhat.com)
Perhaps there should be a bitmap argument to df_analyse to specify
which registers should be analysed? */
-#define HANDLE_SUBREG
-
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "obstack.h"
#include "hard-reg-set.h"
#include "basic-block.h"
+#include "sbitmap.h"
#include "bitmap.h"
#include "df.h"
-
-
-#define FOR_ALL_BBS(BB, CODE) \
-do { \
- int node_; \
- for (node_ = 0; node_ < n_basic_blocks; node_++) \
- {(BB) = BASIC_BLOCK (node_); CODE;};} while (0)
+#include "fibheap.h"
#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
do { \
EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, \
{(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
-#define FOR_EACH_BB_IN_BITMAP_REV(BITMAP, MIN, BB, CODE) \
-do { \
- unsigned int node_; \
- EXECUTE_IF_SET_IN_BITMAP_REV (BITMAP, node_, \
- {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
-
-#define FOR_EACH_BB_IN_SBITMAP(BITMAP, MIN, BB, CODE) \
-do { \
- unsigned int node_; \
- EXECUTE_IF_SET_IN_SBITMAP (BITMAP, MIN, node_, \
- {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
-
-#define obstack_chunk_alloc xmalloc
-#define obstack_chunk_free free
-
static struct obstack df_ref_obstack;
static struct df *ddf;
#if 0
static void df_def_table_realloc PARAMS((struct df *, int));
#endif
-static void df_insn_table_realloc PARAMS((struct df *, int));
+static void df_insn_table_realloc PARAMS((struct df *, unsigned int));
static void df_bitmaps_alloc PARAMS((struct df *, int));
static void df_bitmaps_free PARAMS((struct df *, int));
static void df_free PARAMS((struct df *));
#endif
static struct ref *df_ref_create PARAMS((struct df *,
- rtx, rtx *, basic_block, rtx,
- enum df_ref_type));
+ rtx, rtx *, rtx,
+ enum df_ref_type, enum df_ref_flags));
static void df_ref_record_1 PARAMS((struct df *, rtx, rtx *,
- basic_block, rtx, enum df_ref_type));
+ rtx, enum df_ref_type,
+ enum df_ref_flags));
static void df_ref_record PARAMS((struct df *, rtx, rtx *,
- basic_block bb, rtx, enum df_ref_type));
+ rtx, enum df_ref_type,
+ enum df_ref_flags));
static void df_def_record_1 PARAMS((struct df *, rtx, basic_block, rtx));
static void df_defs_record PARAMS((struct df *, rtx, basic_block, rtx));
static void df_uses_record PARAMS((struct df *, rtx *,
- enum df_ref_type, basic_block, rtx));
+ enum df_ref_type, basic_block, rtx,
+ enum df_ref_flags));
static void df_insn_refs_record PARAMS((struct df *, basic_block, rtx));
static void df_bb_refs_record PARAMS((struct df *, basic_block));
static void df_refs_record PARAMS((struct df *, bitmap));
-static int df_visit_next_rc PARAMS ((struct df *, sbitmap));
-static int df_visit_next_rts PARAMS ((struct df *, sbitmap));
static void df_bb_reg_def_chain_create PARAMS((struct df *, basic_block));
static void df_reg_def_chain_create PARAMS((struct df *, bitmap));
static void df_bb_reg_use_chain_create PARAMS((struct df *, basic_block));
static void df_du_chain_create PARAMS((struct df *, bitmap));
static void df_bb_ud_chain_create PARAMS((struct df *, basic_block));
static void df_ud_chain_create PARAMS((struct df *, bitmap));
-static void df_rd_global_compute PARAMS((struct df *, bitmap));
-static void df_ru_global_compute PARAMS((struct df *, bitmap));
-static void df_lr_global_compute PARAMS((struct df *, bitmap));
static void df_bb_rd_local_compute PARAMS((struct df *, basic_block));
static void df_rd_local_compute PARAMS((struct df *, bitmap));
static void df_bb_ru_local_compute PARAMS((struct df *, basic_block));
static void df_chain_dump_regno PARAMS((struct df_link *, FILE *file));
static void df_regno_debug PARAMS ((struct df *, unsigned int, FILE *));
static void df_ref_debug PARAMS ((struct df *, struct ref *, FILE *));
+static void df_rd_transfer_function PARAMS ((int, int *, bitmap, bitmap,
+ bitmap, bitmap, void *));
+static void df_ru_transfer_function PARAMS ((int, int *, bitmap, bitmap,
+ bitmap, bitmap, void *));
+static void df_lr_transfer_function PARAMS ((int, int *, bitmap, bitmap,
+ bitmap, bitmap, void *));
+static void hybrid_search_bitmap PARAMS ((basic_block, bitmap *, bitmap *,
+ bitmap *, bitmap *, enum df_flow_dir,
+ enum df_confluence_op,
+ transfer_function_bitmap,
+ sbitmap, sbitmap, void *));
+static void hybrid_search_sbitmap PARAMS ((basic_block, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *, enum df_flow_dir,
+ enum df_confluence_op,
+ transfer_function_sbitmap,
+ sbitmap, sbitmap, void *));
+static inline bool read_modify_subreg_p PARAMS ((rtx));
\f
/* Local memory allocation/deallocation routines. */
-/* Increase the insn info table by SIZE more elements. */
+/* Increase the insn info table to have space for at least SIZE + 1
+ elements. */
static void
df_insn_table_realloc (df, size)
struct df *df;
- int size;
+ unsigned int size;
{
- /* Make table 25 percent larger by default. */
- if (! size)
- size = df->insn_size / 4;
+ size++;
+ if (size <= df->insn_size)
+ return;
- size += df->insn_size;
+ /* Make the table a little larger than requested, so we don't need
+ to enlarge it so often. */
+ size += df->insn_size / 4;
df->insns = (struct insn_info *)
xrealloc (df->insns, size * sizeof (struct insn_info));
size = df->reg_size / 4;
size += df->reg_size;
+ if (size < max_reg_num ())
+ size = max_reg_num ();
df->regs = (struct reg_info *)
xrealloc (df->regs, size * sizeof (struct reg_info));
struct df *df;
int flags;
{
- unsigned int i;
int dflags = 0;
+ basic_block bb;
/* Free the bitmaps if they need resizing. */
if ((flags & DF_LR) && df->n_regs < (unsigned int)max_reg_num ())
df->n_defs = df->def_id;
df->n_uses = df->use_id;
- for (i = 0; i < df->n_bbs; i++)
+ FOR_EACH_BB (bb)
{
- basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (flags & DF_RD && ! bb_info->rd_in)
struct df *df ATTRIBUTE_UNUSED;
int flags;
{
- unsigned int i;
+ basic_block bb;
- for (i = 0; i < df->n_bbs; i++)
+ FOR_EACH_BB (bb)
{
- basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (!bb_info)
}
-/* Allocate and initialise dataflow memory. */
+/* Allocate and initialize dataflow memory. */
static void
df_alloc (df, n_regs)
struct df *df;
int n_regs;
{
int n_insns;
- int i;
+ basic_block bb;
gcc_obstack_init (&df_ref_obstack);
df->uses = xmalloc (df->use_size * sizeof (*df->uses));
df->n_regs = n_regs;
- df->n_bbs = n_basic_blocks;
+ df->n_bbs = last_basic_block;
/* Allocate temporary working array used during local dataflow analysis. */
df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));
df->flags = 0;
- df->bbs = xcalloc (df->n_bbs, sizeof (struct bb_info));
+ df->bbs = xcalloc (last_basic_block, sizeof (struct bb_info));
df->all_blocks = BITMAP_XMALLOC ();
- for (i = 0; i < n_basic_blocks; i++)
- bitmap_set_bit (df->all_blocks, i);
+ FOR_EACH_BB (bb)
+ bitmap_set_bit (df->all_blocks, bb->index);
}
rtx reg;
rtx use;
- reg = regno >= FIRST_PSEUDO_REGISTER
- ? regno_reg_rtx[regno] : gen_rtx_REG (reg_raw_mode[regno], regno);
+ reg = regno_reg_rtx[regno];
use = gen_rtx_USE (GET_MODE (reg), reg);
return use;
rtx reg;
rtx use;
- reg = regno >= FIRST_PSEUDO_REGISTER
- ? regno_reg_rtx[regno] : gen_rtx_REG (reg_raw_mode[regno], regno);
+ reg = regno_reg_rtx[regno];
use = gen_rtx_CLOBBER (GET_MODE (reg), reg);
return use;
/* Create a new ref of type DF_REF_TYPE for register REG at address
LOC within INSN of BB. */
static struct ref *
-df_ref_create (df, reg, loc, bb, insn, ref_type)
+df_ref_create (df, reg, loc, insn, ref_type, ref_flags)
struct df *df;
rtx reg;
rtx *loc;
- basic_block bb;
rtx insn;
enum df_ref_type ref_type;
+ enum df_ref_flags ref_flags;
{
struct ref *this_ref;
unsigned int uid;
sizeof (*this_ref));
DF_REF_REG (this_ref) = reg;
DF_REF_LOC (this_ref) = loc;
- DF_REF_BB (this_ref) = bb;
DF_REF_INSN (this_ref) = insn;
DF_REF_CHAIN (this_ref) = 0;
DF_REF_TYPE (this_ref) = ref_type;
+ DF_REF_FLAGS (this_ref) = ref_flags;
uid = INSN_UID (insn);
if (ref_type == DF_REF_REG_DEF)
/* Create a new reference of type DF_REF_TYPE for a single register REG,
used inside the LOC rtx of INSN. */
static void
-df_ref_record_1 (df, reg, loc, bb, insn, ref_type)
+df_ref_record_1 (df, reg, loc, insn, ref_type, ref_flags)
struct df *df;
rtx reg;
rtx *loc;
- basic_block bb;
rtx insn;
enum df_ref_type ref_type;
+ enum df_ref_flags ref_flags;
{
- df_ref_create (df, reg, loc, bb, insn, ref_type);
+ df_ref_create (df, reg, loc, insn, ref_type, ref_flags);
}
/* Create new references of type DF_REF_TYPE for each part of register REG
at address LOC within INSN of BB. */
static void
-df_ref_record (df, reg, loc, bb, insn, ref_type)
+df_ref_record (df, reg, loc, insn, ref_type, ref_flags)
struct df *df;
rtx reg;
rtx *loc;
- basic_block bb;
rtx insn;
enum df_ref_type ref_type;
+ enum df_ref_flags ref_flags;
{
unsigned int regno;
XXX Is that true? We could also use the global word_mode variable. */
if (GET_CODE (reg) == SUBREG
&& (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
- || GET_MODE_SIZE (GET_MODE (reg))
+ || GET_MODE_SIZE (GET_MODE (reg))
>= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
{
loc = &SUBREG_REG (reg);
are really referenced. E.g. a (subreg:SI (reg:DI 0) 0) does _not_
reference the whole reg 0 in DI mode (which would also include
reg 1, at least, if 0 and 1 are SImode registers). */
- endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ endregno = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ if (GET_CODE (reg) == SUBREG)
+ regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
+ SUBREG_BYTE (reg), GET_MODE (reg));
+ endregno += regno;
for (i = regno; i < endregno; i++)
- df_ref_record_1 (df, gen_rtx_REG (reg_raw_mode[i], i),
- loc, bb, insn, ref_type);
+ df_ref_record_1 (df, regno_reg_rtx[i],
+ loc, insn, ref_type, ref_flags);
}
else
{
- df_ref_record_1 (df, reg, loc, bb, insn, ref_type);
+ df_ref_record_1 (df, reg, loc, insn, ref_type, ref_flags);
}
}
+/* Writes to paradoxical subregs, or subregs which are too narrow,
+ are read-modify-write. */
+
+static inline bool
+read_modify_subreg_p (x)
+ rtx x;
+{
+ unsigned int isize, osize;
+ if (GET_CODE (x) != SUBREG)
+ return false;
+ isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
+ osize = GET_MODE_SIZE (GET_MODE (x));
+ if (isize <= osize)
+ return true;
+ if (isize <= UNITS_PER_WORD)
+ return false;
+ if (osize >= UNITS_PER_WORD)
+ return false;
+ return true;
+}
/* Process all the registers defined in the rtx, X. */
static void
{
rtx *loc = &SET_DEST (x);
rtx dst = *loc;
+ enum df_ref_flags flags = 0;
/* Some targets place small structures in registers for
return values of functions. */
return;
}
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (GET_CODE (dst) == SUBREG
+ && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
+ GET_MODE (SUBREG_REG (dst))))
+ flags |= DF_REF_MODE_CHANGE;
+#endif
+
/* May be, we should flag the use of strict_low_part somehow. Might be
handy for the reg allocator. */
-#ifdef HANDLE_SUBREG
while (GET_CODE (dst) == STRICT_LOW_PART
- || GET_CODE (dst) == ZERO_EXTRACT
- || GET_CODE (dst) == SIGN_EXTRACT)
- {
- loc = &XEXP (dst, 0);
- dst = *loc;
- }
- /* For the reg allocator we are interested in exact register references.
- This means, we want to know, if only a part of a register is
- used/defd. */
-/*
- if (GET_CODE (dst) == SUBREG)
- {
- loc = &XEXP (dst, 0);
- dst = *loc;
- } */
-#else
-
- while (GET_CODE (dst) == SUBREG
|| GET_CODE (dst) == ZERO_EXTRACT
|| GET_CODE (dst) == SIGN_EXTRACT
- || GET_CODE (dst) == STRICT_LOW_PART)
+ || read_modify_subreg_p (dst))
{
+ /* Strict low part always contains SUBREG, but we don't want to make
+ it appear outside, as the whole register is always considered. */
+ if (GET_CODE (dst) == STRICT_LOW_PART)
+ {
+ loc = &XEXP (dst, 0);
+ dst = *loc;
+ }
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (GET_CODE (dst) == SUBREG
+ && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
+ GET_MODE (SUBREG_REG (dst))))
+ flags |= DF_REF_MODE_CHANGE;
+#endif
loc = &XEXP (dst, 0);
dst = *loc;
+ flags |= DF_REF_READ_WRITE;
}
-#endif
- if (GET_CODE (dst) == REG
- || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
- df_ref_record (df, dst, loc, bb, insn, DF_REF_REG_DEF);
+ if (GET_CODE (dst) == REG
+ || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
+ df_ref_record (df, dst, loc, insn, DF_REF_REG_DEF, flags);
}
/* Process all the registers used in the rtx at address LOC. */
static void
-df_uses_record (df, loc, ref_type, bb, insn)
+df_uses_record (df, loc, ref_type, bb, insn, flags)
struct df *df;
rtx *loc;
enum df_ref_type ref_type;
basic_block bb;
rtx insn;
+ enum df_ref_flags flags;
{
RTX_CODE code;
rtx x;
-
retry:
x = *loc;
+ if (!x)
+ return;
code = GET_CODE (x);
switch (code)
{
case CONST_INT:
case CONST:
case CONST_DOUBLE:
+ case CONST_VECTOR:
case PC:
case ADDR_VEC:
case ADDR_DIFF_VEC:
as being used. */
if (GET_CODE (XEXP (x, 0)) == MEM)
df_uses_record (df, &XEXP (XEXP (x, 0), 0),
- DF_REF_REG_MEM_STORE, bb, insn);
+ DF_REF_REG_MEM_STORE, bb, insn, flags);
/* If we're clobbering a REG then we have a def so ignore. */
return;
case MEM:
- df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn);
+ df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, flags);
return;
case SUBREG:
/* While we're here, optimize this case. */
-#if defined(HANDLE_SUBREG)
/* In case the SUBREG is not of a register, don't optimize. */
if (GET_CODE (SUBREG_REG (x)) != REG)
{
loc = &SUBREG_REG (x);
- df_uses_record (df, loc, ref_type, bb, insn);
- return;
- }
-#else
- loc = &SUBREG_REG (x);
- x = *loc;
- if (GET_CODE (x) != REG)
- {
- df_uses_record (df, loc, ref_type, bb, insn);
+ df_uses_record (df, loc, ref_type, bb, insn, flags);
return;
}
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (x),
+ GET_MODE (SUBREG_REG (x))))
+ flags |= DF_REF_MODE_CHANGE;
#endif
/* ... Fall through ... */
case REG:
/* See a register (or subreg) other than being set. */
- df_ref_record (df, x, loc, bb, insn, ref_type);
+ df_ref_record (df, x, loc, insn, ref_type, flags);
return;
case SET:
{
rtx dst = SET_DEST (x);
- int use_dst = 0;
- /* If storing into MEM, don't show it as being used. But do
- show the address as being used. */
- if (GET_CODE (dst) == MEM)
- {
- df_uses_record (df, &XEXP (dst, 0),
- DF_REF_REG_MEM_STORE,
- bb, insn);
- df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn);
- return;
- }
+ df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn, 0);
-#if 1 && defined(HANDLE_SUBREG)
- /* Look for sets that perform a read-modify-write. */
- while (GET_CODE (dst) == STRICT_LOW_PART
- || GET_CODE (dst) == ZERO_EXTRACT
- || GET_CODE (dst) == SIGN_EXTRACT)
- {
- if (GET_CODE (dst) == STRICT_LOW_PART)
- {
- dst = XEXP (dst, 0);
- if (GET_CODE (dst) != SUBREG)
- abort ();
- /* A strict_low_part uses the whole reg not only the subreg. */
- df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb, insn);
- }
- else
- {
- df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_USE, bb, insn);
- dst = XEXP (dst, 0);
- }
- }
- if (GET_CODE (dst) == SUBREG)
- {
- /* Paradoxical or too small subreg's are read-mod-write. */
- if (GET_MODE_SIZE (GET_MODE (dst)) < GET_MODE_SIZE (word_mode)
- || GET_MODE_SIZE (GET_MODE (dst))
- >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dst))))
- use_dst = 1;
- }
- /* In the original code also some SUBREG rtx's were considered
- read-modify-write (those with
- REG_SIZE(SUBREG_REG(dst)) > REG_SIZE(dst) )
- e.g. a (subreg:QI (reg:SI A) 0). I can't see this. The only
- reason for a read cycle for reg A would be to somehow preserve
- the bits outside of the subreg:QI. But for this a strict_low_part
- was necessary anyway, and this we handled already. */
-#else
- while (GET_CODE (dst) == STRICT_LOW_PART
- || GET_CODE (dst) == ZERO_EXTRACT
- || GET_CODE (dst) == SIGN_EXTRACT
- || GET_CODE (dst) == SUBREG)
+ switch (GET_CODE (dst))
{
- /* A SUBREG of a smaller size does not use the old value. */
- if (GET_CODE (dst) != SUBREG
- || (REG_SIZE (SUBREG_REG (dst)) > REG_SIZE (dst)))
- use_dst = 1;
- dst = XEXP (dst, 0);
- }
+ enum df_ref_flags use_flags;
+ case SUBREG:
+ if (read_modify_subreg_p (dst))
+ {
+ use_flags = DF_REF_READ_WRITE;
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
+ GET_MODE (SUBREG_REG (dst))))
+ use_flags |= DF_REF_MODE_CHANGE;
#endif
-
- if ((GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
- || GET_CODE (dst) == REG || GET_CODE (dst) == SUBREG)
- {
-#if 1 || !defined(HANDLE_SUBREG)
- if (use_dst)
- df_uses_record (df, &SET_DEST (x), DF_REF_REG_USE, bb, insn);
+ df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
+ insn, use_flags);
+ break;
+ }
+ /* ... FALLTHRU ... */
+ case REG:
+ case PC:
+ case PARALLEL:
+ break;
+ case MEM:
+ df_uses_record (df, &XEXP (dst, 0),
+ DF_REF_REG_MEM_STORE,
+ bb, insn, 0);
+ break;
+ case STRICT_LOW_PART:
+ /* A strict_low_part uses the whole reg not only the subreg. */
+ dst = XEXP (dst, 0);
+ if (GET_CODE (dst) != SUBREG)
+ abort ();
+ use_flags = DF_REF_READ_WRITE;
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
+ GET_MODE (SUBREG_REG (dst))))
+ use_flags |= DF_REF_MODE_CHANGE;
#endif
- df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn);
- return;
+ df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
+ insn, use_flags);
+ break;
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_USE, bb, insn,
+ DF_REF_READ_WRITE);
+ df_uses_record (df, &XEXP (dst, 1), DF_REF_REG_USE, bb, insn, 0);
+ df_uses_record (df, &XEXP (dst, 2), DF_REF_REG_USE, bb, insn, 0);
+ dst = XEXP (dst, 0);
+ break;
+ default:
+ abort ();
}
+ return;
}
- break;
case RETURN:
break;
For now, just mark any regs we can find in ASM_OPERANDS as
used. */
- /* For all ASM_OPERANDS, we must traverse the vector of input operands.
+ /* For all ASM_OPERANDS, we must traverse the vector of input operands.
We can not just fall through here since then we would be confused
by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
traditional asms unlike their normal usage. */
for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
df_uses_record (df, &ASM_OPERANDS_INPUT (x, j),
- DF_REF_REG_USE, bb, insn);
+ DF_REF_REG_USE, bb, insn, 0);
return;
}
break;
case PRE_MODIFY:
case POST_MODIFY:
/* Catch the def of the register being modified. */
- df_ref_record (df, XEXP (x, 0), &XEXP (x, 0), bb, insn, DF_REF_REG_DEF);
+ df_ref_record (df, XEXP (x, 0), &XEXP (x, 0), insn, DF_REF_REG_DEF, DF_REF_READ_WRITE);
/* ... Fall through to handle uses ... */
loc = &XEXP (x, 0);
goto retry;
}
- df_uses_record (df, &XEXP (x, i), ref_type, bb, insn);
+ df_uses_record (df, &XEXP (x, i), ref_type, bb, insn, flags);
}
else if (fmt[i] == 'E')
{
int j;
for (j = 0; j < XVECLEN (x, i); j++)
df_uses_record (df, &XVECEXP (x, i, j), ref_type,
- bb, insn);
+ bb, insn, flags);
}
}
}
case REG_EQUIV:
case REG_EQUAL:
df_uses_record (df, &XEXP (note, 0), DF_REF_REG_USE,
- bb, insn);
+ bb, insn, 0);
default:
break;
}
note = XEXP (note, 1))
{
if (GET_CODE (XEXP (note, 0)) == USE)
- df_uses_record (df, &SET_DEST (XEXP (note, 0)), DF_REF_REG_USE,
- bb, insn);
+ df_uses_record (df, &XEXP (XEXP (note, 0), 0), DF_REF_REG_USE,
+ bb, insn, 0);
}
/* The stack ptr is used (honorarily) by a CALL insn. */
x = df_reg_use_gen (STACK_POINTER_REGNUM);
- df_uses_record (df, &SET_DEST (x), DF_REF_REG_USE, bb, insn);
+ df_uses_record (df, &XEXP (x, 0), DF_REF_REG_USE, bb, insn, 0);
if (df->flags & DF_HARD_REGS)
{
{
x = df_reg_use_gen (i);
df_uses_record (df, &SET_DEST (x),
- DF_REF_REG_USE, bb, insn);
+ DF_REF_REG_USE, bb, insn, 0);
}
}
}
/* Record the register uses. */
df_uses_record (df, &PATTERN (insn),
- DF_REF_REG_USE, bb, insn);
+ DF_REF_REG_USE, bb, insn, 0);
if (GET_CODE (insn) == CALL_INSN)
/* Perhaps the defs should be sorted using a depth first search
of the CFG (or possibly a breadth first search). We currently
scan the basic blocks in reverse order so that the first defs
- apprear at the start of the chain. */
+ appear at the start of the chain. */
for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
insn = PREV_INSN (insn))
{
struct ref *def = link->ref;
unsigned int dregno = DF_REF_REGNO (def);
+ /* Don't add refs to the chain twice, i.e. only add
+ new refs. XXX the same could be done by testing if the current
+ insn is a modified (or a new) one. This would be faster. */
+ if (DF_REF_ID (def) < df->def_id_save)
+ continue;
df->regs[dregno].defs
= df_link_create (def, df->regs[dregno].defs);
{
struct ref *use = link->ref;
unsigned int uregno = DF_REF_REGNO (use);
+ /* Don't add refs to the chain twice, i.e. only add
+ new refs. XXX the same could be done by testing if the current
+ insn is a modified (or a new) one. This would be faster. */
+ if (DF_REF_ID (use) < df->use_id_save)
+ continue;
df->regs[uregno].uses
= df_link_create (use, df->regs[uregno].uses);
}
\f
-/* Use reverse completion order, and the worklist, to figure out what block
- to look at next. */
-
-static int
-df_visit_next_rc (df, blocks)
- struct df *df ATTRIBUTE_UNUSED;
- sbitmap blocks;
-{
- int i=0;
- for (i = 0; i < n_basic_blocks; i++)
- if (TEST_BIT (blocks, df->rc_order[i]))
- return df->rc_order[i];
- return sbitmap_first_set_bit (blocks);
-}
-
-/* Use reverse topsort order, and the worklist, to figure out what block
- to look at next. */
-static int
-df_visit_next_rts (df, blocks)
- struct df *df ATTRIBUTE_UNUSED;
- sbitmap blocks;
-{
- int i=0;
- for (i = 0; i < n_basic_blocks; i++)
- if (TEST_BIT (blocks, df->rts_order[i]))
- return df->rts_order[i];
- return sbitmap_first_set_bit (blocks);
-}
-
-
-/* Calculate reaching defs for each basic block in BLOCKS, i.e., the
- defs that are live at the start of a basic block. */
static void
-df_rd_global_compute (df, blocks)
- struct df *df ATTRIBUTE_UNUSED;
- bitmap blocks;
+df_rd_transfer_function (bb, changed, in, out, gen, kill, data)
+ int bb ATTRIBUTE_UNUSED;
+ int *changed;
+ bitmap in, out, gen, kill;
+ void *data ATTRIBUTE_UNUSED;
{
- int i;
- basic_block bb;
- sbitmap worklist;
-
- worklist = sbitmap_alloc (n_basic_blocks);
- sbitmap_zero (worklist);
-
- /* Copy the blocklist to the worklist */
- EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
- {
- SET_BIT (worklist, i);
- });
-
- /* We assume that only the basic blocks in WORKLIST have been
- modified. */
- FOR_EACH_BB_IN_SBITMAP (worklist, 0, bb,
- {
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
- bitmap_copy (bb_info->rd_out, bb_info->rd_gen);
- });
-
- while ((i = df_visit_next_rc (df, worklist)) >= 0)
- {
- struct bb_info *bb_info;
- edge e;
- int changed;
-
- /* Remove this block from the worklist. */
- RESET_BIT (worklist, i);
-
-
- bb = BASIC_BLOCK (i);
- bb_info = DF_BB_INFO (df, bb);
-
- /* Calculate union of predecessor outs. */
- bitmap_zero (bb_info->rd_in);
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- struct bb_info *pred_refs = DF_BB_INFO (df, e->src);
-
- if (e->src == ENTRY_BLOCK_PTR)
- continue;
-
- bitmap_a_or_b (bb_info->rd_in, bb_info->rd_in,
- pred_refs->rd_out);
- }
-
- /* RD_OUT is the set of defs that are live at the end of the
- BB. These are the defs that are either generated by defs
- (RD_GEN) within the BB or are live at the start (RD_IN)
- and are not killed by other defs (RD_KILL). */
- changed = bitmap_union_of_diff (bb_info->rd_out, bb_info->rd_gen,
- bb_info->rd_in, bb_info->rd_kill);
-
- if (changed)
- {
- /* Add each of this block's successors to the worklist. */
- for (e = bb->succ; e != 0; e = e->succ_next)
- {
- if (e->dest == EXIT_BLOCK_PTR)
- continue;
-
- SET_BIT (worklist, e->dest->index);
- }
- }
- }
- sbitmap_free (worklist);
+ *changed = bitmap_union_of_diff (out, gen, in, kill);
}
-
-
-/* Calculate reaching uses for each basic block within BLOCKS, i.e.,
- the uses that are live at the start of a basic block. */
static void
-df_ru_global_compute (df, blocks)
- struct df *df ATTRIBUTE_UNUSED;
- bitmap blocks;
+df_ru_transfer_function (bb, changed, in, out, gen, kill, data)
+ int bb ATTRIBUTE_UNUSED;
+ int *changed;
+ bitmap in, out, gen, kill;
+ void *data ATTRIBUTE_UNUSED;
{
- int i;
- basic_block bb;
- sbitmap worklist;
-
- worklist = sbitmap_alloc (n_basic_blocks);
- sbitmap_zero (worklist);
-
- EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
- {
- SET_BIT (worklist, i);
- });
-
- /* We assume that only the basic blocks in WORKLIST have been
- modified. */
- FOR_EACH_BB_IN_SBITMAP (worklist, 0, bb,
- {
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
- bitmap_copy (bb_info->ru_in, bb_info->ru_gen);
- });
-
-
- while ((i = df_visit_next_rts (df, worklist)) >= 0)
- {
- struct bb_info *bb_info;
- edge e;
- int changed;
-
- /* Remove this block from the worklist. */
- RESET_BIT (worklist, i);
-
- bb = BASIC_BLOCK (i);
- bb_info = DF_BB_INFO (df, bb);
-
- /* Calculate union of successor ins. */
- bitmap_zero (bb_info->ru_out);
- for (e = bb->succ; e != 0; e = e->succ_next)
- {
- struct bb_info *succ_refs = DF_BB_INFO (df, e->dest);
-
- if (e->dest == EXIT_BLOCK_PTR)
- continue;
-
- bitmap_a_or_b (bb_info->ru_out, bb_info->ru_out,
- succ_refs->ru_in);
- }
-
- /* RU_IN is the set of uses that are live at the start of the
- BB. These are the uses that are either generated within the
- BB (RU_GEN) or are live at the end (RU_OUT) and are not uses
- killed by defs within the BB (RU_KILL). */
- changed = bitmap_union_of_diff (bb_info->ru_in, bb_info->ru_gen,
- bb_info->ru_out, bb_info->ru_kill);
-
- if (changed)
- {
- /* Add each of this block's predecessors to the worklist. */
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- if (e->src == ENTRY_BLOCK_PTR)
- continue;
-
- SET_BIT (worklist, e->src->index);
- }
- }
- }
-
- sbitmap_free (worklist);
+ *changed = bitmap_union_of_diff (in, gen, out, kill);
}
-
-/* Calculate live registers for each basic block within BLOCKS. */
static void
-df_lr_global_compute (df, blocks)
- struct df *df ATTRIBUTE_UNUSED;
- bitmap blocks;
+df_lr_transfer_function (bb, changed, in, out, use, def, data)
+ int bb ATTRIBUTE_UNUSED;
+ int *changed;
+ bitmap in, out, use, def;
+ void *data ATTRIBUTE_UNUSED;
{
- int i;
- basic_block bb;
- bitmap worklist;
-
- worklist = BITMAP_XMALLOC ();
- bitmap_copy (worklist, blocks);
-
- /* We assume that only the basic blocks in WORKLIST have been
- modified. */
- FOR_EACH_BB_IN_BITMAP (worklist, 0, bb,
- {
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
- bitmap_copy (bb_info->lr_in, bb_info->lr_use);
- });
-
- while ((i = bitmap_last_set_bit (worklist)) >= 0)
- {
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
- edge e;
- int changed;
-
- /* Remove this block from the worklist. */
- bitmap_clear_bit (worklist, i);
-
- bb = BASIC_BLOCK (i);
- bb_info = DF_BB_INFO (df, bb);
-
- /* Calculate union of successor ins. */
- bitmap_zero (bb_info->lr_out);
- for (e = bb->succ; e != 0; e = e->succ_next)
- {
- struct bb_info *succ_refs = DF_BB_INFO (df, e->dest);
-
- if (e->dest == EXIT_BLOCK_PTR)
- continue;
-
- bitmap_a_or_b (bb_info->lr_out, bb_info->lr_out,
- succ_refs->lr_in);
- }
-
- /* LR_IN is the set of uses that are live at the start of the
- BB. These are the uses that are either generated by uses
- (LR_USE) within the BB or are live at the end (LR_OUT)
- and are not killed by other uses (LR_DEF). */
- changed = bitmap_union_of_diff (bb_info->lr_in, bb_info->lr_use,
- bb_info->lr_out, bb_info->lr_def);
-
- if (changed)
- {
- /* Add each of this block's predecessors to the worklist. */
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- if (e->src == ENTRY_BLOCK_PTR)
- continue;
-
- bitmap_set_bit (worklist, e->src->index);
- }
- }
- }
- BITMAP_XFREE (worklist);
+ *changed = bitmap_union_of_diff (in, use, out, def);
}
struct bb_info *bb_info = DF_BB_INFO (df, bb);
rtx insn;
+
for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
insn = PREV_INSN (insn))
{
df_bb_reg_info_compute (df, bb, live)
struct df *df;
basic_block bb;
- bitmap live;
+ bitmap live;
{
struct reg_info *reg_info = df->regs;
struct bb_info *bb_info = DF_BB_INFO (df, bb);
return total;
}
-
/* Perform dataflow analysis using existing DF structure for blocks
within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */
static void
{
int aflags;
int dflags;
+ int i;
+ basic_block bb;
dflags = 0;
aflags = flags;
df->dfs_order = xmalloc (sizeof(int) * n_basic_blocks);
df->rc_order = xmalloc (sizeof(int) * n_basic_blocks);
df->rts_order = xmalloc (sizeof(int) * n_basic_blocks);
+ df->inverse_dfs_map = xmalloc (sizeof(int) * last_basic_block);
+ df->inverse_rc_map = xmalloc (sizeof(int) * last_basic_block);
+ df->inverse_rts_map = xmalloc (sizeof(int) * last_basic_block);
flow_depth_first_order_compute (df->dfs_order, df->rc_order);
flow_reverse_top_sort_order_compute (df->rts_order);
+ for (i = 0; i < n_basic_blocks; i ++)
+ {
+ df->inverse_dfs_map[df->dfs_order[i]] = i;
+ df->inverse_rc_map[df->rc_order[i]] = i;
+ df->inverse_rts_map[df->rts_order[i]] = i;
+ }
if (aflags & DF_RD)
{
/* Compute the sets of gens and kills for the defs of each bb. */
df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
-
- /* Compute the global reaching definitions. */
- df_rd_global_compute (df, df->all_blocks);
+ {
+ bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
+ FOR_EACH_BB (bb)
+ {
+ in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
+ out[bb->index] = DF_BB_INFO (df, bb)->rd_out;
+ gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen;
+ kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
+ }
+ iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
+ FORWARD, UNION, df_rd_transfer_function,
+ df->inverse_rc_map, NULL);
+ free (in);
+ free (out);
+ free (gen);
+ free (kill);
+ }
}
if (aflags & DF_UD_CHAIN)
/* Compute the sets of gens and kills for the upwards exposed
uses in each bb. */
df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
-
- /* Compute the global reaching uses. */
- df_ru_global_compute (df, df->all_blocks);
+ {
+ bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
+ FOR_EACH_BB (bb)
+ {
+ in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
+ out[bb->index] = DF_BB_INFO (df, bb)->ru_out;
+ gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen;
+ kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
+ }
+ iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
+ BACKWARD, UNION, df_ru_transfer_function,
+ df->inverse_rts_map, NULL);
+ free (in);
+ free (out);
+ free (gen);
+ free (kill);
+ }
}
if (aflags & DF_DU_CHAIN)
{
/* Compute the sets of defs and uses of live variables. */
df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
-
- /* Compute the global live variables. */
- df_lr_global_compute (df, df->all_blocks);
+ {
+ bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *use = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *def = xmalloc (sizeof (bitmap) * last_basic_block);
+ FOR_EACH_BB (bb)
+ {
+ in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
+ out[bb->index] = DF_BB_INFO (df, bb)->lr_out;
+ use[bb->index] = DF_BB_INFO (df, bb)->lr_use;
+ def[bb->index] = DF_BB_INFO (df, bb)->lr_def;
+ }
+ iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
+ BACKWARD, UNION, df_lr_transfer_function,
+ df->inverse_rts_map, NULL);
+ free (in);
+ free (out);
+ free (use);
+ free (def);
+ }
}
if (aflags & DF_REG_INFO)
free (df->dfs_order);
free (df->rc_order);
free (df->rts_order);
+ free (df->inverse_rc_map);
+ free (df->inverse_dfs_map);
+ free (df->inverse_rts_map);
}
-/* Initialise dataflow analysis. */
+/* Initialize dataflow analysis. */
struct df *
df_init ()
{
/* Scan the insn for refs. */
df_insn_refs_record (df, bb, insn);
-
- bitmap_clear_bit (df->insns_modified, uid);
count++;
}
if (insn == bb->end)
struct df *df;
bitmap blocks;
{
- unsigned int j;
int update = 0;
+ basic_block bb;
+
+ if (!df->n_bbs)
+ return 0;
- for (j = 0; j < df->n_bbs; j++)
- if (bitmap_bit_p (df->bbs_modified, j)
- && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, j)))
+ FOR_EACH_BB (bb)
+ if (bitmap_bit_p (df->bbs_modified, bb->index)
+ && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, bb->index)))
{
update = 1;
break;
/* We could deal with additional basic blocks being created by
rescanning everything again. */
- if (df->n_bbs && df->n_bbs != (unsigned int)n_basic_blocks)
+ if (df->n_bbs && df->n_bbs != (unsigned int) last_basic_block)
abort ();
update = df_modified_p (df, blocks);
/* Recompute everything from scratch. */
df_free (df);
}
- /* Allocate and initialise data structures. */
+ /* Allocate and initialize data structures. */
df_alloc (df, max_reg_num ());
df_analyse_1 (df, 0, flags, 0);
update = 1;
df_analyse_1 (df, blocks, flags, 1);
bitmap_zero (df->bbs_modified);
+ bitmap_zero (df->insns_modified);
}
}
return update;
}
else
{
- FOR_ALL_BBS (bb,
- {
+ FOR_EACH_BB (bb)
df_bb_refs_unlink (df, bb);
- });
}
}
#endif
unsigned int uid;
uid = INSN_UID (insn);
-
if (uid >= df->insn_size)
- df_insn_table_realloc (df, 0);
+ df_insn_table_realloc (df, uid);
bitmap_set_bit (df->bbs_modified, bb->index);
bitmap_set_bit (df->insns_modified, uid);
-#if 0
/* For incremental updating on the fly, perhaps we could make a copy
of all the refs of the original insn and turn them into
anti-refs. When df_refs_update finds these anti-refs, it annihilates
the original refs. If validate_change fails then these anti-refs
will just get ignored. */
- */
-#endif
}
args.replacement = reg;
args.modified = 0;
- /* Seach and replace all matching mems within insn. */
+ /* Search and replace all matching mems within insn. */
for_each_rtx (&insn, df_rtx_mem_replace, &args);
if (args.modified)
uid = INSN_UID (insn);
if (uid >= df->insn_size)
- df_insn_table_realloc (df, 0);
+ df_insn_table_realloc (df, uid);
df_insn_modify (df, bb, insn);
for (; link; link = link->next)
{
fprintf (file, "%c%d(%d) ",
- DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
- DF_REF_ID (link->ref),
- DF_REF_REGNO (link->ref));
+ DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
+ DF_REF_ID (link->ref),
+ DF_REF_REGNO (link->ref));
}
fprintf (file, "}");
}
int flags;
FILE *file;
{
- unsigned int i;
unsigned int j;
+ basic_block bb;
if (! df || ! file)
return;
if (flags & DF_RD)
{
+ basic_block bb;
+
fprintf (file, "Reaching defs:\n");
- for (i = 0; i < df->n_bbs; i++)
+ FOR_EACH_BB (bb)
{
- basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->rd_in)
continue;
- fprintf (file, "bb %d in \t", i);
+ fprintf (file, "bb %d in \t", bb->index);
dump_bitmap (file, bb_info->rd_in);
- fprintf (file, "bb %d gen \t", i);
+ fprintf (file, "bb %d gen \t", bb->index);
dump_bitmap (file, bb_info->rd_gen);
- fprintf (file, "bb %d kill\t", i);
+ fprintf (file, "bb %d kill\t", bb->index);
dump_bitmap (file, bb_info->rd_kill);
- fprintf (file, "bb %d out \t", i);
+ fprintf (file, "bb %d out \t", bb->index);
dump_bitmap (file, bb_info->rd_out);
}
}
DF_INSN_LUID (df, DF_REF_INSN (df->defs[j])),
DF_REF_INSN_UID (df->defs[j]),
DF_REF_REGNO (df->defs[j]));
+ if (df->defs[j]->flags & DF_REF_READ_WRITE)
+ fprintf (file, "read/write ");
df_chain_dump (DF_REF_CHAIN (df->defs[j]), file);
fprintf (file, "\n");
}
if (flags & DF_RU)
{
fprintf (file, "Reaching uses:\n");
- for (i = 0; i < df->n_bbs; i++)
+ FOR_EACH_BB (bb)
{
- basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->ru_in)
continue;
- fprintf (file, "bb %d in \t", i);
+ fprintf (file, "bb %d in \t", bb->index);
dump_bitmap (file, bb_info->ru_in);
- fprintf (file, "bb %d gen \t", i);
+ fprintf (file, "bb %d gen \t", bb->index);
dump_bitmap (file, bb_info->ru_gen);
- fprintf (file, "bb %d kill\t", i);
+ fprintf (file, "bb %d kill\t", bb->index);
dump_bitmap (file, bb_info->ru_kill);
- fprintf (file, "bb %d out \t", i);
+ fprintf (file, "bb %d out \t", bb->index);
dump_bitmap (file, bb_info->ru_out);
}
}
DF_INSN_LUID (df, DF_REF_INSN (df->uses[j])),
DF_REF_INSN_UID (df->uses[j]),
DF_REF_REGNO (df->uses[j]));
+ if (df->uses[j]->flags & DF_REF_READ_WRITE)
+ fprintf (file, "read/write ");
df_chain_dump (DF_REF_CHAIN (df->uses[j]), file);
fprintf (file, "\n");
}
if (flags & DF_LR)
{
fprintf (file, "Live regs:\n");
- for (i = 0; i < df->n_bbs; i++)
+ FOR_EACH_BB (bb)
{
- basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->lr_in)
continue;
- fprintf (file, "bb %d in \t", i);
+ fprintf (file, "bb %d in \t", bb->index);
dump_bitmap (file, bb_info->lr_in);
- fprintf (file, "bb %d use \t", i);
+ fprintf (file, "bb %d use \t", bb->index);
dump_bitmap (file, bb_info->lr_use);
- fprintf (file, "bb %d def \t", i);
+ fprintf (file, "bb %d def \t", bb->index);
dump_bitmap (file, bb_info->lr_def);
- fprintf (file, "bb %d out \t", i);
+ fprintf (file, "bb %d out \t", bb->index);
dump_bitmap (file, bb_info->lr_out);
}
}
bbi = -1;
fprintf (file, "insn %d bb %d luid %d defs ",
- uid, bbi, DF_INSN_LUID (df, insn));
+ uid, bbi, DF_INSN_LUID (df, insn));
df_chain_dump_regno (df->insns[uid].defs, file);
fprintf (file, " uses ");
df_chain_dump_regno (df->insns[uid].uses, file);
df_chain_dump (link, stderr);
fputc ('\n', stderr);
}
+
+/* Hybrid search algorithm from "Implementation Techniques for
+   Efficient Data-Flow Analysis of Large Programs".
+
+   Visit BLOCK depth-first: if it is on the PENDING worklist, recompute
+   its solution (confluence of neighbor solutions per CONF_OP, then the
+   transfer function TRANSFUN on IN[i]/OUT[i]/GEN[i]/KILL[i]), and if
+   the solution changed, put the affected neighbors back on PENDING.
+   Finally recurse into not-yet-VISITED neighbors in the direction of
+   the problem (successors for FORWARD, predecessors for BACKWARD).
+   All vectors are indexed by block number; DATA is passed through to
+   TRANSFUN unchanged.  */
+static void
+hybrid_search_bitmap (block, in, out, gen, kill, dir,
+                      conf_op, transfun, visited, pending,
+                      data)
+     basic_block block;
+     bitmap *in, *out, *gen, *kill;
+     enum df_flow_dir dir;
+     enum df_confluence_op conf_op;
+     transfer_function_bitmap transfun;
+     sbitmap visited;
+     sbitmap pending;
+     void *data;
+{
+  int changed;
+  int i = block->index;
+  edge e;
+  basic_block bb = block;
+
+  SET_BIT (visited, block->index);
+  if (TEST_BIT (pending, block->index))
+    {
+      if (dir == FORWARD)
+        {
+          /* Calculate <conf_op> of predecessor_outs.  */
+          bitmap_zero (in[i]);
+          for (e = bb->pred; e != 0; e = e->pred_next)
+            {
+              if (e->src == ENTRY_BLOCK_PTR)
+                continue;
+              switch (conf_op)
+                {
+                case UNION:
+                  bitmap_a_or_b (in[i], in[i], out[e->src->index]);
+                  break;
+                case INTERSECTION:
+                  bitmap_a_and_b (in[i], in[i], out[e->src->index]);
+                  break;
+                }
+            }
+        }
+      else
+        {
+          /* Calculate <conf_op> of successor ins.  */
+          bitmap_zero (out[i]);
+          for (e = bb->succ; e != 0; e = e->succ_next)
+            {
+              if (e->dest == EXIT_BLOCK_PTR)
+                continue;
+              switch (conf_op)
+                {
+                case UNION:
+                  bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
+                  break;
+                case INTERSECTION:
+                  bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
+                  break;
+                }
+            }
+        }
+      /* Common part.  */
+      (*transfun) (i, &changed, in[i], out[i], gen[i], kill[i], data);
+      RESET_BIT (pending, i);
+      if (changed)
+        {
+          if (dir == FORWARD)
+            {
+              for (e = bb->succ; e != 0; e = e->succ_next)
+                {
+                  if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
+                    continue;
+                  SET_BIT (pending, e->dest->index);
+                }
+            }
+          else
+            {
+              for (e = bb->pred; e != 0; e = e->pred_next)
+                {
+                  /* Test e->src->index, not e->dest->index: e->dest is
+                     BLOCK itself on every predecessor edge, so testing
+                     it would be always true and no predecessor would
+                     ever be re-queued.  */
+                  if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
+                    continue;
+                  SET_BIT (pending, e->src->index);
+                }
+            }
+        }
+    }
+  if (dir == FORWARD)
+    {
+      for (e = bb->succ; e != 0; e = e->succ_next)
+        {
+          if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
+            continue;
+          if (!TEST_BIT (visited, e->dest->index))
+            hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
+                                  conf_op, transfun, visited, pending,
+                                  data);
+        }
+    }
+  else
+    {
+      for (e = bb->pred; e != 0; e = e->pred_next)
+        {
+          if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
+            continue;
+          if (!TEST_BIT (visited, e->src->index))
+            hybrid_search_bitmap (e->src, in, out, gen, kill, dir,
+                                  conf_op, transfun, visited, pending,
+                                  data);
+        }
+    }
+}
+
+
+/* Hybrid search for sbitmaps, rather than bitmaps.
+
+   Identical in structure to hybrid_search_bitmap: recompute BLOCK's
+   solution if it is on the PENDING worklist (confluence per CONF_OP,
+   then TRANSFUN), re-queue affected neighbors when the solution
+   changed, and recurse into not-yet-VISITED neighbors in the
+   direction of the problem.  DATA is passed through to TRANSFUN.  */
+static void
+hybrid_search_sbitmap (block, in, out, gen, kill, dir,
+                       conf_op, transfun, visited, pending,
+                       data)
+     basic_block block;
+     sbitmap *in, *out, *gen, *kill;
+     enum df_flow_dir dir;
+     enum df_confluence_op conf_op;
+     transfer_function_sbitmap transfun;
+     sbitmap visited;
+     sbitmap pending;
+     void *data;
+{
+  int changed;
+  int i = block->index;
+  edge e;
+  basic_block bb = block;
+
+  SET_BIT (visited, block->index);
+  if (TEST_BIT (pending, block->index))
+    {
+      if (dir == FORWARD)
+        {
+          /* Calculate <conf_op> of predecessor_outs.  */
+          sbitmap_zero (in[i]);
+          for (e = bb->pred; e != 0; e = e->pred_next)
+            {
+              if (e->src == ENTRY_BLOCK_PTR)
+                continue;
+              switch (conf_op)
+                {
+                case UNION:
+                  sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
+                  break;
+                case INTERSECTION:
+                  sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
+                  break;
+                }
+            }
+        }
+      else
+        {
+          /* Calculate <conf_op> of successor ins.  */
+          sbitmap_zero (out[i]);
+          for (e = bb->succ; e != 0; e = e->succ_next)
+            {
+              if (e->dest == EXIT_BLOCK_PTR)
+                continue;
+              switch (conf_op)
+                {
+                case UNION:
+                  sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
+                  break;
+                case INTERSECTION:
+                  sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
+                  break;
+                }
+            }
+        }
+      /* Common part.  */
+      (*transfun) (i, &changed, in[i], out[i], gen[i], kill[i], data);
+      RESET_BIT (pending, i);
+      if (changed)
+        {
+          if (dir == FORWARD)
+            {
+              for (e = bb->succ; e != 0; e = e->succ_next)
+                {
+                  if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
+                    continue;
+                  SET_BIT (pending, e->dest->index);
+                }
+            }
+          else
+            {
+              for (e = bb->pred; e != 0; e = e->pred_next)
+                {
+                  /* Test e->src->index, not e->dest->index: e->dest is
+                     BLOCK itself on every predecessor edge, so testing
+                     it would be always true and no predecessor would
+                     ever be re-queued.  */
+                  if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
+                    continue;
+                  SET_BIT (pending, e->src->index);
+                }
+            }
+        }
+    }
+  if (dir == FORWARD)
+    {
+      for (e = bb->succ; e != 0; e = e->succ_next)
+        {
+          if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
+            continue;
+          if (!TEST_BIT (visited, e->dest->index))
+            hybrid_search_sbitmap (e->dest, in, out, gen, kill, dir,
+                                   conf_op, transfun, visited, pending,
+                                   data);
+        }
+    }
+  else
+    {
+      for (e = bb->pred; e != 0; e = e->pred_next)
+        {
+          if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
+            continue;
+          if (!TEST_BIT (visited, e->src->index))
+            hybrid_search_sbitmap (e->src, in, out, gen, kill, dir,
+                                   conf_op, transfun, visited, pending,
+                                   data);
+        }
+    }
+}
+
+
+
+
+/* Perform iterative bitvector dataflow (sbitmap flavor), producing
+   the IN and OUT solution vectors.
+
+   gen = GEN set.
+   kill = KILL set.
+   in, out = Filled in by function.
+   blocks = Blocks to analyze.
+   dir = Dataflow direction.
+   conf_op = Confluence operation.
+   transfun = Transfer function.
+   order = Order to iterate in (should map block numbers -> order).
+   data = Whatever you want.  It's passed to the transfer function.
+
+   Even when only a few blocks are analyzed, IN and OUT must be large
+   enough for *all* blocks, because changing one block might affect
+   others; only the blocks listed in BLOCKS seed the worklist.
+
+   For forward problems, you probably want to pass in a mapping of
+   block number to rc_order (like df->inverse_rc_map).  */
+void
+iterative_dataflow_sbitmap (in, out, gen, kill, blocks,
+                            dir, conf_op, transfun, order, data)
+     sbitmap *in, *out, *gen, *kill;
+     bitmap blocks;
+     enum df_flow_dir dir;
+     enum df_confluence_op conf_op;
+     transfer_function_sbitmap transfun;
+     int *order;
+     void *data;
+{
+  int bno;
+  fibheap_t heap;
+  basic_block block;
+  sbitmap seen, dirty;
+
+  dirty = sbitmap_alloc (last_basic_block);
+  seen = sbitmap_alloc (last_basic_block);
+  sbitmap_zero (dirty);
+  sbitmap_zero (seen);
+
+  /* Seed the worklist: each requested block starts with its GEN set
+     as its initial solution and is marked dirty.  */
+  heap = fibheap_new ();
+  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bno,
+    {
+      fibheap_insert (heap, order[bno], (void *) (size_t) bno);
+      SET_BIT (dirty, bno);
+      if (dir == FORWARD)
+        sbitmap_copy (out[bno], gen[bno]);
+      else
+        sbitmap_copy (in[bno], gen[bno]);
+    });
+
+  for (;;)
+    {
+      if (sbitmap_first_set_bit (dirty) == -1)
+        break;
+
+      /* Drain the worklist, launching the hybrid search from every
+         block not yet reached on this pass.  */
+      while (!fibheap_empty (heap))
+        {
+          bno = (size_t) fibheap_extract_min (heap);
+          block = BASIC_BLOCK (bno);
+          if (!TEST_BIT (seen, block->index))
+            hybrid_search_sbitmap (block, in, out, gen, kill, dir,
+                                   conf_op, transfun, seen, dirty, data);
+        }
+
+      if (sbitmap_first_set_bit (dirty) == -1)
+        break;
+
+      /* Something is still dirty: refill the worklist and sweep
+         again with a fresh visited set.  */
+      EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bno,
+        {
+          fibheap_insert (heap, order[bno], (void *) (size_t) bno);
+        });
+      sbitmap_zero (seen);
+    }
+
+  sbitmap_free (dirty);
+  sbitmap_free (seen);
+  fibheap_delete (heap);
+}
+
+/* Exactly the same as iterative_dataflow_sbitmap, except it works on
+   bitmaps instead.  See that function's comment for the meaning of
+   the parameters; only the blocks listed in BLOCKS seed the
+   worklist, but IN and OUT must cover all blocks.  */
+void
+iterative_dataflow_bitmap (in, out, gen, kill, blocks,
+                           dir, conf_op, transfun, order, data)
+     bitmap *in, *out, *gen, *kill;
+     bitmap blocks;
+     enum df_flow_dir dir;
+     enum df_confluence_op conf_op;
+     transfer_function_bitmap transfun;
+     int *order;
+     void *data;
+{
+  int bno;
+  fibheap_t heap;
+  basic_block block;
+  sbitmap seen, dirty;
+
+  dirty = sbitmap_alloc (last_basic_block);
+  seen = sbitmap_alloc (last_basic_block);
+  sbitmap_zero (dirty);
+  sbitmap_zero (seen);
+
+  /* Seed the worklist: each requested block starts with its GEN set
+     as its initial solution and is marked dirty.  */
+  heap = fibheap_new ();
+  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bno,
+    {
+      fibheap_insert (heap, order[bno], (void *) (size_t) bno);
+      SET_BIT (dirty, bno);
+      if (dir == FORWARD)
+        bitmap_copy (out[bno], gen[bno]);
+      else
+        bitmap_copy (in[bno], gen[bno]);
+    });
+
+  for (;;)
+    {
+      if (sbitmap_first_set_bit (dirty) == -1)
+        break;
+
+      /* Drain the worklist, launching the hybrid search from every
+         block not yet reached on this pass.  */
+      while (!fibheap_empty (heap))
+        {
+          bno = (size_t) fibheap_extract_min (heap);
+          block = BASIC_BLOCK (bno);
+          if (!TEST_BIT (seen, block->index))
+            hybrid_search_bitmap (block, in, out, gen, kill, dir,
+                                  conf_op, transfun, seen, dirty, data);
+        }
+
+      if (sbitmap_first_set_bit (dirty) == -1)
+        break;
+
+      /* Something is still dirty: refill the worklist and sweep
+         again with a fresh visited set.  */
+      EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bno,
+        {
+          fibheap_insert (heap, order[bno], (void *) (size_t) bno);
+        });
+      sbitmap_zero (seen);
+    }
+
+  sbitmap_free (dirty);
+  sbitmap_free (seen);
+  fibheap_delete (heap);
+}