/* Check the size of the invariant table and realloc if necessary. */
-static void
+static void
check_invariant_table_size (void)
{
if (invariant_table_size < DF_DEFS_TABLE_SIZE())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
invariant_table = XRESIZEVEC (struct invariant *, invariant_table, new_size);
- memset (&invariant_table[invariant_table_size], 0,
+ memset (&invariant_table[invariant_table_size], 0,
(new_size - invariant_table_size) * sizeof (struct rtx_iv *));
invariant_table_size = new_size;
}
if (def)
{
inv->cost = rtx_cost (set, SET, speed);
/* ??? Try to determine cheapness of address computation. Unfortunately
   the address cost is only a relative measure, we can't really compare
   it with any absolute number, but only with other address costs.
   But here we don't have any other addresses, so compare with a magic
   number anyway. It has to be large enough to not regress PR33928
   (by avoiding to move reg+8,reg+16,reg+24 invariants), but small
   enough to not regress 410.bwaves either (by still moving reg+reg
   invariants).
   See http://gcc.gnu.org/ml/gcc-patches/2009-10/msg01210.html . */
inv->cheap_address = address_cost (SET_SRC (set), word_mode,
				   ADDR_SPACE_GENERIC, speed) < 3;
}
else
{
struct df_link *defs;
struct def *def_data;
struct invariant *inv;
if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
return false;
defs = DF_REF_CHAIN (use);
if (!defs)
return true;
if (defs->next)
return false;
def = defs->ref;
check_invariant_table_size ();
inv = invariant_table[DF_REF_ID(def)];
if (!inv)
return false;
def_data = inv->def;
gcc_assert (def_data != NULL);
def_bb = DF_REF_BB (def);
/* Note that in case bb == def_bb, we know that the definition
dominates insn, because def has invariant_table[DF_REF_ID(def)]
sequentially. */
if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
return false;
bitmap_set_bit (depends_on, def_data->invno);
return true;
}
for (use_rec = DF_INSN_INFO_EQ_USES (insn_info); *use_rec; use_rec++)
if (!check_dependency (bb, *use_rec, depends_on))
return false;
return true;
}
rtx reg;
enum reg_class cover_class;
rtx set = single_set (insn);
/* Considered invariant insns have only one set. */
gcc_assert (set != NULL_RTX);
reg = SET_DEST (set);
return;
if (flag_ira_loop_pressure)
/* REGS_USED is actually never used when the flag is on.  */
regs_used = 0;
else
/* We do not really do a good job in estimating number of
unsigned int n_regs = DF_REG_SIZE (df);
regs_used = 2;
for (i = 0; i < n_regs; i++)
{
if (!DF_REGNO_FIRST_DEF (i) && DF_REGNO_LAST_USE (i))
{
*use->pos = reg;
df_insn_rescan (use->insn);
}
}
return true;
{
def = inv->def;
gcc_assert (def != NULL);
free_use_list (def->uses);
free (def);
invariant_table[i] = NULL;
free_loop_data (struct loop *loop)
{
struct loop_data *data = LOOP_DATA (loop);
if (!data)
  return;
bitmap_clear (&LOOP_DATA (loop)->regs_ref);
bitmap_clear (&LOOP_DATA (loop)->regs_live);
static int n_regs_set;
/* Return cover class and number of needed hard registers (through
*NREGS) of register REGNO.  */
static enum reg_class
get_regno_cover_class (int regno, int *nregs)
{
if (code == REG)
{
struct loop *loop;
for (loop = curr_loop;
loop != current_loops->tree_root;
loop = loop_outer (loop))
else if (fmt[i] == 'E')
{
int j;
for (j = 0; j < XVECLEN (x, i); j++)
mark_ref_regs (XVECEXP (x, i, j));
}
mark_ref_regs (PATTERN (insn));
n_regs_set = 0;
note_stores (PATTERN (insn), mark_reg_clobber, NULL);
/* Mark any registers dead after INSN as dead now. */
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
mark_reg_death (XEXP (link, 0));
/* Mark any registers set in INSN as live,
and mark them as conflicting with all other live regs.
Clobbers are processed again, so they conflict with
the registers that are set. */
note_stores (PATTERN (insn), mark_reg_store, NULL);
#ifdef AUTO_INC_DEC
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_INC)
REGNO (regs_set[n_regs_set]));
if (! note)
continue;
mark_reg_death (XEXP (note, 0));
}
}
for (i = 0; (int) i < ira_reg_class_cover_size; i++)
{
enum reg_class cover_class;
cover_class = ira_reg_class_cover[i];
if (LOOP_DATA (loop)->max_reg_pressure[cover_class] == 0)
continue;