/* Common subexpression elimination for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
/* Insn being scanned. */
static rtx this_insn;
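+
+/* True if the block containing the insn being scanned should be
+   optimized for speed rather than size; set per basic block from
+   optimize_bb_for_speed_p and consulted by the rtx_cost calls below. */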
+static bool optimize_this_for_speed_p;
/* Index by register number, gives the number of the next (or
previous) register in the chain of registers sharing the same
static inline unsigned canon_hash (rtx, enum machine_mode);
static inline unsigned safe_hash (rtx, enum machine_mode);
-static unsigned hash_rtx_string (const char *);
+static inline unsigned hash_rtx_string (const char *);
static rtx canon_reg (rtx, rtx);
static enum rtx_code find_comparison_args (enum rtx_code, rtx *, rtx *,
static void record_jump_equiv (rtx, bool);
static void record_jump_cond (enum rtx_code, enum machine_mode, rtx, rtx,
int);
-static void cse_insn (rtx, rtx);
+static void cse_insn (rtx);
static void cse_prescan_path (struct cse_basic_block_data *);
static void invalidate_from_clobbers (rtx);
static rtx cse_process_notes (rtx, rtx, bool *);
static void flush_hash_table (void);
static bool insn_live_p (rtx, int *);
static bool set_live_p (rtx, rtx, int *);
-static bool dead_libcall_p (rtx, int *);
static int cse_change_cc_mode (rtx *, void *);
static void cse_change_cc_mode_insn (rtx, rtx);
static void cse_change_cc_mode_insns (rtx, rtx, rtx);
-static enum machine_mode cse_cc_succs (basic_block, rtx, rtx, bool);
+static enum machine_mode cse_cc_succs (basic_block, basic_block, rtx, rtx,
+ bool);
\f
#undef RTL_HOOKS_GEN_LOWPART
approx_reg_cost_1 (rtx *xp, void *data)
{
rtx x = *xp;
- int *cost_p = data;
+ int *cost_p = (int *) data;
if (x && REG_P (x))
{
&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)),
GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))))
? 0
- : rtx_cost (x, outer) * 2);
+ : rtx_cost (x, outer, optimize_this_for_speed_p) * 2);
}
\f
- OLD is not changing; NEW is. */
+ OLD_REG is not changing; NEW_REG is. */
static void
-make_regs_eqv (unsigned int new, unsigned int old)
+make_regs_eqv (unsigned int new_reg, unsigned int old_reg)
{
unsigned int lastr, firstr;
- int q = REG_QTY (old);
+ int q = REG_QTY (old_reg);
struct qty_table_elem *ent;
ent = &qty_table[q];
/* Nothing should become eqv until it has a "non-invalid" qty number. */
- gcc_assert (REGNO_QTY_VALID_P (old));
+ gcc_assert (REGNO_QTY_VALID_P (old_reg));
- REG_QTY (new) = q;
+ REG_QTY (new_reg) = q;
firstr = ent->first_reg;
lastr = ent->last_reg;
that not only can they not be allocated by the compiler, but
they cannot be used in substitutions or canonicalizations
either. */
- && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
- && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
- || (new >= FIRST_PSEUDO_REGISTER
+ && (new_reg >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new_reg) != NO_REGS)
+ && ((new_reg < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new_reg))
+ || (new_reg >= FIRST_PSEUDO_REGISTER
&& (firstr < FIRST_PSEUDO_REGISTER
- || (bitmap_bit_p (cse_ebb_live_out, new)
+ || (bitmap_bit_p (cse_ebb_live_out, new_reg)
&& !bitmap_bit_p (cse_ebb_live_out, firstr))
- || (bitmap_bit_p (cse_ebb_live_in, new)
+ || (bitmap_bit_p (cse_ebb_live_in, new_reg)
&& !bitmap_bit_p (cse_ebb_live_in, firstr))))))
{
- reg_eqv_table[firstr].prev = new;
- reg_eqv_table[new].next = firstr;
- reg_eqv_table[new].prev = -1;
- ent->first_reg = new;
+ reg_eqv_table[firstr].prev = new_reg;
+ reg_eqv_table[new_reg].next = firstr;
+ reg_eqv_table[new_reg].prev = -1;
+ ent->first_reg = new_reg;
}
else
{
equivalent for anything. */
while (lastr < FIRST_PSEUDO_REGISTER && reg_eqv_table[lastr].prev >= 0
&& (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
- && new >= FIRST_PSEUDO_REGISTER)
+ && new_reg >= FIRST_PSEUDO_REGISTER)
lastr = reg_eqv_table[lastr].prev;
- reg_eqv_table[new].next = reg_eqv_table[lastr].next;
+ reg_eqv_table[new_reg].next = reg_eqv_table[lastr].next;
if (reg_eqv_table[lastr].next >= 0)
- reg_eqv_table[reg_eqv_table[lastr].next].prev = new;
+ reg_eqv_table[reg_eqv_table[lastr].next].prev = new_reg;
else
- qty_table[q].last_reg = new;
- reg_eqv_table[lastr].next = new;
- reg_eqv_table[new].prev = lastr;
+ qty_table[q].last_reg = new_reg;
+ reg_eqv_table[lastr].next = new_reg;
+ reg_eqv_table[new_reg].prev = lastr;
}
}
struct table_elt *p
= lookup (x, SAFE_HASH (x, VOIDmode), GET_MODE (x));
- /* If we are looking for a CONST_INT, the mode doesn't really matter, as
- long as we are narrowing. So if we looked in vain for a mode narrower
- than word_mode before, look for word_mode now. */
- if (p == 0 && code == CONST_INT
- && GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (word_mode))
- {
- x = copy_rtx (x);
- PUT_MODE (x, word_mode);
- p = lookup (x, SAFE_HASH (x, VOIDmode), word_mode);
- }
-
if (p == 0)
return 0;
static void
merge_equiv_classes (struct table_elt *class1, struct table_elt *class2)
{
- struct table_elt *elt, *next, *new;
+ struct table_elt *elt, *next, *new_elt;
/* Ensure we start with the head of the classes. */
class1 = class1->first_same_value;
rehash_using_reg (exp);
hash = HASH (exp, mode);
}
- new = insert (exp, class1, hash, mode);
- new->in_memory = hash_arg_in_memory;
+ new_elt = insert (exp, class1, hash, mode);
+ new_elt->in_memory = hash_arg_in_memory;
}
}
}
return plus_constant (q->exp, offset);
}
\f
+
/* Hash a string. Just add its bytes up. */
static inline unsigned
hash_rtx_string (const char *ps)
return hash;
}
-/* Hash an rtx. We are careful to make sure the value is never negative.
- Equivalent registers hash identically.
- MODE is used in hashing for CONST_INTs only;
- otherwise the mode of X is used.
-
- Store 1 in DO_NOT_RECORD_P if any subexpression is volatile.
-
- If HASH_ARG_IN_MEMORY_P is not NULL, store 1 in it if X contains
- a MEM rtx which does not have the RTX_UNCHANGING_P bit set.
-
- Note that cse_insn knows that the hash code of a MEM expression
- is just (int) MEM plus the hash code of the address. */
+/* Same as hash_rtx, but if CB is not NULL, call it on each rtx visited.
+ When the callback returns true, we continue hashing with the
+ replacement rtx and mode it stores through its output arguments. */
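+
+/* An illustrative sketch (hypothetical, not part of this file): a
+ callback that canonicalizes every register to register 0, so that
+ all registers hash identically.  Since hash_rtx_cb re-invokes the
+ callback on the replacement, a callback must return false for its
+ own output to guarantee termination:
+
+     static bool
+     hash_canon_reg_cb (const_rtx x, enum machine_mode mode,
+                        rtx *newx, enum machine_mode *newmode)
+     {
+       if (!REG_P (x) || REGNO (x) == 0)
+         return false;
+       *newx = gen_rtx_REG (GET_MODE (x), 0);
+       *newmode = mode;
+       return true;
+     }
+*/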
unsigned
-hash_rtx (const_rtx x, enum machine_mode mode, int *do_not_record_p,
- int *hash_arg_in_memory_p, bool have_reg_qty)
+hash_rtx_cb (const_rtx x, enum machine_mode mode,
+ int *do_not_record_p, int *hash_arg_in_memory_p,
+ bool have_reg_qty, hash_rtx_callback_function cb)
{
int i, j;
unsigned hash = 0;
enum rtx_code code;
const char *fmt;
+ enum machine_mode newmode;
+ rtx newx;
/* Used to turn recursion into iteration. We can't rely on GCC's
tail-recursion elimination since we need to keep accumulating values
if (x == 0)
return hash;
+ /* Invoke the callback first. */
+ if (cb != NULL
+ && ((*cb) (x, mode, &newx, &newmode)))
+ {
+ hash += hash_rtx_cb (newx, newmode, do_not_record_p,
+ hash_arg_in_memory_p, have_reg_qty, cb);
+ return hash;
+ }
+
code = GET_CODE (x);
switch (code)
{
{
unsigned int regno = REGNO (x);
- if (!reload_completed)
+ if (do_not_record_p && !reload_completed)
{
/* On some machines, we can't record any non-fixed hard register,
because extending its life will cause reload problems. We
for (i = 0; i < units; ++i)
{
elt = CONST_VECTOR_ELT (x, i);
- hash += hash_rtx (elt, GET_MODE (elt), do_not_record_p,
- hash_arg_in_memory_p, have_reg_qty);
+ hash += hash_rtx_cb (elt, GET_MODE (elt),
+ do_not_record_p, hash_arg_in_memory_p,
+ have_reg_qty, cb);
}
return hash;
case MEM:
/* We don't record if marked volatile or if BLKmode since we don't
know the size of the move. */
- if (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode)
+ if (do_not_record_p && (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode))
{
*do_not_record_p = 1;
return 0;
case CC0:
case CALL:
case UNSPEC_VOLATILE:
- *do_not_record_p = 1;
- return 0;
+ if (do_not_record_p)
+ {
+ *do_not_record_p = 1;
+ return 0;
+ }
+ else
+ return hash;
+ break;
case ASM_OPERANDS:
- if (MEM_VOLATILE_P (x))
+ if (do_not_record_p && MEM_VOLATILE_P (x))
{
*do_not_record_p = 1;
return 0;
{
for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
{
- hash += (hash_rtx (ASM_OPERANDS_INPUT (x, i),
- GET_MODE (ASM_OPERANDS_INPUT (x, i)),
- do_not_record_p, hash_arg_in_memory_p,
- have_reg_qty)
+ hash += (hash_rtx_cb (ASM_OPERANDS_INPUT (x, i),
+ GET_MODE (ASM_OPERANDS_INPUT (x, i)),
+ do_not_record_p, hash_arg_in_memory_p,
+ have_reg_qty, cb)
+ hash_rtx_string
- (ASM_OPERANDS_INPUT_CONSTRAINT (x, i)));
+ (ASM_OPERANDS_INPUT_CONSTRAINT (x, i)));
}
hash += hash_rtx_string (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
x = XEXP (x, i);
goto repeat;
}
-
- hash += hash_rtx (XEXP (x, i), 0, do_not_record_p,
- hash_arg_in_memory_p, have_reg_qty);
+
+ hash += hash_rtx_cb (XEXP (x, i), 0, do_not_record_p,
+ hash_arg_in_memory_p,
+ have_reg_qty, cb);
break;
case 'E':
for (j = 0; j < XVECLEN (x, i); j++)
- hash += hash_rtx (XVECEXP (x, i, j), 0, do_not_record_p,
- hash_arg_in_memory_p, have_reg_qty);
+ hash += hash_rtx_cb (XVECEXP (x, i, j), 0, do_not_record_p,
+ hash_arg_in_memory_p,
+ have_reg_qty, cb);
break;
case 's':
return hash;
}
+/* Hash an rtx. We are careful to make sure the value is never negative.
+ Equivalent registers hash identically.
+ MODE is used in hashing for CONST_INTs only;
+ otherwise the mode of X is used.
+
+ Store 1 in DO_NOT_RECORD_P if any subexpression is volatile.
+
+ If HASH_ARG_IN_MEMORY_P is not NULL, store 1 in it if X contains
+ a MEM rtx which does not have the RTX_UNCHANGING_P bit set.
+
+ Note that cse_insn knows that the hash code of a MEM expression
+ is just (int) MEM plus the hash code of the address. */
+
+unsigned
+hash_rtx (const_rtx x, enum machine_mode mode, int *do_not_record_p,
+ int *hash_arg_in_memory_p, bool have_reg_qty)
+{
+ return hash_rtx_cb (x, mode, do_not_record_p,
+ hash_arg_in_memory_p, have_reg_qty, NULL);
+}
+
/* Hash an rtx X for cse via hash_rtx.
Stores 1 in do_not_record if any subexpression is volatile.
Stores 1 in hash_arg_in_memory if X contains a mem rtx which
{
if (*xloc)
{
- rtx new = canon_reg (*xloc, insn);
+ rtx new_rtx = canon_reg (*xloc, insn);
/* If replacing pseudo with hard reg or vice versa, ensure the
insn remains valid. Likewise if the insn has MATCH_DUPs. */
- gcc_assert (insn && new);
- validate_change (insn, xloc, new, 1);
+ gcc_assert (insn && new_rtx);
+ validate_change (insn, xloc, new_rtx, 1);
}
}
enum machine_mode mode;
const char *fmt;
int i;
- rtx new = 0;
+ rtx new_rtx = 0;
int changed = 0;
/* Operands of X. */
{
case MEM:
case SUBREG:
- if ((new = equiv_constant (x)) != NULL_RTX)
- return new;
+ if ((new_rtx = equiv_constant (x)) != NULL_RTX)
+ return new_rtx;
return x;
case CONST:
{
case RTX_UNARY:
{
- int is_const = 0;
-
/* We can't simplify extension ops unless we know the
original mode. */
if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
&& mode_arg0 == VOIDmode)
break;
- /* If we had a CONST, strip it off and put it back later if we
- fold. */
- if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
- is_const = 1, const_arg0 = XEXP (const_arg0, 0);
-
- new = simplify_unary_operation (code, mode,
+ new_rtx = simplify_unary_operation (code, mode,
const_arg0 ? const_arg0 : folded_arg0,
mode_arg0);
- /* NEG of PLUS could be converted into MINUS, but that causes
- expressions of the form
- (CONST (MINUS (CONST_INT) (SYMBOL_REF)))
- which many ports mistakenly treat as LEGITIMATE_CONSTANT_P.
- FIXME: those ports should be fixed. */
- if (new != 0 && is_const
- && GET_CODE (new) == PLUS
- && (GET_CODE (XEXP (new, 0)) == SYMBOL_REF
- || GET_CODE (XEXP (new, 0)) == LABEL_REF)
- && GET_CODE (XEXP (new, 1)) == CONST_INT)
- new = gen_rtx_CONST (mode, new);
}
break;
if (const_arg0 == 0 || const_arg1 == 0)
{
struct table_elt *p0, *p1;
- rtx true_rtx = const_true_rtx, false_rtx = const0_rtx;
+ rtx true_rtx, false_rtx;
enum machine_mode mode_arg1;
-#ifdef FLOAT_STORE_FLAG_VALUE
if (SCALAR_FLOAT_MODE_P (mode))
{
+#ifdef FLOAT_STORE_FLAG_VALUE
true_rtx = (CONST_DOUBLE_FROM_REAL_VALUE
(FLOAT_STORE_FLAG_VALUE (mode), mode));
+#else
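+ /* There is no FLOAT_STORE_FLAG_VALUE on this target, so leave
+ TRUE_RTX null; the comparison folding below must then punt
+ instead of returning a floating-point "true" value. */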
+ true_rtx = NULL_RTX;
+#endif
false_rtx = CONST0_RTX (mode);
}
-#endif
+ else
+ {
+ true_rtx = const_true_rtx;
+ false_rtx = const0_rtx;
+ }
code = find_comparison_args (code, &folded_arg0, &folded_arg1,
&mode_arg0, &mode_arg1);
const_arg1))
|| (REG_P (folded_arg1)
&& (REG_QTY (REGNO (folded_arg1)) == ent->comparison_qty))))
- return (comparison_dominates_p (ent->comparison_code, code)
- ? true_rtx : false_rtx);
+ {
+ if (comparison_dominates_p (ent->comparison_code, code))
+ {
+ if (true_rtx)
+ return true_rtx;
+ else
+ break;
+ }
+ else
+ return false_rtx;
+ }
}
}
}
{
rtx op0 = const_arg0 ? const_arg0 : folded_arg0;
rtx op1 = const_arg1 ? const_arg1 : folded_arg1;
- new = simplify_relational_operation (code, mode, mode_arg0, op0, op1);
+ new_rtx = simplify_relational_operation (code, mode, mode_arg0, op0, op1);
}
break;
int is_shift
= (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
rtx y, inner_const, new_const;
+ rtx canon_const_arg1 = const_arg1;
enum rtx_code associate_code;
if (is_shift
|| INTVAL (const_arg1) < 0))
{
if (SHIFT_COUNT_TRUNCATED)
- const_arg1 = GEN_INT (INTVAL (const_arg1)
- & (GET_MODE_BITSIZE (mode) - 1));
+ canon_const_arg1 = GEN_INT (INTVAL (const_arg1)
+ & (GET_MODE_BITSIZE (mode)
+ - 1));
else
break;
}
associate_code = (is_shift || code == MINUS ? PLUS : code);
new_const = simplify_binary_operation (associate_code, mode,
- const_arg1, inner_const);
+ canon_const_arg1,
+ inner_const);
if (new_const == 0)
break;
break;
}
- new = simplify_binary_operation (code, mode,
+ new_rtx = simplify_binary_operation (code, mode,
const_arg0 ? const_arg0 : folded_arg0,
const_arg1 ? const_arg1 : folded_arg1);
break;
case RTX_TERNARY:
case RTX_BITFIELD_OPS:
- new = simplify_ternary_operation (code, mode, mode_arg0,
+ new_rtx = simplify_ternary_operation (code, mode, mode_arg0,
const_arg0 ? const_arg0 : folded_arg0,
const_arg1 ? const_arg1 : folded_arg1,
const_arg2 ? const_arg2 : XEXP (x, 2));
break;
}
- return new ? new : x;
+ return new_rtx ? new_rtx : x;
}
\f
/* Return a constant value currently equivalent to X.
if (GET_CODE (x) == SUBREG)
{
- rtx new;
+ enum machine_mode mode = GET_MODE (x);
+ enum machine_mode imode = GET_MODE (SUBREG_REG (x));
+ rtx new_rtx;
/* See if we previously assigned a constant value to this SUBREG. */
- if ((new = lookup_as_function (x, CONST_INT)) != 0
- || (new = lookup_as_function (x, CONST_DOUBLE)) != 0
- || (new = lookup_as_function (x, CONST_FIXED)) != 0)
- return new;
+ if ((new_rtx = lookup_as_function (x, CONST_INT)) != 0
+ || (new_rtx = lookup_as_function (x, CONST_DOUBLE)) != 0
+ || (new_rtx = lookup_as_function (x, CONST_FIXED)) != 0)
+ return new_rtx;
+
+ /* If we didn't find one, and the SUBREG is narrower than a word
+ while the inner register mode is wider than a word, see if we
+ previously assigned a constant value to the enclosing word_mode
+ SUBREG. */
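+ /* For example, with 32-bit words on a little-endian target, a
+ constant recorded for (subreg:SI (reg:DI 100) 0) also gives the
+ value of (subreg:QI (reg:DI 100) 0) as its low part; register
+ number 100 is purely illustrative. */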
+ if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode)
+ && GET_MODE_SIZE (word_mode) < GET_MODE_SIZE (imode))
+ {
+ int byte = SUBREG_BYTE (x) - subreg_lowpart_offset (mode, word_mode);
+ if (byte >= 0 && (byte % UNITS_PER_WORD) == 0)
+ {
+ rtx y = gen_rtx_SUBREG (word_mode, SUBREG_REG (x), byte);
+ new_rtx = lookup_as_function (y, CONST_INT);
+ if (new_rtx)
+ return gen_lowpart (mode, new_rtx);
+ }
+ }
+ /* Otherwise see if we already have a constant for the inner REG. */
if (REG_P (SUBREG_REG (x))
- && (new = equiv_constant (SUBREG_REG (x))) != 0)
- return simplify_subreg (GET_MODE (x), SUBREG_REG (x),
- GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
+ && (new_rtx = equiv_constant (SUBREG_REG (x))) != 0)
+ return simplify_subreg (mode, new_rtx, imode, SUBREG_BYTE (x));
return 0;
}
First simplify sources and addresses of all assignments
in the instruction, using previously-computed equivalents values.
Then install the new sources and destinations in the table
- of available values.
-
- If LIBCALL_INSN is nonzero, don't record any equivalence made in
- the insn. It means that INSN is inside libcall block. In this
- case LIBCALL_INSN is the corresponding insn with REG_LIBCALL. */
+ of available values. */
/* Data on one SET contained in the instruction. */
ENUM_BITFIELD(machine_mode) mode : 8;
/* A constant equivalent for SET_SRC, if any. */
rtx src_const;
- /* Original SET_SRC value used for libcall notes. */
- rtx orig_src;
/* Hash value of constant equivalent for SET_SRC. */
unsigned src_const_hash;
/* Table entry for constant equivalent for SET_SRC, if any. */
};
static void
-cse_insn (rtx insn, rtx libcall_insn)
+cse_insn (rtx insn)
{
rtx x = PATTERN (insn);
int i;
if (GET_CODE (x) == SET)
{
- sets = alloca (sizeof (struct set));
+ sets = XALLOCA (struct set);
sets[0].rtl = x;
/* Ignore SETs that are unconditional jumps.
{
int lim = XVECLEN (x, 0);
- sets = alloca (lim * sizeof (struct set));
+ sets = XALLOCAVEC (struct set, lim);
/* Find all regs explicitly clobbered in this insn,
and ensure they are not replaced with any other regs
{
rtx dest = SET_DEST (sets[i].rtl);
rtx src = SET_SRC (sets[i].rtl);
- rtx new = canon_reg (src, insn);
+ rtx new_rtx = canon_reg (src, insn);
- sets[i].orig_src = src;
- validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
+ validate_change (insn, &SET_SRC (sets[i].rtl), new_rtx, 1);
if (GET_CODE (dest) == ZERO_EXTRACT)
{
enum machine_mode wider_mode;
for (wider_mode = GET_MODE_WIDER_MODE (mode);
- GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
+ wider_mode != VOIDmode
+ && GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
&& src_related == 0;
wider_mode = GET_MODE_WIDER_MODE (wider_mode))
{
else if (validate_unshare_change
(insn, &SET_SRC (sets[i].rtl), trial, 0))
{
- rtx new = canon_reg (SET_SRC (sets[i].rtl), insn);
-
- /* If we just made a substitution inside a libcall, then we
- need to make the same substitution in any notes attached
- to the RETVAL insn. */
- if (libcall_insn
- && (REG_P (sets[i].orig_src)
- || GET_CODE (sets[i].orig_src) == SUBREG
- || MEM_P (sets[i].orig_src)))
- {
- rtx note = find_reg_equal_equiv_note (libcall_insn);
- if (note != 0)
- XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0),
- sets[i].orig_src,
- copy_rtx (new));
- df_notes_rescan (libcall_insn);
- }
+ rtx new_rtx = canon_reg (SET_SRC (sets[i].rtl), insn);
/* The result of apply_change_group can be ignored; see
canon_reg. */
- validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
+ validate_change (insn, &SET_SRC (sets[i].rtl), new_rtx, 1);
apply_change_group ();
break;
and hope for the best. */
if (n_sets == 1)
{
- rtx new, note;
+ rtx new_rtx, note;
- new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
- JUMP_LABEL (new) = XEXP (src, 0);
+ new_rtx = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
+ JUMP_LABEL (new_rtx) = XEXP (src, 0);
LABEL_NUSES (XEXP (src, 0))++;
/* Make sure to copy over REG_NON_LOCAL_GOTO. */
if (note)
{
XEXP (note, 1) = NULL_RTX;
- REG_NOTES (new) = note;
+ REG_NOTES (new_rtx) = note;
}
delete_insn_and_edges (insn);
- insn = new;
+ insn = new_rtx;
}
else
INSN_CODE (insn) = -1;
if (sets[i].src_elt == 0)
{
- /* Don't put a hard register source into the table if this is
- the last insn of a libcall. In this case, we only need
- to put src_eqv_elt in src_elt. */
- if (! find_reg_note (insn, REG_RETVAL, NULL_RTX))
- {
- struct table_elt *elt;
+ struct table_elt *elt;
- /* Note that these insert_regs calls cannot remove
- any of the src_elt's, because they would have failed to
- match if not still valid. */
- if (insert_regs (src, classp, 0))
- {
- rehash_using_reg (src);
- sets[i].src_hash = HASH (src, mode);
- }
- elt = insert (src, classp, sets[i].src_hash, mode);
- elt->in_memory = sets[i].src_in_memory;
- sets[i].src_elt = classp = elt;
+ /* Note that these insert_regs calls cannot remove
+ any of the src_elt's, because they would have failed to
+ match if not still valid. */
+ if (insert_regs (src, classp, 0))
+ {
+ rehash_using_reg (src);
+ sets[i].src_hash = HASH (src, mode);
}
- else
- sets[i].src_elt = classp;
+ elt = insert (src, classp, sets[i].src_hash, mode);
+ elt->in_memory = sets[i].src_in_memory;
+ sets[i].src_elt = classp = elt;
}
if (sets[i].src_const && sets[i].src_const_elt == 0
&& src != sets[i].src_const
size of it, and can't be sure that other BLKmode values
have the same or smaller size. */
|| GET_MODE (dest) == BLKmode
- /* Don't record values of destinations set inside a libcall block
- since we might delete the libcall. Things should have been set
- up so we won't want to reuse such a value, but we play it safe
- here. */
- || libcall_insn
/* If we didn't put a REG_EQUAL value or a source into the hash
table, there is no point in recording DEST. */
|| sets[i].src_elt == 0
then be used in the sequel and we may be changing a two-operand insn
into a three-operand insn.
- Also do not do this if we are operating on a copy of INSN.
-
- Also don't do this if INSN ends a libcall; this would cause an unrelated
- register to be set in the middle of a libcall, and we then get bad code
- if the libcall is deleted. */
+ Also do not do this if we are operating on a copy of INSN. */
if (n_sets == 1 && sets[0].rtl && REG_P (SET_DEST (sets[0].rtl))
&& NEXT_INSN (PREV_INSN (insn)) == insn
int src_q = REG_QTY (REGNO (SET_SRC (sets[0].rtl)));
struct qty_table_elem *src_ent = &qty_table[src_q];
- if ((src_ent->first_reg == REGNO (SET_DEST (sets[0].rtl)))
- && ! find_reg_note (insn, REG_RETVAL, NULL_RTX))
+ if (src_ent->first_reg == REGNO (SET_DEST (sets[0].rtl)))
{
/* Scan for the previous nonnote insn, but stop at a basic
block boundary. */
case ZERO_EXTEND:
case SUBREG:
{
- rtx new = cse_process_notes (XEXP (x, 0), object, changed);
+ rtx new_rtx = cse_process_notes (XEXP (x, 0), object, changed);
/* We don't substitute VOIDmode constants into these rtx,
since they would impede folding. */
- if (GET_MODE (new) != VOIDmode)
- validate_change (object, &XEXP (x, 0), new, 0);
+ if (GET_MODE (new_rtx) != VOIDmode)
+ validate_change (object, &XEXP (x, 0), new_rtx, 0);
return x;
}
&& (CONSTANT_P (ent->const_rtx)
|| REG_P (ent->const_rtx)))
{
- rtx new = gen_lowpart (GET_MODE (x), ent->const_rtx);
- if (new)
- return copy_rtx (new);
+ rtx new_rtx = gen_lowpart (GET_MODE (x), ent->const_rtx);
+ if (new_rtx)
+ return copy_rtx (new_rtx);
}
}
static rtx
cse_process_notes (rtx x, rtx object, bool *changed)
{
- rtx new = cse_process_notes_1 (x, object, changed);
- if (new != x)
+ rtx new_rtx = cse_process_notes_1 (x, object, changed);
+ if (new_rtx != x)
*changed = true;
- return new;
+ return new_rtx;
}
\f
{
basic_block bb;
rtx insn;
- rtx libcall_insn = NULL_RTX;
- int no_conflict = 0;
bb = ebb_data->path[path_entry].bb;
edge pointing to that bb. */
if (bb_has_eh_pred (bb))
{
- struct df_ref **def_rec;
+ df_ref *def_rec;
for (def_rec = df_get_artificial_defs (bb->index); *def_rec; def_rec++)
{
- struct df_ref *def = *def_rec;
+ df_ref def = *def_rec;
if (DF_REF_FLAGS (def) & DF_REF_AT_TOP)
invalidate (DF_REF_REG (def), GET_MODE (DF_REF_REG (def)));
}
FOR_BB_INSNS (bb, insn)
{
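+ /* Cache whether this block is optimized for speed, for the
+ rtx_cost calls made while processing its insns. */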
+ optimize_this_for_speed_p = optimize_bb_for_speed_p (bb);
/* If we have processed 1,000 insns, flush the hash table to
avoid extreme quadratic behavior. We must not include NOTEs
in the count since there may be more of them when generating
df_notes_rescan (insn);
}
- /* Track when we are inside in LIBCALL block. Inside such
- a block we do not want to record destinations. The last
- insn of a LIBCALL block is not considered to be part of
- the block, since its destination is the result of the
- block and hence should be recorded. */
- if (REG_NOTES (insn) != 0)
- {
- rtx p;
-
- if ((p = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
- libcall_insn = XEXP (p, 0);
- else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
- {
- /* Keep libcall_insn for the last SET insn of
- a no-conflict block to prevent changing the
- destination. */
- if (!no_conflict)
- libcall_insn = NULL_RTX;
- else
- no_conflict = -1;
- }
- }
-
- cse_insn (insn, libcall_insn);
+ cse_insn (insn);
- /* If we kept libcall_insn for a no-conflict bock,
- clear it here. */
- if (no_conflict == -1)
- {
- libcall_insn = NULL_RTX;
- no_conflict = 0;
- }
-
/* If we haven't already found an insn where we added a LABEL_REF,
check this one. */
if (INSN_P (insn) && !recorded_label_ref
}
}
- /* Make sure that libcalls don't span multiple basic blocks. */
- gcc_assert (libcall_insn == NULL_RTX);
-
/* With non-call exceptions, we are not always able to update
the CFG properly inside cse_insn. So clean up possibly
redundant EH edges here. */
return true;
}
-/* Return true if libcall is dead as a whole. */
-
-static bool
-dead_libcall_p (rtx insn, int *counts)
-{
- rtx note, set, new;
-
- /* See if there's a REG_EQUAL note on this insn and try to
- replace the source with the REG_EQUAL expression.
-
- We assume that insns with REG_RETVALs can only be reg->reg
- copies at this point. */
- note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
- if (!note)
- return false;
-
- set = single_set (insn);
- if (!set)
- return false;
-
- new = simplify_rtx (XEXP (note, 0));
- if (!new)
- new = XEXP (note, 0);
-
- /* While changing insn, we must update the counts accordingly. */
- count_reg_usage (insn, counts, NULL_RTX, -1);
-
- if (validate_change (insn, &SET_SRC (set), new, 0))
- {
- count_reg_usage (insn, counts, NULL_RTX, 1);
- remove_note (insn, find_reg_note (insn, REG_RETVAL, NULL_RTX));
- remove_note (insn, note);
- return true;
- }
-
- if (CONSTANT_P (new))
- {
- new = force_const_mem (GET_MODE (SET_DEST (set)), new);
- if (new && validate_change (insn, &SET_SRC (set), new, 0))
- {
- count_reg_usage (insn, counts, NULL_RTX, 1);
- remove_note (insn, find_reg_note (insn, REG_RETVAL, NULL_RTX));
- remove_note (insn, note);
- return true;
- }
- }
-
- count_reg_usage (insn, counts, NULL_RTX, 1);
- return false;
-}
-
/* Scan all the insns and delete any that are dead; i.e., they store a register
that is never used or they copy a register to itself.
{
int *counts;
rtx insn, prev;
- int in_libcall = 0, dead_libcall = 0;
int ndead = 0;
timevar_push (TV_DELETE_TRIVIALLY_DEAD);
if (!INSN_P (insn))
continue;
- /* Don't delete any insns that are part of a libcall block unless
- we can delete the whole libcall block.
-
- Flow or loop might get confused if we did that. Remember
- that we are scanning backwards. */
- if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
- {
- in_libcall = 1;
- live_insn = 1;
- dead_libcall = dead_libcall_p (insn, counts);
- }
- else if (in_libcall)
- live_insn = ! dead_libcall;
- else
- live_insn = insn_live_p (insn, counts);
+ live_insn = insn_live_p (insn, counts);
/* If this is a dead insn, delete it and show registers in it aren't
being used. */
delete_insn_and_edges (insn);
ndead++;
}
-
- if (in_libcall && find_reg_note (insn, REG_LIBCALL, NULL_RTX))
- {
- in_libcall = 0;
- dead_libcall = 0;
- }
}
if (dump_file && ndead)
permitted to change the mode of CC_SRC to a compatible mode. This
returns VOIDmode if no equivalent assignments were found.
Otherwise it returns the mode which CC_SRC should wind up with.
+ ORIG_BB should be the same as BB in the outermost cse_cc_succs call,
+ but is passed unmodified down to recursive calls in order to prevent
+ endless recursion.
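+ (Without this, an unreachable block that loops back to itself could
+ be revisited forever.)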
The main complexity in this function is handling the mode issues.
We may have more than one duplicate which we can eliminate, and we
try to find a mode which will work for multiple duplicates. */
static enum machine_mode
-cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode)
+cse_cc_succs (basic_block bb, basic_block orig_bb, rtx cc_reg, rtx cc_src,
+ bool can_change_mode)
{
bool found_equiv;
enum machine_mode mode;
continue;
if (EDGE_COUNT (e->dest->preds) != 1
- || e->dest == EXIT_BLOCK_PTR)
+ || e->dest == EXIT_BLOCK_PTR
+ /* Avoid endless recursion on unreachable blocks. */
+ || e->dest == orig_bb)
continue;
end = NEXT_INSN (BB_END (e->dest));
{
enum machine_mode submode;
- submode = cse_cc_succs (e->dest, cc_reg, cc_src, false);
+ submode = cse_cc_succs (e->dest, orig_bb, cc_reg, cc_src, false);
if (submode != VOIDmode)
{
gcc_assert (submode == mode);
the basic block. */
orig_mode = GET_MODE (cc_src);
- mode = cse_cc_succs (bb, cc_reg, cc_src, true);
+ mode = cse_cc_succs (bb, bb, cc_reg, cc_src, true);
if (mode != VOIDmode)
{
gcc_assert (mode == GET_MODE (cc_src));