/* Common subexpression elimination for GNU compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998
- 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
/* stdio.h must precede rtl.h for FFS. */
#include "params.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
+#include "df.h"
+#include "dbgcnt.h"
/* The basic idea of common subexpression elimination is to go
through the code, keeping a record of expressions that would
table since its use is guaranteed to be the insn immediately following
its definition and any other insn is presumed to invalidate it.
- Instead, we store below the value last assigned to CC0. If it should
- happen to be a constant, it is stored in preference to the actual
- assigned value. In case it is a constant, we store the mode in which
- the constant should be interpreted. */
+ Instead, we store below the current and last value assigned to CC0.
+ If it should happen to be a constant, it is stored in preference
+ to the actual assigned value. In case it is a constant, we store
+ the mode in which the constant should be interpreted. */
-static rtx prev_insn_cc0;
-static enum machine_mode prev_insn_cc0_mode;
-
-/* Previous actual insn. 0 if at first insn of basic block. */
-
-static rtx prev_insn;
+static rtx this_insn_cc0, prev_insn_cc0;
+static enum machine_mode this_insn_cc0_mode, prev_insn_cc0_mode;
#endif
/* Insn being scanned. */
static rtx this_insn;
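+
+/* True if the insn being scanned should be costed for speed rather
+ than size; this is the third argument passed to rtx_cost below. */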
+static bool optimize_this_for_speed_p;
/* Index by register number, gives the number of the next (or
previous) register in the chain of registers sharing the same
static unsigned int cse_reg_info_table_first_uninitialized;
/* The timestamp at the beginning of the current run of
- cse_basic_block. We increment this variable at the beginning of
- the current run of cse_basic_block. The timestamp field of a
+ cse_extended_basic_block. We increment this variable at the beginning of
+ the current run of cse_extended_basic_block. The timestamp field of a
cse_reg_info entry matches the value of this variable if and only
if the entry has been initialized during the current run of
- cse_basic_block. */
+ cse_extended_basic_block. */
static unsigned int cse_reg_info_timestamp;
/* A HARD_REG_SET containing all the hard registers for which there is
static HARD_REG_SET hard_regs_in_table;
-/* CUID of insn that starts the basic block currently being cse-processed. */
-
-static int cse_basic_block_start;
-
-/* CUID of insn that ends the basic block currently being cse-processed. */
-
-static int cse_basic_block_end;
-
-/* Vector mapping INSN_UIDs to cuids.
- The cuids are like uids but increase monotonically always.
- We use them to see whether a reg is used outside a given basic block. */
-
-static int *uid_cuid;
+/* True if CSE has altered the CFG. */
+static bool cse_cfg_altered;
-/* Highest UID in UID_CUID. */
-static int max_uid;
+/* True if CSE has altered conditional jump insns in such a way
+ that jump optimization should be redone. */
+static bool cse_jumps_altered;
-/* Get the cuid of an insn. */
-
-#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
-
-/* Nonzero if cse has altered conditional jump insns
- in such a way that jump optimization should be redone. */
-
-static int cse_jumps_altered;
-
-/* Nonzero if we put a LABEL_REF into the hash table for an INSN without a
- REG_LABEL, we have to rerun jump after CSE to put in the note. */
-static int recorded_label_ref;
+/* True if we put a LABEL_REF into the hash table for an INSN
+ without a REG_LABEL_OPERAND note; in that case we must rerun
+ jump after CSE to put the note in. */
+static bool recorded_label_ref;
/* canon_hash stores 1 in do_not_record
if it notices a reference to CC0, PC, or some other volatile
static int constant_pool_entries_cost;
static int constant_pool_entries_regcost;
-/* This data describes a block that will be processed by cse_basic_block. */
+/* This data describes a block that will be processed by
+ cse_extended_basic_block. */
struct cse_basic_block_data
{
- /* Lowest CUID value of insns in block. */
- int low_cuid;
- /* Highest CUID value of insns in block. */
- int high_cuid;
/* Total number of SETs in block. */
int nsets;
- /* Last insn in the block. */
- rtx last;
/* Size of current branch path, if any. */
int path_size;
- /* Current branch path, indicating which branches will be taken. */
+ /* Current path, indicating which basic_blocks will be processed. */
struct branch_path
{
- /* The branch insn. */
- rtx branch;
- /* Whether it should be taken or not. */
- enum taken {PATH_TAKEN, PATH_NOT_TAKEN} status;
+ /* The basic block for this path entry. */
+ basic_block bb;
} *path;
};
+
+/* Pointers to the live in/live out bitmaps for the boundaries of the
+ current EBB. */
+static bitmap cse_ebb_live_in, cse_ebb_live_out;
+
+/* A simple bitmap to track which basic blocks have been visited
+ already as part of an already processed extended basic block. */
+static sbitmap cse_visited_basic_blocks;
+
static bool fixed_base_plus_p (rtx x);
static int notreg_cost (rtx, enum rtx_code);
static int approx_reg_cost_1 (rtx *, void *);
static int mention_regs (rtx);
static int insert_regs (rtx, struct table_elt *, int);
static void remove_from_table (struct table_elt *, unsigned);
-static struct table_elt *lookup (rtx, unsigned, enum machine_mode);
+static void remove_pseudo_from_table (rtx, unsigned);
+static struct table_elt *lookup (rtx, unsigned, enum machine_mode);
static struct table_elt *lookup_for_remove (rtx, unsigned, enum machine_mode);
static rtx lookup_as_function (rtx, enum rtx_code);
static struct table_elt *insert (rtx, struct table_elt *, unsigned,
enum machine_mode);
static void merge_equiv_classes (struct table_elt *, struct table_elt *);
static void invalidate (rtx, enum machine_mode);
-static int cse_rtx_varies_p (rtx, int);
+static bool cse_rtx_varies_p (const_rtx, bool);
static void remove_invalid_refs (unsigned int);
static void remove_invalid_subreg_refs (unsigned int, unsigned int,
enum machine_mode);
static inline unsigned canon_hash (rtx, enum machine_mode);
static inline unsigned safe_hash (rtx, enum machine_mode);
-static unsigned hash_rtx_string (const char *);
+static inline unsigned hash_rtx_string (const char *);
static rtx canon_reg (rtx, rtx);
static enum rtx_code find_comparison_args (enum rtx_code, rtx *, rtx *,
static void record_jump_equiv (rtx, bool);
static void record_jump_cond (enum rtx_code, enum machine_mode, rtx, rtx,
int);
-static void cse_insn (rtx, rtx);
-static void cse_end_of_basic_block (rtx, struct cse_basic_block_data *,
- int);
+static void cse_insn (rtx);
+static void cse_prescan_path (struct cse_basic_block_data *);
static void invalidate_from_clobbers (rtx);
-static rtx cse_process_notes (rtx, rtx);
-static rtx cse_basic_block (rtx, rtx, struct branch_path *);
+static rtx cse_process_notes (rtx, rtx, bool *);
+static void cse_extended_basic_block (struct cse_basic_block_data *);
static void count_reg_usage (rtx, int *, rtx, int);
static int check_for_label_ref (rtx *, void *);
extern void dump_class (struct table_elt*);
static void flush_hash_table (void);
static bool insn_live_p (rtx, int *);
static bool set_live_p (rtx, rtx, int *);
-static bool dead_libcall_p (rtx, int *);
static int cse_change_cc_mode (rtx *, void *);
static void cse_change_cc_mode_insn (rtx, rtx);
static void cse_change_cc_mode_insns (rtx, rtx, rtx);
-static enum machine_mode cse_cc_succs (basic_block, rtx, rtx, bool);
+static enum machine_mode cse_cc_succs (basic_block, basic_block, rtx, rtx,
+ bool);
\f
#undef RTL_HOOKS_GEN_LOWPART
approx_reg_cost_1 (rtx *xp, void *data)
{
rtx x = *xp;
- int *cost_p = data;
+ int *cost_p = (int *) data;
if (x && REG_P (x))
{
&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)),
GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))))
? 0
- : rtx_cost (x, outer) * 2);
+ : rtx_cost (x, outer, optimize_this_for_speed_p) * 2);
}
\f
}
#ifdef HAVE_cc0
- prev_insn = 0;
prev_insn_cc0 = 0;
#endif
}
- OLD is not changing; NEW is. */
+ OLD_REG is not changing; NEW_REG is. */
static void
-make_regs_eqv (unsigned int new, unsigned int old)
+make_regs_eqv (unsigned int new_reg, unsigned int old_reg)
{
unsigned int lastr, firstr;
- int q = REG_QTY (old);
+ int q = REG_QTY (old_reg);
struct qty_table_elem *ent;
ent = &qty_table[q];
/* Nothing should become eqv until it has a "non-invalid" qty number. */
- gcc_assert (REGNO_QTY_VALID_P (old));
+ gcc_assert (REGNO_QTY_VALID_P (old_reg));
- REG_QTY (new) = q;
+ REG_QTY (new_reg) = q;
firstr = ent->first_reg;
lastr = ent->last_reg;
that not only can they not be allocated by the compiler, but
they cannot be used in substitutions or canonicalizations
either. */
- && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
- && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
- || (new >= FIRST_PSEUDO_REGISTER
+ && (new_reg >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new_reg) != NO_REGS)
+ && ((new_reg < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new_reg))
+ || (new_reg >= FIRST_PSEUDO_REGISTER
&& (firstr < FIRST_PSEUDO_REGISTER
- || ((uid_cuid[REGNO_LAST_UID (new)] > cse_basic_block_end
- || (uid_cuid[REGNO_FIRST_UID (new)]
- < cse_basic_block_start))
- && (uid_cuid[REGNO_LAST_UID (new)]
- > uid_cuid[REGNO_LAST_UID (firstr)]))))))
+ || (bitmap_bit_p (cse_ebb_live_out, new_reg)
+ && !bitmap_bit_p (cse_ebb_live_out, firstr))
+ || (bitmap_bit_p (cse_ebb_live_in, new_reg)
+ && !bitmap_bit_p (cse_ebb_live_in, firstr))))))
{
- reg_eqv_table[firstr].prev = new;
- reg_eqv_table[new].next = firstr;
- reg_eqv_table[new].prev = -1;
- ent->first_reg = new;
+ reg_eqv_table[firstr].prev = new_reg;
+ reg_eqv_table[new_reg].next = firstr;
+ reg_eqv_table[new_reg].prev = -1;
+ ent->first_reg = new_reg;
}
else
{
equivalent for anything. */
while (lastr < FIRST_PSEUDO_REGISTER && reg_eqv_table[lastr].prev >= 0
&& (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
- && new >= FIRST_PSEUDO_REGISTER)
+ && new_reg >= FIRST_PSEUDO_REGISTER)
lastr = reg_eqv_table[lastr].prev;
- reg_eqv_table[new].next = reg_eqv_table[lastr].next;
+ reg_eqv_table[new_reg].next = reg_eqv_table[lastr].next;
if (reg_eqv_table[lastr].next >= 0)
- reg_eqv_table[reg_eqv_table[lastr].next].prev = new;
+ reg_eqv_table[reg_eqv_table[lastr].next].prev = new_reg;
else
- qty_table[q].last_reg = new;
- reg_eqv_table[lastr].next = new;
- reg_eqv_table[new].prev = lastr;
+ qty_table[q].last_reg = new_reg;
+ reg_eqv_table[lastr].next = new_reg;
+ reg_eqv_table[new_reg].prev = lastr;
}
}
if (code == REG)
{
unsigned int regno = REGNO (x);
- unsigned int endregno
- = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
- : hard_regno_nregs[regno][GET_MODE (x)]);
+ unsigned int endregno = END_REGNO (x);
unsigned int i;
for (i = regno; i < endregno; i++)
free_element_chain = elt;
}
+/* Same as above, but X is a pseudo-register. */
+
+static void
+remove_pseudo_from_table (rtx x, unsigned int hash)
+{
+ struct table_elt *elt;
+
+ /* Because a pseudo-register can be referenced in more than one
+ mode, we might have to remove more than one table entry. */
+ while ((elt = lookup_for_remove (x, hash, VOIDmode)))
+ remove_from_table (elt, hash);
+}
+
/* Look up X in the hash table and return its table element,
or 0 if X is not in the table.
struct table_elt *p
= lookup (x, SAFE_HASH (x, VOIDmode), GET_MODE (x));
- /* If we are looking for a CONST_INT, the mode doesn't really matter, as
- long as we are narrowing. So if we looked in vain for a mode narrower
- than word_mode before, look for word_mode now. */
- if (p == 0 && code == CONST_INT
- && GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (word_mode))
- {
- x = copy_rtx (x);
- PUT_MODE (x, word_mode);
- p = lookup (x, SAFE_HASH (x, VOIDmode), word_mode);
- }
-
if (p == 0)
return 0;
/* If X is a hard register, show it is being put in the table. */
if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
- {
- unsigned int regno = REGNO (x);
- unsigned int endregno = regno + hard_regno_nregs[regno][GET_MODE (x)];
- unsigned int i;
-
- for (i = regno; i < endregno; i++)
- SET_HARD_REG_BIT (hard_regs_in_table, i);
- }
+ add_to_hard_reg_set (&hard_regs_in_table, GET_MODE (x), REGNO (x));
/* Put an element for X into the right hash bucket. */
static void
merge_equiv_classes (struct table_elt *class1, struct table_elt *class2)
{
- struct table_elt *elt, *next, *new;
+ struct table_elt *elt, *next, *new_elt;
/* Ensure we start with the head of the classes. */
class1 = class1->first_same_value;
delete_reg_equiv (REGNO (exp));
}
- remove_from_table (elt, hash);
+ if (REG_P (exp) && REGNO (exp) >= FIRST_PSEUDO_REGISTER)
+ remove_pseudo_from_table (exp, hash);
+ else
+ remove_from_table (elt, hash);
if (insert_regs (exp, class1, 0) || need_rehash)
{
rehash_using_reg (exp);
hash = HASH (exp, mode);
}
- new = insert (exp, class1, hash, mode);
- new->in_memory = hash_arg_in_memory;
+ new_elt = insert (exp, class1, hash, mode);
+ new_elt->in_memory = hash_arg_in_memory;
}
}
}
SUBREG_TICKED (regno) = -1;
if (regno >= FIRST_PSEUDO_REGISTER)
- {
- /* Because a register can be referenced in more than one mode,
- we might have to remove more than one table entry. */
- struct table_elt *elt;
-
- while ((elt = lookup_for_remove (x, hash, GET_MODE (x))))
- remove_from_table (elt, hash);
- }
+ remove_pseudo_from_table (x, hash);
else
{
HOST_WIDE_INT in_table
= TEST_HARD_REG_BIT (hard_regs_in_table, regno);
- unsigned int endregno
- = regno + hard_regno_nregs[regno][GET_MODE (x)];
+ unsigned int endregno = END_HARD_REGNO (x);
unsigned int tregno, tendregno, rn;
struct table_elt *p, *next;
continue;
tregno = REGNO (p->exp);
- tendregno
- = tregno + hard_regno_nregs[tregno][GET_MODE (p->exp)];
+ tendregno = END_HARD_REGNO (p->exp);
if (tendregno > regno && tregno < endregno)
remove_from_table (p, hash);
}
continue;
regno = REGNO (p->exp);
- endregno = regno + hard_regno_nregs[regno][GET_MODE (p->exp)];
+ endregno = END_HARD_REGNO (p->exp);
for (i = regno; i < endregno; i++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
return plus_constant (q->exp, offset);
}
\f
+
/* Hash a string. Just add its bytes up. */
static inline unsigned
hash_rtx_string (const char *ps)
return hash;
}
-/* Hash an rtx. We are careful to make sure the value is never negative.
- Equivalent registers hash identically.
- MODE is used in hashing for CONST_INTs only;
- otherwise the mode of X is used.
-
- Store 1 in DO_NOT_RECORD_P if any subexpression is volatile.
-
- If HASH_ARG_IN_MEMORY_P is not NULL, store 1 in it if X contains
- a MEM rtx which does not have the RTX_UNCHANGING_P bit set.
-
- Note that cse_insn knows that the hash code of a MEM expression
- is just (int) MEM plus the hash code of the address. */
+/* Same as hash_rtx, but call CB on each rtx if it is not NULL.
+ When the callback returns true, we continue hashing with the
+ replacement rtx and mode it supplies; in effect the callback
+ lets the caller substitute an equivalent rtx on the fly before
+ it is hashed. */
unsigned
-hash_rtx (rtx x, enum machine_mode mode, int *do_not_record_p,
- int *hash_arg_in_memory_p, bool have_reg_qty)
+hash_rtx_cb (const_rtx x, enum machine_mode mode,
+ int *do_not_record_p, int *hash_arg_in_memory_p,
+ bool have_reg_qty, hash_rtx_callback_function cb)
{
int i, j;
unsigned hash = 0;
enum rtx_code code;
const char *fmt;
+ enum machine_mode newmode;
+ rtx newx;
/* Used to turn recursion into iteration. We can't rely on GCC's
tail-recursion elimination since we need to keep accumulating values
if (x == 0)
return hash;
+ /* Invoke the callback first. */
+ if (cb != NULL
+ && ((*cb) (x, mode, &newx, &newmode)))
+ {
+ hash += hash_rtx_cb (newx, newmode, do_not_record_p,
+ hash_arg_in_memory_p, have_reg_qty, cb);
+ return hash;
+ }
+
code = GET_CODE (x);
switch (code)
{
{
unsigned int regno = REGNO (x);
- if (!reload_completed)
+ if (do_not_record_p && !reload_completed)
{
/* On some machines, we can't record any non-fixed hard register,
because extending its life will cause reload problems. We
+ (unsigned int) CONST_DOUBLE_HIGH (x));
return hash;
+ case CONST_FIXED:
+ hash += (unsigned int) code + (unsigned int) GET_MODE (x);
+ hash += fixed_hash (CONST_FIXED_VALUE (x));
+ return hash;
+
case CONST_VECTOR:
{
int units;
for (i = 0; i < units; ++i)
{
elt = CONST_VECTOR_ELT (x, i);
- hash += hash_rtx (elt, GET_MODE (elt), do_not_record_p,
- hash_arg_in_memory_p, have_reg_qty);
+ hash += hash_rtx_cb (elt, GET_MODE (elt),
+ do_not_record_p, hash_arg_in_memory_p,
+ have_reg_qty, cb);
}
return hash;
case MEM:
/* We don't record if marked volatile or if BLKmode since we don't
know the size of the move. */
- if (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode)
+ if (do_not_record_p && (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode))
{
*do_not_record_p = 1;
return 0;
case CC0:
case CALL:
case UNSPEC_VOLATILE:
- *do_not_record_p = 1;
- return 0;
+ if (do_not_record_p)
+ {
+ *do_not_record_p = 1;
+ return 0;
+ }
+ else
+ return hash;
+ break;
case ASM_OPERANDS:
- if (MEM_VOLATILE_P (x))
+ if (do_not_record_p && MEM_VOLATILE_P (x))
{
*do_not_record_p = 1;
return 0;
{
for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
{
- hash += (hash_rtx (ASM_OPERANDS_INPUT (x, i),
- GET_MODE (ASM_OPERANDS_INPUT (x, i)),
- do_not_record_p, hash_arg_in_memory_p,
- have_reg_qty)
+ hash += (hash_rtx_cb (ASM_OPERANDS_INPUT (x, i),
+ GET_MODE (ASM_OPERANDS_INPUT (x, i)),
+ do_not_record_p, hash_arg_in_memory_p,
+ have_reg_qty, cb)
+ hash_rtx_string
- (ASM_OPERANDS_INPUT_CONSTRAINT (x, i)));
+ (ASM_OPERANDS_INPUT_CONSTRAINT (x, i)));
}
hash += hash_rtx_string (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
x = XEXP (x, i);
goto repeat;
}
-
- hash += hash_rtx (XEXP (x, i), 0, do_not_record_p,
- hash_arg_in_memory_p, have_reg_qty);
+
+ hash += hash_rtx_cb (XEXP (x, i), 0, do_not_record_p,
+ hash_arg_in_memory_p,
+ have_reg_qty, cb);
break;
case 'E':
for (j = 0; j < XVECLEN (x, i); j++)
- hash += hash_rtx (XVECEXP (x, i, j), 0, do_not_record_p,
- hash_arg_in_memory_p, have_reg_qty);
+ hash += hash_rtx_cb (XVECEXP (x, i, j), 0, do_not_record_p,
+ hash_arg_in_memory_p,
+ have_reg_qty, cb);
break;
case 's':
return hash;
}
+/* Hash an rtx. We are careful to make sure the value is never negative.
+ Equivalent registers hash identically.
+ MODE is used in hashing for CONST_INTs only;
+ otherwise the mode of X is used.
+
+ Store 1 in DO_NOT_RECORD_P if any subexpression is volatile.
+
+ If HASH_ARG_IN_MEMORY_P is not NULL, store 1 in it if X contains
+ a MEM rtx which is not marked MEM_READONLY_P.
+
+ Note that cse_insn knows that the hash code of a MEM expression
+ is just (int) MEM plus the hash code of the address. */
+
+unsigned
+hash_rtx (const_rtx x, enum machine_mode mode, int *do_not_record_p,
+ int *hash_arg_in_memory_p, bool have_reg_qty)
+{
+ return hash_rtx_cb (x, mode, do_not_record_p,
+ hash_arg_in_memory_p, have_reg_qty, NULL);
+}
+
/* Hash an rtx X for cse via hash_rtx.
Stores 1 in do_not_record if any subexpression is volatile.
Stores 1 in hash_arg_in_memory if X contains a mem rtx which
If FOR_GCSE is true, we compare X and Y for equivalence for GCSE. */
int
-exp_equiv_p (rtx x, rtx y, int validate, bool for_gcse)
+exp_equiv_p (const_rtx x, const_rtx y, int validate, bool for_gcse)
{
int i, j;
enum rtx_code code;
case CC0:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_FIXED:
return x == y;
case LABEL_REF:
{
unsigned int regno = REGNO (y);
unsigned int i;
- unsigned int endregno
- = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
- : hard_regno_nregs[regno][GET_MODE (y)]);
+ unsigned int endregno = END_REGNO (y);
/* If the quantities are not the same, the expressions are not
equivalent. If there are and we are not to validate, they
executions of the program. 0 means X can be compared reliably
against certain constants or near-constants. */
-static int
-cse_rtx_varies_p (rtx x, int from_alias)
+static bool
+cse_rtx_varies_p (const_rtx x, bool from_alias)
{
/* We need not check for X and the equivalence class being of the same
mode because if X is equivalent to a constant in some mode, it
static void
validate_canon_reg (rtx *xloc, rtx insn)
{
- rtx new = canon_reg (*xloc, insn);
+ if (*xloc)
+ {
+ rtx new_rtx = canon_reg (*xloc, insn);
- /* If replacing pseudo with hard reg or vice versa, ensure the
- insn remains valid. Likewise if the insn has MATCH_DUPs. */
- if (insn != 0 && new != 0)
- validate_change (insn, xloc, new, 1);
- else
- *xloc = new;
+ /* If replacing pseudo with hard reg or vice versa, ensure the
+ insn remains valid. Likewise if the insn has MATCH_DUPs. */
+ gcc_assert (insn && new_rtx);
+ validate_change (insn, xloc, new_rtx, 1);
+ }
}
/* Canonicalize an expression:
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_FIXED:
case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
enum machine_mode mode;
const char *fmt;
int i;
- rtx new = 0;
+ rtx new_rtx = 0;
int changed = 0;
/* Operands of X. */
{
case MEM:
case SUBREG:
- if ((new = equiv_constant (x)) != NULL_RTX)
- return new;
+ if ((new_rtx = equiv_constant (x)) != NULL_RTX)
+ return new_rtx;
return x;
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_FIXED:
case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
{
rtx folded_arg = XEXP (x, i), const_arg;
enum machine_mode mode_arg = GET_MODE (folded_arg);
+
+ switch (GET_CODE (folded_arg))
+ {
+ case MEM:
+ case REG:
+ case SUBREG:
+ const_arg = equiv_constant (folded_arg);
+ break;
+
+ case CONST:
+ case CONST_INT:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ case CONST_FIXED:
+ case CONST_VECTOR:
+ const_arg = folded_arg;
+ break;
+
#ifdef HAVE_cc0
- if (CC0_P (folded_arg))
- folded_arg = prev_insn_cc0, mode_arg = prev_insn_cc0_mode;
+ case CC0:
+ folded_arg = prev_insn_cc0;
+ mode_arg = prev_insn_cc0_mode;
+ const_arg = equiv_constant (folded_arg);
+ break;
#endif
- const_arg = equiv_constant (folded_arg);
+
+ default:
+ folded_arg = fold_rtx (folded_arg, insn);
+ const_arg = equiv_constant (folded_arg);
+ break;
+ }
/* For the first three operands, see if the operand
is constant or equivalent to a constant. */
if (insn == NULL_RTX && !changed)
x = copy_rtx (x);
changed = 1;
- validate_change (insn, &XEXP (x, i), folded_arg, 1);
+ validate_unshare_change (insn, &XEXP (x, i), folded_arg, 1);
}
if (changed)
{
case RTX_UNARY:
{
- int is_const = 0;
-
/* We can't simplify extension ops unless we know the
original mode. */
if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
&& mode_arg0 == VOIDmode)
break;
- /* If we had a CONST, strip it off and put it back later if we
- fold. */
- if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
- is_const = 1, const_arg0 = XEXP (const_arg0, 0);
-
- new = simplify_unary_operation (code, mode,
+ new_rtx = simplify_unary_operation (code, mode,
const_arg0 ? const_arg0 : folded_arg0,
mode_arg0);
- /* NEG of PLUS could be converted into MINUS, but that causes
- expressions of the form
- (CONST (MINUS (CONST_INT) (SYMBOL_REF)))
- which many ports mistakenly treat as LEGITIMATE_CONSTANT_P.
- FIXME: those ports should be fixed. */
- if (new != 0 && is_const
- && GET_CODE (new) == PLUS
- && (GET_CODE (XEXP (new, 0)) == SYMBOL_REF
- || GET_CODE (XEXP (new, 0)) == LABEL_REF)
- && GET_CODE (XEXP (new, 1)) == CONST_INT)
- new = gen_rtx_CONST (mode, new);
}
break;
if (const_arg0 == 0 || const_arg1 == 0)
{
struct table_elt *p0, *p1;
- rtx true_rtx = const_true_rtx, false_rtx = const0_rtx;
+ rtx true_rtx, false_rtx;
enum machine_mode mode_arg1;
-#ifdef FLOAT_STORE_FLAG_VALUE
if (SCALAR_FLOAT_MODE_P (mode))
{
+#ifdef FLOAT_STORE_FLAG_VALUE
true_rtx = (CONST_DOUBLE_FROM_REAL_VALUE
(FLOAT_STORE_FLAG_VALUE (mode), mode));
+#else
+ true_rtx = NULL_RTX;
+#endif
false_rtx = CONST0_RTX (mode);
}
-#endif
+ else
+ {
+ true_rtx = const_true_rtx;
+ false_rtx = const0_rtx;
+ }
code = find_comparison_args (code, &folded_arg0, &folded_arg1,
&mode_arg0, &mode_arg1);
/* If we have a cheaper expression now, use that
and try folding it further, from the top. */
if (cheapest_simplification != x)
- return fold_rtx (cheapest_simplification, insn);
+ return fold_rtx (copy_rtx (cheapest_simplification),
+ insn);
}
}
- /* Some addresses are known to be nonzero. We don't know
- their sign, but equality comparisons are known. */
- if (const_arg1 == const0_rtx
- && nonzero_address_p (folded_arg0))
- {
- if (code == EQ)
- return false_rtx;
- else if (code == NE)
- return true_rtx;
- }
-
/* See if the two operands are the same. */
- if (folded_arg0 == folded_arg1
- || (REG_P (folded_arg0)
- && REG_P (folded_arg1)
- && (REG_QTY (REGNO (folded_arg0))
- == REG_QTY (REGNO (folded_arg1))))
+ if ((REG_P (folded_arg0)
+ && REG_P (folded_arg1)
+ && (REG_QTY (REGNO (folded_arg0))
+ == REG_QTY (REGNO (folded_arg1))))
|| ((p0 = lookup (folded_arg0,
SAFE_HASH (folded_arg0, mode_arg0),
mode_arg0))
SAFE_HASH (folded_arg1, mode_arg0),
mode_arg0))
&& p0->first_same_value == p1->first_same_value))
- {
- /* Sadly two equal NaNs are not equivalent. */
- if (!HONOR_NANS (mode_arg0))
- return ((code == EQ || code == LE || code == GE
- || code == LEU || code == GEU || code == UNEQ
- || code == UNLE || code == UNGE
- || code == ORDERED)
- ? true_rtx : false_rtx);
- /* Take care for the FP compares we can resolve. */
- if (code == UNEQ || code == UNLE || code == UNGE)
- return true_rtx;
- if (code == LTGT || code == LT || code == GT)
- return false_rtx;
- }
+ folded_arg1 = folded_arg0;
/* If FOLDED_ARG0 is a register, see if the comparison we are
doing now is either the same as we did before or the reverse
const_arg1))
|| (REG_P (folded_arg1)
&& (REG_QTY (REGNO (folded_arg1)) == ent->comparison_qty))))
- return (comparison_dominates_p (ent->comparison_code, code)
- ? true_rtx : false_rtx);
+ {
+ if (comparison_dominates_p (ent->comparison_code, code))
+ {
+ if (true_rtx)
+ return true_rtx;
+ else
+ break;
+ }
+ else
+ return false_rtx;
+ }
}
}
}
/* If we are comparing against zero, see if the first operand is
equivalent to an IOR with a constant. If so, we may be able to
determine the result of this comparison. */
-
- if (const_arg1 == const0_rtx)
+ if (const_arg1 == const0_rtx && !const_arg0)
{
rtx y = lookup_as_function (folded_arg0, IOR);
rtx inner_const;
&& (inner_const = equiv_constant (XEXP (y, 1))) != 0
&& GET_CODE (inner_const) == CONST_INT
&& INTVAL (inner_const) != 0)
- {
- int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
- int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
- && (INTVAL (inner_const)
- & ((HOST_WIDE_INT) 1 << sign_bitnum)));
- rtx true_rtx = const_true_rtx, false_rtx = const0_rtx;
-
-#ifdef FLOAT_STORE_FLAG_VALUE
- if (SCALAR_FLOAT_MODE_P (mode))
- {
- true_rtx = (CONST_DOUBLE_FROM_REAL_VALUE
- (FLOAT_STORE_FLAG_VALUE (mode), mode));
- false_rtx = CONST0_RTX (mode);
- }
-#endif
-
- switch (code)
- {
- case EQ:
- return false_rtx;
- case NE:
- return true_rtx;
- case LT: case LE:
- if (has_sign)
- return true_rtx;
- break;
- case GT: case GE:
- if (has_sign)
- return false_rtx;
- break;
- default:
- break;
- }
- }
+ folded_arg0 = gen_rtx_IOR (mode_arg0, XEXP (y, 0), inner_const);
}
{
rtx op0 = const_arg0 ? const_arg0 : folded_arg0;
rtx op1 = const_arg1 ? const_arg1 : folded_arg1;
- new = simplify_relational_operation (code, mode, mode_arg0, op0, op1);
+ new_rtx = simplify_relational_operation (code, mode, mode_arg0, op0, op1);
}
break;
int is_shift
= (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
rtx y, inner_const, new_const;
+ rtx canon_const_arg1 = const_arg1;
enum rtx_code associate_code;
if (is_shift
|| INTVAL (const_arg1) < 0))
{
if (SHIFT_COUNT_TRUNCATED)
- const_arg1 = GEN_INT (INTVAL (const_arg1)
- & (GET_MODE_BITSIZE (mode) - 1));
+ canon_const_arg1 = GEN_INT (INTVAL (const_arg1)
+ & (GET_MODE_BITSIZE (mode)
+ - 1));
else
break;
}
&& exact_log2 (- INTVAL (const_arg1)) >= 0)))
break;
+ /* ??? Vector mode shifts by scalar
+ shift operand are not supported yet. */
+ if (is_shift && VECTOR_MODE_P (mode))
+ break;
+
if (is_shift
&& (INTVAL (inner_const) >= GET_MODE_BITSIZE (mode)
|| INTVAL (inner_const) < 0))
associate_code = (is_shift || code == MINUS ? PLUS : code);
new_const = simplify_binary_operation (associate_code, mode,
- const_arg1, inner_const);
+ canon_const_arg1,
+ inner_const);
if (new_const == 0)
break;
break;
}
- new = simplify_binary_operation (code, mode,
+ new_rtx = simplify_binary_operation (code, mode,
const_arg0 ? const_arg0 : folded_arg0,
const_arg1 ? const_arg1 : folded_arg1);
break;
case RTX_TERNARY:
case RTX_BITFIELD_OPS:
- new = simplify_ternary_operation (code, mode, mode_arg0,
+ new_rtx = simplify_ternary_operation (code, mode, mode_arg0,
const_arg0 ? const_arg0 : folded_arg0,
const_arg1 ? const_arg1 : folded_arg1,
const_arg2 ? const_arg2 : XEXP (x, 2));
break;
}
- return new ? new : x;
+ return new_rtx ? new_rtx : x;
}
\f
/* Return a constant value currently equivalent to X.
if (GET_CODE (x) == SUBREG)
{
- rtx new;
+ enum machine_mode mode = GET_MODE (x);
+ enum machine_mode imode = GET_MODE (SUBREG_REG (x));
+ rtx new_rtx;
/* See if we previously assigned a constant value to this SUBREG. */
- if ((new = lookup_as_function (x, CONST_INT)) != 0
- || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
- return new;
+ if ((new_rtx = lookup_as_function (x, CONST_INT)) != 0
+ || (new_rtx = lookup_as_function (x, CONST_DOUBLE)) != 0
+ || (new_rtx = lookup_as_function (x, CONST_FIXED)) != 0)
+ return new_rtx;
+
+ /* If we didn't and if doing so makes sense, see if we previously
+ assigned a constant value to the enclosing word mode SUBREG. */
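+ /* For example (illustrative case, assuming a little-endian target
+ with 32-bit words): a recorded CONST_INT for (subreg:SI (reg:DI 100) 0)
+ would let us fold (subreg:QI (reg:DI 100) 0) to its low byte. */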
+ if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode)
+ && GET_MODE_SIZE (word_mode) < GET_MODE_SIZE (imode))
+ {
+ int byte = SUBREG_BYTE (x) - subreg_lowpart_offset (mode, word_mode);
+ if (byte >= 0 && (byte % UNITS_PER_WORD) == 0)
+ {
+ rtx y = gen_rtx_SUBREG (word_mode, SUBREG_REG (x), byte);
+ new_rtx = lookup_as_function (y, CONST_INT);
+ if (new_rtx)
+ return gen_lowpart (mode, new_rtx);
+ }
+ }
+ /* Otherwise see if we already have a constant for the inner REG. */
if (REG_P (SUBREG_REG (x))
- && (new = equiv_constant (SUBREG_REG (x))) != 0)
- return simplify_subreg (GET_MODE (x), SUBREG_REG (x),
- GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
+ && (new_rtx = equiv_constant (SUBREG_REG (x))) != 0)
+ return simplify_subreg (mode, new_rtx, imode, SUBREG_BYTE (x));
return 0;
}
First simplify sources and addresses of all assignments
in the instruction, using previously-computed equivalents values.
Then install the new sources and destinations in the table
- of available values.
-
- If LIBCALL_INSN is nonzero, don't record any equivalence made in
- the insn. It means that INSN is inside libcall block. In this
- case LIBCALL_INSN is the corresponding insn with REG_LIBCALL. */
+ of available values. */
/* Data on one SET contained in the instruction. */
ENUM_BITFIELD(machine_mode) mode : 8;
/* A constant equivalent for SET_SRC, if any. */
rtx src_const;
- /* Original SET_SRC value used for libcall notes. */
- rtx orig_src;
/* Hash value of constant equivalent for SET_SRC. */
unsigned src_const_hash;
/* Table entry for constant equivalent for SET_SRC, if any. */
};
static void
-cse_insn (rtx insn, rtx libcall_insn)
+cse_insn (rtx insn)
{
rtx x = PATTERN (insn);
int i;
rtx tem;
int n_sets = 0;
-#ifdef HAVE_cc0
- /* Records what this insn does to set CC0. */
- rtx this_insn_cc0 = 0;
- enum machine_mode this_insn_cc0_mode = VOIDmode;
-#endif
-
rtx src_eqv = 0;
struct table_elt *src_eqv_elt = 0;
int src_eqv_volatile = 0;
struct set *sets = (struct set *) 0;
this_insn = insn;
+#ifdef HAVE_cc0
+ /* Records what this insn does to set CC0. */
+ this_insn_cc0 = 0;
+ this_insn_cc0_mode = VOIDmode;
+#endif
/* Find all the SETs and CLOBBERs in this instruction.
Record all the SETs in the array `set' and count them.
if (GET_CODE (x) == SET)
{
- sets = alloca (sizeof (struct set));
+ sets = XALLOCA (struct set);
sets[0].rtl = x;
/* Ignore SETs that are unconditional jumps.
{
int lim = XVECLEN (x, 0);
- sets = alloca (lim * sizeof (struct set));
+ sets = XALLOCAVEC (struct set, lim);
/* Find all regs explicitly clobbered in this insn,
and ensure they are not replaced with any other regs
This does nothing when a register is clobbered
because we have already invalidated the reg. */
if (MEM_P (XEXP (y, 0)))
- canon_reg (XEXP (y, 0), NULL_RTX);
+ canon_reg (XEXP (y, 0), insn);
}
else if (GET_CODE (y) == USE
&& ! (REG_P (XEXP (y, 0))
&& REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
- canon_reg (y, NULL_RTX);
+ canon_reg (y, insn);
else if (GET_CODE (y) == CALL)
{
/* The result of apply_change_group can be ignored; see
else if (GET_CODE (x) == CLOBBER)
{
if (MEM_P (XEXP (x, 0)))
- canon_reg (XEXP (x, 0), NULL_RTX);
+ canon_reg (XEXP (x, 0), insn);
}
/* Canonicalize a USE of a pseudo register or memory location. */
else if (GET_CODE (x) == USE
&& ! (REG_P (XEXP (x, 0))
&& REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
- canon_reg (XEXP (x, 0), NULL_RTX);
+ canon_reg (XEXP (x, 0), insn);
else if (GET_CODE (x) == CALL)
{
/* The result of apply_change_group can be ignored; see canon_reg. */
&& (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
|| GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
{
- src_eqv = fold_rtx (canon_reg (XEXP (tem, 0), NULL_RTX), insn);
- XEXP (tem, 0) = src_eqv;
+ /* The result of apply_change_group can be ignored; see canon_reg. */
+ canon_reg (XEXP (tem, 0), insn);
+ apply_change_group ();
+ src_eqv = fold_rtx (XEXP (tem, 0), insn);
+ XEXP (tem, 0) = copy_rtx (src_eqv);
+ df_notes_rescan (insn);
}
/* Canonicalize sources and addresses of destinations.
{
rtx dest = SET_DEST (sets[i].rtl);
rtx src = SET_SRC (sets[i].rtl);
- rtx new = canon_reg (src, insn);
+ rtx new_rtx = canon_reg (src, insn);
- sets[i].orig_src = src;
- validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
+ validate_change (insn, &SET_SRC (sets[i].rtl), new_rtx, 1);
if (GET_CODE (dest) == ZERO_EXTRACT)
{
enum machine_mode wider_mode;
for (wider_mode = GET_MODE_WIDER_MODE (mode);
- GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
+ wider_mode != VOIDmode
+ && GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
&& src_related == 0;
wider_mode = GET_MODE_WIDER_MODE (wider_mode))
{
const_elt; const_elt = const_elt->next_same_value)
if (REG_P (const_elt->exp))
{
- src_related = gen_lowpart (mode,
- const_elt->exp);
+ src_related = gen_lowpart (mode, const_elt->exp);
break;
}
}
larger_elt; larger_elt = larger_elt->next_same_value)
if (REG_P (larger_elt->exp))
{
- src_related = gen_lowpart (mode,
- larger_elt->exp);
+ src_related = gen_lowpart (mode, larger_elt->exp);
break;
}
src_related_cost, src_related_regcost) <= 0
&& preferable (src_eqv_cost, src_eqv_regcost,
src_elt_cost, src_elt_regcost) <= 0)
- trial = copy_rtx (src_eqv_here), src_eqv_cost = MAX_COST;
+ trial = src_eqv_here, src_eqv_cost = MAX_COST;
else if (src_related
&& preferable (src_related_cost, src_related_regcost,
src_elt_cost, src_elt_regcost) <= 0)
- trial = copy_rtx (src_related), src_related_cost = MAX_COST;
+ trial = src_related, src_related_cost = MAX_COST;
else
{
- trial = copy_rtx (elt->exp);
+ trial = elt->exp;
elt = elt->next_same_value;
src_elt_cost = MAX_COST;
}
+ /* Avoid creation of overlapping memory moves. */
+ if (MEM_P (trial) && MEM_P (SET_DEST (sets[i].rtl)))
+ {
+ rtx src, dest;
+
+ /* BLKmode moves are not handled by cse anyway. */
+ if (GET_MODE (trial) == BLKmode)
+ break;
+
+ src = canon_rtx (trial);
+ dest = canon_rtx (SET_DEST (sets[i].rtl));
+
+ if (!MEM_P (src) || !MEM_P (dest)
+ || !nonoverlapping_memrefs_p (src, dest))
+ break;
+ }
+
/* We don't normally have an insn matching (set (pc) (pc)), so
check for this separately here. We will delete such an
insn below.
continue;
SET_SRC (sets[i].rtl) = trial;
- cse_jumps_altered = 1;
+ cse_jumps_altered = true;
break;
}
;
/* Look for a substitution that makes a valid insn. */
- else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
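+ /* validate_unshare_change copies TRIAL before installing it, so
+ the copy_rtx calls formerly used when choosing TRIAL above are
+ no longer needed. */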
+ else if (validate_unshare_change
+ (insn, &SET_SRC (sets[i].rtl), trial, 0))
{
- rtx new = canon_reg (SET_SRC (sets[i].rtl), insn);
-
- /* If we just made a substitution inside a libcall, then we
- need to make the same substitution in any notes attached
- to the RETVAL insn. */
- if (libcall_insn
- && (REG_P (sets[i].orig_src)
- || GET_CODE (sets[i].orig_src) == SUBREG
- || MEM_P (sets[i].orig_src)))
- {
- rtx note = find_reg_equal_equiv_note (libcall_insn);
- if (note != 0)
- XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0),
- sets[i].orig_src,
- copy_rtx (new));
- }
+ rtx new_rtx = canon_reg (SET_SRC (sets[i].rtl), insn);
/* The result of apply_change_group can be ignored; see
canon_reg. */
- validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
+ validate_change (insn, &SET_SRC (sets[i].rtl), new_rtx, 1);
apply_change_group ();
+
break;
}
/* Record the actual constant value in a REG_EQUAL note,
making a new one if one does not already exist. */
set_unique_reg_note (insn, REG_EQUAL, src_const);
+ df_notes_rescan (insn);
}
}
{
/* One less use of the label this insn used to jump to. */
delete_insn_and_edges (insn);
- cse_jumps_altered = 1;
+ cse_jumps_altered = true;
/* No more processing for this set. */
sets[i].rtl = 0;
}
else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF
&& !LABEL_REF_NONLOCAL_P (src))
{
- /* Now emit a BARRIER after the unconditional jump. */
- if (NEXT_INSN (insn) == 0
- || !BARRIER_P (NEXT_INSN (insn)))
- emit_barrier_after (insn);
-
/* We reemit the jump in as many cases as possible just in
case the form of an unconditional jump is significantly
different than a computed jump or conditional jump.
and hope for the best. */
if (n_sets == 1)
{
- rtx new, note;
+ rtx new_rtx, note;
- new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
- JUMP_LABEL (new) = XEXP (src, 0);
+ new_rtx = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
+ JUMP_LABEL (new_rtx) = XEXP (src, 0);
LABEL_NUSES (XEXP (src, 0))++;
/* Make sure to copy over REG_NON_LOCAL_GOTO. */
if (note)
{
XEXP (note, 1) = NULL_RTX;
- REG_NOTES (new) = note;
+ REG_NOTES (new_rtx) = note;
}
delete_insn_and_edges (insn);
- insn = new;
-
- /* Now emit a BARRIER after the unconditional jump. */
- if (NEXT_INSN (insn) == 0
- || !BARRIER_P (NEXT_INSN (insn)))
- emit_barrier_after (insn);
+ insn = new_rtx;
}
else
INSN_CODE (insn) = -1;
- /* Do not bother deleting any unreachable code,
- let jump/flow do that. */
-
- cse_jumps_altered = 1;
+ /* Do not bother deleting any unreachable code, let jump do it. */
+ cse_jumps_altered = true;
sets[i].rtl = 0;
}
if (sets[i].src_elt == 0)
{
- /* Don't put a hard register source into the table if this is
- the last insn of a libcall. In this case, we only need
- to put src_eqv_elt in src_elt. */
- if (! find_reg_note (insn, REG_RETVAL, NULL_RTX))
- {
- struct table_elt *elt;
+ struct table_elt *elt;
- /* Note that these insert_regs calls cannot remove
- any of the src_elt's, because they would have failed to
- match if not still valid. */
- if (insert_regs (src, classp, 0))
- {
- rehash_using_reg (src);
- sets[i].src_hash = HASH (src, mode);
- }
- elt = insert (src, classp, sets[i].src_hash, mode);
- elt->in_memory = sets[i].src_in_memory;
- sets[i].src_elt = classp = elt;
+ /* Note that these insert_regs calls cannot remove
+ any of the src_elt's, because they would have failed to
+ match if not still valid. */
+ if (insert_regs (src, classp, 0))
+ {
+ rehash_using_reg (src);
+ sets[i].src_hash = HASH (src, mode);
}
- else
- sets[i].src_elt = classp;
+ elt = insert (src, classp, sets[i].src_hash, mode);
+ elt->in_memory = sets[i].src_in_memory;
+ sets[i].src_elt = classp = elt;
}
if (sets[i].src_const && sets[i].src_const_elt == 0
&& src != sets[i].src_const
{
if (insert_regs (x, NULL, 0))
{
+ rtx dest = SET_DEST (sets[i].rtl);
+
rehash_using_reg (x);
hash = HASH (x, mode);
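+ /* insert_regs may have given DEST a new quantity number, which
+ changes its hash code; refresh the cached dest_hash. */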
+ sets[i].dest_hash = HASH (dest, GET_MODE (dest));
}
elt = insert (x, NULL, hash, mode);
}
if (CALL_P (insn))
{
- if (! CONST_OR_PURE_CALL_P (insn))
+ if (!(RTL_CONST_OR_PURE_CALL_P (insn)))
invalidate_memory ();
invalidate_for_call ();
}
&& MEM_VOLATILE_P (PATTERN (insn)))
flush_hash_table ();
+ /* Don't cse over a call to setjmp; on some machines (e.g. VAX)
+ the regs restored by the longjmp come from a later time
+ than the setjmp. */
+ if (CALL_P (insn) && find_reg_note (insn, REG_SETJMP, NULL))
+ {
+ flush_hash_table ();
+ goto done;
+ }
+
/* Make sure registers mentioned in destinations
are safe for use in an expression to be inserted.
This removes from the hash table
but it knows that reg_tick has been incremented, and
it leaves reg_in_table as -1 . */
unsigned int regno = REGNO (x);
- unsigned int endregno
- = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
- : hard_regno_nregs[regno][GET_MODE (x)]);
+ unsigned int endregno = END_REGNO (x);
unsigned int i;
for (i = regno; i < endregno; i++)
size of it, and can't be sure that other BLKmode values
have the same or smaller size. */
|| GET_MODE (dest) == BLKmode
- /* Don't record values of destinations set inside a libcall block
- since we might delete the libcall. Things should have been set
- up so we won't want to reuse such a value, but we play it safe
- here. */
- || libcall_insn
/* If we didn't put a REG_EQUAL value or a source into the hash
table, there is no point is recording DEST. */
|| sets[i].src_elt == 0
then be used in the sequel and we may be changing a two-operand insn
into a three-operand insn.
- Also do not do this if we are operating on a copy of INSN.
-
- Also don't do this if INSN ends a libcall; this would cause an unrelated
- register to be set in the middle of a libcall, and we then get bad code
- if the libcall is deleted. */
+ Also do not do this if we are operating on a copy of INSN. */
if (n_sets == 1 && sets[0].rtl && REG_P (SET_DEST (sets[0].rtl))
&& NEXT_INSN (PREV_INSN (insn)) == insn
int src_q = REG_QTY (REGNO (SET_SRC (sets[0].rtl)));
struct qty_table_elem *src_ent = &qty_table[src_q];
- if ((src_ent->first_reg == REGNO (SET_DEST (sets[0].rtl)))
- && ! find_reg_note (insn, REG_RETVAL, NULL_RTX))
+ if (src_ent->first_reg == REGNO (SET_DEST (sets[0].rtl)))
{
- rtx prev = insn;
/* Scan for the previous nonnote insn, but stop at a basic
block boundary. */
+ rtx prev = insn;
+ rtx bb_head = BB_HEAD (BLOCK_FOR_INSN (insn));
do
{
prev = PREV_INSN (prev);
}
- while (prev && NOTE_P (prev)
- && NOTE_LINE_NUMBER (prev) != NOTE_INSN_BASIC_BLOCK);
+ while (prev != bb_head && NOTE_P (prev));
/* Do not swap the registers around if the previous instruction
attaches a REG_EQUIV note to REG1.
This section previously turned the REG_EQUIV into a REG_EQUAL
note. We cannot do that because REG_EQUIV may provide an
uninitialized stack slot when REG_PARM_STACK_SPACE is used. */
-
- if (prev != 0 && NONJUMP_INSN_P (prev)
+ if (NONJUMP_INSN_P (prev)
&& GET_CODE (PATTERN (prev)) == SET
&& SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl)
&& ! find_reg_note (prev, REG_EQUIV, NULL_RTX))
}
}
- /* If this is a conditional jump insn, record any known equivalences due to
- the condition being tested. */
-
- if (n_sets == 1 && any_condjump_p (insn))
- record_jump_equiv (insn, false);
-
-#ifdef HAVE_cc0
- /* If the previous insn set CC0 and this insn no longer references CC0,
- delete the previous insn. Here we use the fact that nothing expects CC0
- to be valid over an insn, which is true until the final pass. */
- if (prev_insn && NONJUMP_INSN_P (prev_insn)
- && (tem = single_set (prev_insn)) != 0
- && SET_DEST (tem) == cc0_rtx
- && ! reg_mentioned_p (cc0_rtx, x))
- delete_insn_and_edges (prev_insn);
-
- prev_insn_cc0 = this_insn_cc0;
- prev_insn_cc0_mode = this_insn_cc0_mode;
- prev_insn = insn;
-#endif
+done:;
}
\f
/* Remove from the hash table all expressions that reference memory. */
Return the replacement for X. */
static rtx
-cse_process_notes (rtx x, rtx object)
+cse_process_notes_1 (rtx x, rtx object, bool *changed)
{
enum rtx_code code = GET_CODE (x);
const char *fmt = GET_RTX_FORMAT (code);
case SYMBOL_REF:
case LABEL_REF:
case CONST_DOUBLE:
+ case CONST_FIXED:
case CONST_VECTOR:
case PC:
case CC0:
case MEM:
validate_change (x, &XEXP (x, 0),
- cse_process_notes (XEXP (x, 0), x), 0);
+ cse_process_notes (XEXP (x, 0), x, changed), 0);
return x;
case EXPR_LIST:
case INSN_LIST:
if (REG_NOTE_KIND (x) == REG_EQUAL)
- XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
+ XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX, changed);
if (XEXP (x, 1))
- XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
+ XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX, changed);
return x;
case SIGN_EXTEND:
case ZERO_EXTEND:
case SUBREG:
{
- rtx new = cse_process_notes (XEXP (x, 0), object);
+ rtx new_rtx = cse_process_notes (XEXP (x, 0), object, changed);
/* We don't substitute VOIDmode constants into these rtx,
since they would impede folding. */
- if (GET_MODE (new) != VOIDmode)
- validate_change (object, &XEXP (x, 0), new, 0);
+ if (GET_MODE (new_rtx) != VOIDmode)
+ validate_change (object, &XEXP (x, 0), new_rtx, 0);
return x;
}
&& (CONSTANT_P (ent->const_rtx)
|| REG_P (ent->const_rtx)))
{
- rtx new = gen_lowpart (GET_MODE (x), ent->const_rtx);
- if (new)
- return copy_rtx (new);
+ rtx new_rtx = gen_lowpart (GET_MODE (x), ent->const_rtx);
+ if (new_rtx)
+ return copy_rtx (new_rtx);
}
}
for (i = 0; i < GET_RTX_LENGTH (code); i++)
if (fmt[i] == 'e')
validate_change (object, &XEXP (x, i),
- cse_process_notes (XEXP (x, i), object), 0);
+ cse_process_notes (XEXP (x, i), object, changed), 0);
return x;
}
+
+static rtx
+cse_process_notes (rtx x, rtx object, bool *changed)
+{
+ rtx new_rtx = cse_process_notes_1 (x, object, changed);
+ if (new_rtx != x)
+ *changed = true;
+ return new_rtx;
+}
+
\f
-/* Find the end of INSN's basic block and return its range,
- the total number of SETs in all the insns of the block, the last insn of the
- block, and the branch path.
+/* Find a path in the CFG, starting with FIRST_BB, to perform CSE on.
- The branch path indicates which branches should be followed. If a nonzero
- path size is specified, the block should be rescanned and a different set
- of branches will be taken. The branch path is only used if
- FLAG_CSE_FOLLOW_JUMPS is nonzero.
+ DATA is a pointer to a struct cse_basic_block_data, that is used to
+ describe the path.
+ It is filled with a queue of basic blocks, starting with FIRST_BB
+ and following a trace through the CFG.
+
+ If all paths starting at FIRST_BB have been followed, or no new path
+ starting at FIRST_BB can be constructed, this function returns FALSE.
+ Otherwise, DATA->path is filled and the function returns TRUE indicating
+ that a path to follow was found.
- DATA is a pointer to a struct cse_basic_block_data, defined below, that is
- used to describe the block. It is filled in with the information about
- the current block. The incoming structure's branch path, if any, is used
- to construct the output branch path. */
+ If FOLLOW_JUMPS is false, the maximum path length is 1 and the only
+ block in the path will be FIRST_BB. */
-static void
-cse_end_of_basic_block (rtx insn, struct cse_basic_block_data *data,
- int follow_jumps)
+static bool
+cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
+ int follow_jumps)
{
- rtx p = insn, q;
- int nsets = 0;
- int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
- rtx next = INSN_P (insn) ? insn : next_real_insn (insn);
- int path_size = data->path_size;
- int path_entry = 0;
- int i;
+ basic_block bb;
+ edge e;
+ int path_size;
+
+ SET_BIT (cse_visited_basic_blocks, first_bb->index);
+
+ /* See if there is a previous path. */
+ path_size = data->path_size;
+
+ /* If there is a previous path, make sure it started with FIRST_BB. */
+ if (path_size)
+ gcc_assert (data->path[0].bb == first_bb);
- /* Update the previous branch path, if any. If the last branch was
- previously PATH_TAKEN, mark it PATH_NOT_TAKEN.
- If it was previously PATH_NOT_TAKEN,
- shorten the path by one and look at the previous branch. We know that
- at least one branch must have been taken if PATH_SIZE is nonzero. */
- while (path_size > 0)
+ /* There was only one basic block in the last path. Clear the path and
+ return, so that paths starting at another basic block can be tried. */
+ if (path_size == 1)
{
- if (data->path[path_size - 1].status != PATH_NOT_TAKEN)
- {
- data->path[path_size - 1].status = PATH_NOT_TAKEN;
- break;
- }
- else
- path_size--;
+ path_size = 0;
+ goto done;
}
- /* If the first instruction is marked with QImode, that means we've
- already processed this block. Our caller will look at DATA->LAST
- to figure out where to go next. We want to return the next block
- in the instruction stream, not some branched-to block somewhere
- else. We accomplish this by pretending our called forbid us to
- follow jumps. */
- if (GET_MODE (insn) == QImode)
- follow_jumps = 0;
-
- /* Scan to end of this basic block. */
- while (p && !LABEL_P (p))
+ /* If the path was empty from the beginning, construct a new path. */
+ if (path_size == 0)
+ data->path[path_size++].bb = first_bb;
+ else
{
- /* Don't cse over a call to setjmp; on some machines (eg VAX)
- the regs restored by the longjmp come from
- a later time than the setjmp. */
- if (PREV_INSN (p) && CALL_P (PREV_INSN (p))
- && find_reg_note (PREV_INSN (p), REG_SETJMP, NULL))
- break;
+ /* Otherwise, path_size must be equal to or greater than 2, because
+ a previous path exists that is at least two basic blocks long.
- /* A PARALLEL can have lots of SETs in it,
- especially if it is really an ASM_OPERANDS. */
- if (INSN_P (p) && GET_CODE (PATTERN (p)) == PARALLEL)
- nsets += XVECLEN (PATTERN (p), 0);
- else if (!NOTE_P (p))
- nsets += 1;
-
- /* Ignore insns made by CSE; they cannot affect the boundaries of
- the basic block. */
-
- if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
- high_cuid = INSN_CUID (p);
- if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
- low_cuid = INSN_CUID (p);
-
- /* See if this insn is in our branch path. If it is and we are to
- take it, do so. */
- if (path_entry < path_size && data->path[path_entry].branch == p)
+ Update the previous branch path, if any. If the last branch was
+ previously along the branch edge, take the fallthrough edge now. */
+ while (path_size >= 2)
{
- if (data->path[path_entry].status != PATH_NOT_TAKEN)
- p = JUMP_LABEL (p);
-
- /* Point to next entry in path, if any. */
- path_entry++;
- }
-
- /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
- was specified, we haven't reached our maximum path length, there are
- insns following the target of the jump, this is the only use of the
- jump label, and the target label is preceded by a BARRIER. */
- else if (follow_jumps
- && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH) - 1
- && JUMP_P (p)
- && GET_CODE (PATTERN (p)) == SET
- && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
- && JUMP_LABEL (p) != 0
- && LABEL_NUSES (JUMP_LABEL (p)) == 1
- && NEXT_INSN (JUMP_LABEL (p)) != 0)
- {
- for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
- if ((!NOTE_P (q)
- || (PREV_INSN (q) && CALL_P (PREV_INSN (q))
- && find_reg_note (PREV_INSN (q), REG_SETJMP, NULL)))
- && (!LABEL_P (q) || LABEL_NUSES (q) != 0))
- break;
-
- /* If we ran into a BARRIER, this code is an extension of the
- basic block when the branch is taken. */
- if (follow_jumps && q != 0 && BARRIER_P (q))
+ basic_block last_bb_in_path, previous_bb_in_path;
+ edge e;
+
+ --path_size;
+ last_bb_in_path = data->path[path_size].bb;
+ previous_bb_in_path = data->path[path_size - 1].bb;
+
+ /* If we previously followed a path along the branch edge, try
+ the fallthru edge now. */
+ if (EDGE_COUNT (previous_bb_in_path->succs) == 2
+ && any_condjump_p (BB_END (previous_bb_in_path))
+ && (e = find_edge (previous_bb_in_path, last_bb_in_path))
+ && e == BRANCH_EDGE (previous_bb_in_path))
{
- /* Don't allow ourself to keep walking around an
- always-executed loop. */
- if (next_real_insn (q) == next)
+ bb = FALLTHRU_EDGE (previous_bb_in_path)->dest;
+ if (bb != EXIT_BLOCK_PTR
+ && single_pred_p (bb)
+ /* We used to assert here that we would only see blocks
+ that we have not visited yet. But we may end up
+ visiting basic blocks twice if the CFG has changed
+ in this run of cse_main, because when the CFG changes
+ the topological sort of the CFG also changes. A basic
+ block that previously had more than two predecessors
+ may now have a single predecessor, and become part of
+ a path that starts at another basic block.
+
+ We still want to visit each basic block only once, so
+ halt the path here if we have already visited BB. */
+ && !TEST_BIT (cse_visited_basic_blocks, bb->index))
{
- p = NEXT_INSN (p);
- continue;
- }
-
- /* Similarly, don't put a branch in our path more than once. */
- for (i = 0; i < path_entry; i++)
- if (data->path[i].branch == p)
+ SET_BIT (cse_visited_basic_blocks, bb->index);
+ data->path[path_size++].bb = bb;
break;
+ }
+ }
- if (i != path_entry)
- break;
+ data->path[path_size].bb = NULL;
+ }
- data->path[path_entry].branch = p;
- data->path[path_entry++].status = PATH_TAKEN;
+ /* If only one block remains in the path, bail. */
+ if (path_size == 1)
+ {
+ path_size = 0;
+ goto done;
+ }
+ }
- /* This branch now ends our path. It was possible that we
- didn't see this branch the last time around (when the
- insn in front of the target was a JUMP_INSN that was
- turned into a no-op). */
- path_size = path_entry;
+ /* Extend the path if possible. */
+ if (follow_jumps)
+ {
+ bb = data->path[path_size - 1].bb;
+ while (bb && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH))
+ {
+ if (single_succ_p (bb))
+ e = single_succ_edge (bb);
+ else if (EDGE_COUNT (bb->succs) == 2
+ && any_condjump_p (BB_END (bb)))
+ {
+ /* First try to follow the branch. If that doesn't lead
+ to a useful path, follow the fallthru edge. */
+ e = BRANCH_EDGE (bb);
+ if (!single_pred_p (e->dest))
+ e = FALLTHRU_EDGE (bb);
+ }
+ else
+ e = NULL;
- p = JUMP_LABEL (p);
- /* Mark block so we won't scan it again later. */
- PUT_MODE (NEXT_INSN (p), QImode);
+ if (e && e->dest != EXIT_BLOCK_PTR
+ && single_pred_p (e->dest)
+ /* Avoid visiting basic blocks twice. The large comment
+ above explains why this can happen. */
+ && !TEST_BIT (cse_visited_basic_blocks, e->dest->index))
+ {
+ basic_block bb2 = e->dest;
+ SET_BIT (cse_visited_basic_blocks, bb2->index);
+ data->path[path_size++].bb = bb2;
+ bb = bb2;
}
+ else
+ bb = NULL;
}
- p = NEXT_INSN (p);
}
- data->low_cuid = low_cuid;
- data->high_cuid = high_cuid;
- data->nsets = nsets;
- data->last = p;
-
- /* If all jumps in the path are not taken, set our path length to zero
- so a rescan won't be done. */
- for (i = path_size - 1; i >= 0; i--)
- if (data->path[i].status != PATH_NOT_TAKEN)
- break;
-
- if (i == -1)
- data->path_size = 0;
- else
- data->path_size = path_size;
-
- /* End the current branch path. */
- data->path[path_size].branch = 0;
+done:
+ data->path_size = path_size;
+ return path_size != 0;
}
\f
-/* Perform cse on the instructions of a function.
- F is the first instruction.
- NREGS is one plus the highest pseudo-reg number used in the instruction.
-
- Returns 1 if jump_optimize should be redone due to simplifications
- in conditional jump instructions. */
+/* Dump the path in DATA to file F. NSETS is the number of sets
+ in the path. */
-int
-cse_main (rtx f, int nregs)
+static void
+cse_dump_path (struct cse_basic_block_data *data, int nsets, FILE *f)
{
- struct cse_basic_block_data val;
- rtx insn = f;
- int i;
-
- init_cse_reg_info (nregs);
-
- val.path = XNEWVEC (struct branch_path, PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
+ int path_entry;
- cse_jumps_altered = 0;
- recorded_label_ref = 0;
- constant_pool_entries_cost = 0;
- constant_pool_entries_regcost = 0;
- val.path_size = 0;
- rtl_hooks = cse_rtl_hooks;
+ fprintf (f, ";; Following path with %d sets: ", nsets);
+ for (path_entry = 0; path_entry < data->path_size; path_entry++)
+ fprintf (f, "%d ", (data->path[path_entry].bb)->index);
+ fputc ('\n', f);
+ fflush (f);
+}
- init_recog ();
- init_alias_analysis ();
+\f
+/* Return true if BB has exception handling successor edges. */
- reg_eqv_table = XNEWVEC (struct reg_eqv_elem, nregs);
+static bool
+have_eh_succ_edges (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
- /* Find the largest uid. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->flags & EDGE_EH)
+ return true;
- max_uid = get_max_uid ();
- uid_cuid = XCNEWVEC (int, max_uid + 1);
+ return false;
+}
- /* Compute the mapping from uids to cuids.
- CUIDs are numbers assigned to insns, like uids,
- except that cuids increase monotonically through the code. */
+\f
+/* Scan to the end of the path described by DATA. Store in DATA->nsets
+ an estimate of the total number of SETs of all insns in the path. */
- for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
- INSN_CUID (insn) = ++i;
+static void
+cse_prescan_path (struct cse_basic_block_data *data)
+{
+ int nsets = 0;
+ int path_size = data->path_size;
+ int path_entry;
- /* Loop over basic blocks.
- Compute the maximum number of qty's needed for each basic block
- (which is 2 for each SET). */
- insn = f;
- while (insn)
+ /* Scan to end of each basic block in the path. */
+ for (path_entry = 0; path_entry < path_size; path_entry++)
{
- cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps);
+ basic_block bb;
+ rtx insn;
- /* If this basic block was already processed or has no sets, skip it. */
- if (val.nsets == 0 || GET_MODE (insn) == QImode)
- {
- PUT_MODE (insn, VOIDmode);
- insn = (val.last ? NEXT_INSN (val.last) : 0);
- val.path_size = 0;
- continue;
- }
+ bb = data->path[path_entry].bb;
- cse_basic_block_start = val.low_cuid;
- cse_basic_block_end = val.high_cuid;
- max_qty = val.nsets * 2;
-
- if (dump_file)
- fprintf (dump_file, ";; Processing block from %d to %d, %d sets.\n",
- INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
- val.nsets);
-
- /* Make MAX_QTY bigger to give us room to optimize
- past the end of this basic block, if that should prove useful. */
- if (max_qty < 500)
- max_qty = 500;
-
- /* If this basic block is being extended by following certain jumps,
- (see `cse_end_of_basic_block'), we reprocess the code from the start.
- Otherwise, we start after this basic block. */
- if (val.path_size > 0)
- cse_basic_block (insn, val.last, val.path);
- else
+ FOR_BB_INSNS (bb, insn)
{
- int old_cse_jumps_altered = cse_jumps_altered;
- rtx temp;
-
- /* When cse changes a conditional jump to an unconditional
- jump, we want to reprocess the block, since it will give
- us a new branch path to investigate. */
- cse_jumps_altered = 0;
- temp = cse_basic_block (insn, val.last, val.path);
- if (cse_jumps_altered == 0 || flag_cse_follow_jumps)
- insn = temp;
-
- cse_jumps_altered |= old_cse_jumps_altered;
+ if (!INSN_P (insn))
+ continue;
+
+ /* A PARALLEL can have lots of SETs in it,
+ especially if it is really an ASM_OPERANDS. */
+ if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ nsets += XVECLEN (PATTERN (insn), 0);
+ else
+ nsets += 1;
}
}
- /* Clean up. */
- end_alias_analysis ();
- free (uid_cuid);
- free (reg_eqv_table);
- free (val.path);
- rtl_hooks = general_rtl_hooks;
-
- return cse_jumps_altered || recorded_label_ref;
+ data->nsets = nsets;
}
+\f
+/* Process a single extended basic block described by EBB_DATA. */
-/* Process a single basic block. FROM and TO and the limits of the basic
- block. NEXT_BRANCH points to the branch path when following jumps or
- a null path when not following jumps. */
-
-static rtx
-cse_basic_block (rtx from, rtx to, struct branch_path *next_branch)
+static void
+cse_extended_basic_block (struct cse_basic_block_data *ebb_data)
{
- rtx insn;
- int to_usage = 0;
- rtx libcall_insn = NULL_RTX;
+ int path_size = ebb_data->path_size;
+ int path_entry;
int num_insns = 0;
- int no_conflict = 0;
/* Allocate the space needed by qty_table. */
qty_table = XNEWVEC (struct qty_table_elem, max_qty);
new_basic_block ();
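+ /* Record the registers live on entry to the first block in the
+ path and live on exit from the last one.  */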
+ cse_ebb_live_in = df_get_live_in (ebb_data->path[0].bb);
+ cse_ebb_live_out = df_get_live_out (ebb_data->path[path_size - 1].bb);
+ for (path_entry = 0; path_entry < path_size; path_entry++)
+ {
+ basic_block bb;
+ rtx insn;
- /* TO might be a label. If so, protect it from being deleted. */
- if (to != 0 && LABEL_P (to))
- ++LABEL_NUSES (to);
+ bb = ebb_data->path[path_entry].bb;
- for (insn = from; insn != to; insn = NEXT_INSN (insn))
- {
- enum rtx_code code = GET_CODE (insn);
-
- /* If we have processed 1,000 insns, flush the hash table to
- avoid extreme quadratic behavior. We must not include NOTEs
- in the count since there may be more of them when generating
- debugging information. If we clear the table at different
- times, code generated with -g -O might be different than code
- generated with -O but not -g.
-
- ??? This is a real kludge and needs to be done some other way.
- Perhaps for 2.9. */
- if (code != NOTE && num_insns++ > PARAM_VALUE (PARAM_MAX_CSE_INSNS))
+ /* If this block is reached by an EH edge, invalidate any recorded
+ information for the EH registers. */
+ if (bb_has_eh_pred (bb))
{
- flush_hash_table ();
- num_insns = 0;
+ df_ref *def_rec;
+
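+ /* Defs flagged DF_REF_AT_TOP take effect at the start of the
+ block, before any of its insns; those are the registers the
+ EH edge may modify.  */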
+ for (def_rec = df_get_artificial_defs (bb->index); *def_rec; def_rec++)
+ {
+ df_ref def = *def_rec;
+ if (DF_REF_FLAGS (def) & DF_REF_AT_TOP)
+ invalidate (DF_REF_REG (def), GET_MODE (DF_REF_REG (def)));
+ }
}
- /* See if this is a branch that is part of the path. If so, and it is
- to be taken, do so. */
- if (next_branch->branch == insn)
+ FOR_BB_INSNS (bb, insn)
{
- enum taken status = next_branch++->status;
- if (status != PATH_NOT_TAKEN)
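+ /* Record whether this block should be optimized for speed or for
+ size; the cost heuristics consult this flag.  */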
+ optimize_this_for_speed_p = optimize_bb_for_speed_p (bb);
+ /* If we have processed 1,000 insns, flush the hash table to
+ avoid extreme quadratic behavior. We must not include NOTEs
+ in the count since there may be more of them when generating
+ debugging information. If we clear the table at different
+ times, code generated with -g -O might differ from code
+ generated with -O but not -g.
+
+ FIXME: This is a real kludge and needs to be done some other
+ way. */
+ if (INSN_P (insn)
+ && num_insns++ > PARAM_VALUE (PARAM_MAX_CSE_INSNS))
+ {
+ flush_hash_table ();
+ num_insns = 0;
+ }
+
+ if (INSN_P (insn))
{
- gcc_assert (status == PATH_TAKEN);
- if (any_condjump_p (insn))
- record_jump_equiv (insn, true);
+ /* Process notes first so we have all notes in canonical forms
+ when looking for duplicate operations. */
+ if (REG_NOTES (insn))
+ {
+ bool changed = false;
+ REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn),
+ NULL_RTX, &changed);
+ if (changed)
+ df_notes_rescan (insn);
+ }
+
+ cse_insn (insn);
+
+ /* If we haven't already found an insn where we added a LABEL_REF,
+ check this one. */
+ if (INSN_P (insn) && !recorded_label_ref
+ && for_each_rtx (&PATTERN (insn), check_for_label_ref,
+ (void *) insn))
+ recorded_label_ref = true;
- /* Set the last insn as the jump insn; it doesn't affect cc0.
- Then follow this branch. */
#ifdef HAVE_cc0
- prev_insn_cc0 = 0;
- prev_insn = insn;
+ /* If the previous insn set CC0 and this insn no longer
+ references CC0, delete the previous insn. Here we use the
+ fact that nothing expects CC0 to be valid over an insn,
+ which is true until the final pass. */
+ {
+ rtx prev_insn, tem;
+
+ prev_insn = PREV_INSN (insn);
+ if (prev_insn && NONJUMP_INSN_P (prev_insn)
+ && (tem = single_set (prev_insn)) != 0
+ && SET_DEST (tem) == cc0_rtx
+ && ! reg_mentioned_p (cc0_rtx, PATTERN (insn)))
+ delete_insn (prev_insn);
+ }
+
+ /* If this insn is not the last insn in the basic block,
+ it will be PREV_INSN (insn) in the next iteration. If
+ we recorded any CC0-related information for this insn,
+ remember it. */
+ if (insn != BB_END (bb))
+ {
+ prev_insn_cc0 = this_insn_cc0;
+ prev_insn_cc0_mode = this_insn_cc0_mode;
+ }
#endif
- insn = JUMP_LABEL (insn);
- continue;
}
}
- if (GET_MODE (insn) == QImode)
- PUT_MODE (insn, VOIDmode);
+ /* With non-call exceptions, we are not always able to update
+ the CFG properly inside cse_insn. So clean up possibly
+ redundant EH edges here. */
+ if (flag_non_call_exceptions && have_eh_succ_edges (bb))
+ cse_cfg_altered |= purge_dead_edges (bb);
+
+ /* If we changed a conditional jump, we may have terminated
+ the path we are following. Check that by verifying that
+ the edge we would take still exists. If the edge does
+ not exist anymore, purge the remainder of the path.
+ Note that this will cause us to return to the caller. */
+ if (path_entry < path_size - 1)
+ {
+ basic_block next_bb = ebb_data->path[path_entry + 1].bb;
+ if (!find_edge (bb, next_bb))
+ {
+ do
+ {
+ path_size--;
+
+ /* If we truncate the path, we must also reset the
+ visited bit on the remaining blocks in the path,
+ or we will never visit them at all. */
+ RESET_BIT (cse_visited_basic_blocks,
+ ebb_data->path[path_size].bb->index);
+ ebb_data->path[path_size].bb = NULL;
+ }
+ while (path_size - 1 != path_entry);
+ ebb_data->path_size = path_size;
+ }
+ }
- if (GET_RTX_CLASS (code) == RTX_INSN)
+ /* If this is a conditional jump insn, record any known
+ equivalences due to the condition being tested. */
+ insn = BB_END (bb);
+ if (path_entry < path_size - 1
+ && JUMP_P (insn)
+ && single_set (insn)
+ && any_condjump_p (insn))
{
- rtx p;
+ basic_block next_bb = ebb_data->path[path_entry + 1].bb;
+ bool taken = (next_bb == BRANCH_EDGE (bb)->dest);
+ record_jump_equiv (insn, taken);
+ }
- /* Process notes first so we have all notes in canonical forms when
- looking for duplicate operations. */
+#ifdef HAVE_cc0
+ /* Clear the CC0-tracking state; values in CC0 cannot carry
+ useful information across basic block boundaries. */
+ prev_insn_cc0 = 0;
+#endif
+ }
- if (REG_NOTES (insn))
- REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
+ gcc_assert (next_qty <= max_qty);
- /* Track when we are inside in LIBCALL block. Inside such a block,
- we do not want to record destinations. The last insn of a
- LIBCALL block is not considered to be part of the block, since
- its destination is the result of the block and hence should be
- recorded. */
+ free (qty_table);
+}
- if (REG_NOTES (insn) != 0)
- {
- if ((p = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
- libcall_insn = XEXP (p, 0);
- else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
- {
- /* Keep libcall_insn for the last SET insn of a no-conflict
- block to prevent changing the destination. */
- if (! no_conflict)
- libcall_insn = 0;
- else
- no_conflict = -1;
- }
- else if (find_reg_note (insn, REG_NO_CONFLICT, NULL_RTX))
- no_conflict = 1;
- }
+\f
+/* Perform cse on the instructions of a function.
+ F is the first instruction.
+ NREGS is one plus the highest pseudo-reg number used in the function.
- cse_insn (insn, libcall_insn);
+ Return 2 if jump optimizations should be redone due to simplifications
+ in conditional jump instructions.
+ Return 1 if the CFG should be cleaned up because it has been modified.
+ Return 0 otherwise. */
- if (no_conflict == -1)
- {
- libcall_insn = 0;
- no_conflict = 0;
- }
-
- /* If we haven't already found an insn where we added a LABEL_REF,
- check this one. */
- if (NONJUMP_INSN_P (insn) && ! recorded_label_ref
- && for_each_rtx (&PATTERN (insn), check_for_label_ref,
- (void *) insn))
- recorded_label_ref = 1;
- }
+int
+cse_main (rtx f ATTRIBUTE_UNUSED, int nregs)
+{
+ struct cse_basic_block_data ebb_data;
+ basic_block bb;
+ int *rc_order = XNEWVEC (int, last_basic_block);
+ int i, n_blocks;
+
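+ /* Run dead code elimination as part of the dataflow analysis, and
+ defer rescanning of changed insns so that CSE's many small
+ modifications are processed in batches.  */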
+ df_set_flags (DF_LR_RUN_DCE);
+ df_analyze ();
+ df_set_flags (DF_DEFER_INSN_RESCAN);
- /* If INSN is now an unconditional jump, skip to the end of our
- basic block by pretending that we just did the last insn in the
- basic block. If we are jumping to the end of our block, show
- that we can have one usage of TO. */
+ reg_scan (get_insns (), max_reg_num ());
+ init_cse_reg_info (nregs);
+
+ ebb_data.path = XNEWVEC (struct branch_path,
+ PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
+
+ cse_cfg_altered = false;
+ cse_jumps_altered = false;
+ recorded_label_ref = false;
+ constant_pool_entries_cost = 0;
+ constant_pool_entries_regcost = 0;
+ ebb_data.path_size = 0;
+ ebb_data.nsets = 0;
+ rtl_hooks = cse_rtl_hooks;
+
+ init_recog ();
+ init_alias_analysis ();
+
+ reg_eqv_table = XNEWVEC (struct reg_eqv_elem, nregs);
+
+ /* Set up the table of already visited basic blocks. */
+ cse_visited_basic_blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (cse_visited_basic_blocks);
- if (any_uncondjump_p (insn))
+ /* Loop over basic blocks in reverse post-order (RPO),
+ excluding the ENTRY and EXIT blocks. */
+ n_blocks = pre_and_rev_post_order_compute (NULL, rc_order, false);
+ i = 0;
+ while (i < n_blocks)
+ {
+ /* Find the first block in the RPO queue that we have not yet
+ processed. */
+ do
{
- if (to == 0)
- {
- free (qty_table);
- return 0;
- }
+ bb = BASIC_BLOCK (rc_order[i++]);
+ }
+ while (TEST_BIT (cse_visited_basic_blocks, bb->index)
+ && i < n_blocks);
+
+ /* Find all paths starting with BB, and process them. */
+ while (cse_find_path (bb, &ebb_data, flag_cse_follow_jumps))
+ {
+ /* Pre-scan the path. */
+ cse_prescan_path (&ebb_data);
- if (JUMP_LABEL (insn) == to)
- to_usage = 1;
+ /* If this path has no sets, skip it. */
+ if (ebb_data.nsets == 0)
+ continue;
- /* Maybe TO was deleted because the jump is unconditional.
- If so, there is nothing left in this basic block. */
- /* ??? Perhaps it would be smarter to set TO
- to whatever follows this insn,
- and pretend the basic block had always ended here. */
- if (INSN_DELETED_P (to))
- break;
+ /* Get a reasonable estimate for the maximum number of qty's
+ needed for this path. For this, we take the number of sets
+ and multiply that by MAX_RECOG_OPERANDS. */
+ max_qty = ebb_data.nsets * MAX_RECOG_OPERANDS;
- insn = PREV_INSN (to);
+ /* Dump the path we're about to process. */
+ if (dump_file)
+ cse_dump_path (&ebb_data, ebb_data.nsets, dump_file);
+
+ cse_extended_basic_block (&ebb_data);
}
}
- gcc_assert (next_qty <= max_qty);
-
- free (qty_table);
+ /* Clean up. */
+ end_alias_analysis ();
+ free (reg_eqv_table);
+ free (ebb_data.path);
+ sbitmap_free (cse_visited_basic_blocks);
+ free (rc_order);
+ rtl_hooks = general_rtl_hooks;
- return to ? NEXT_INSN (to) : 0;
+ if (cse_jumps_altered || recorded_label_ref)
+ return 2;
+ else if (cse_cfg_altered)
+ return 1;
+ else
+ return 0;
}
\f
-/* Called via for_each_rtx to see if an insn is using a LABEL_REF for which
- there isn't a REG_LABEL note. Return one if so. DATA is the insn. */
+/* Called via for_each_rtx to see if an insn is using a LABEL_REF for
+ which there isn't a REG_LABEL_OPERAND note.
+ Return one if so. DATA is the insn. */
static int
check_for_label_ref (rtx *rtl, void *data)
{
rtx insn = (rtx) data;
- /* If this insn uses a LABEL_REF and there isn't a REG_LABEL note for it,
- we must rerun jump since it needs to place the note. If this is a
- LABEL_REF for a CODE_LABEL that isn't in the insn chain, don't do this
- since no REG_LABEL will be added. */
+ /* If this insn uses a LABEL_REF and there isn't a REG_LABEL_OPERAND
+ note for it, we must rerun jump since it needs to place the note. If
+ this is a LABEL_REF for a CODE_LABEL that isn't in the insn chain,
+ don't do this since no REG_LABEL_OPERAND will be added. */
return (GET_CODE (*rtl) == LABEL_REF
&& ! LABEL_REF_NONLOCAL_P (*rtl)
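+ /* A label that is a jump target is recorded via JUMP_LABEL or a
+ REG_LABEL_TARGET note rather than REG_LABEL_OPERAND, so do not
+ require a note for it.  */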
+ && (!JUMP_P (insn)
+ || !label_is_jump_target_p (XEXP (*rtl, 0), insn))
&& LABEL_P (XEXP (*rtl, 0))
&& INSN_UID (XEXP (*rtl, 0)) != 0
- && ! find_reg_note (insn, REG_LABEL, XEXP (*rtl, 0)));
+ && ! find_reg_note (insn, REG_LABEL_OPERAND, XEXP (*rtl, 0)));
}
\f
/* Count the number of times registers are used (not set) in X.
case CONST:
case CONST_INT:
case CONST_DOUBLE:
+ case CONST_FIXED:
case CONST_VECTOR:
case SYMBOL_REF:
case LABEL_REF:
return true;
}
-/* Return true if libcall is dead as a whole. */
-
-static bool
-dead_libcall_p (rtx insn, int *counts)
-{
- rtx note, set, new;
-
- /* See if there's a REG_EQUAL note on this insn and try to
- replace the source with the REG_EQUAL expression.
-
- We assume that insns with REG_RETVALs can only be reg->reg
- copies at this point. */
- note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
- if (!note)
- return false;
-
- set = single_set (insn);
- if (!set)
- return false;
-
- new = simplify_rtx (XEXP (note, 0));
- if (!new)
- new = XEXP (note, 0);
-
- /* While changing insn, we must update the counts accordingly. */
- count_reg_usage (insn, counts, NULL_RTX, -1);
-
- if (validate_change (insn, &SET_SRC (set), new, 0))
- {
- count_reg_usage (insn, counts, NULL_RTX, 1);
- remove_note (insn, find_reg_note (insn, REG_RETVAL, NULL_RTX));
- remove_note (insn, note);
- return true;
- }
-
- if (CONSTANT_P (new))
- {
- new = force_const_mem (GET_MODE (SET_DEST (set)), new);
- if (new && validate_change (insn, &SET_SRC (set), new, 0))
- {
- count_reg_usage (insn, counts, NULL_RTX, 1);
- remove_note (insn, find_reg_note (insn, REG_RETVAL, NULL_RTX));
- remove_note (insn, note);
- return true;
- }
- }
-
- count_reg_usage (insn, counts, NULL_RTX, 1);
- return false;
-}
-
/* Scan all the insns and delete any that are dead; i.e., they store a register
that is never used or they copy a register to itself.
{
int *counts;
rtx insn, prev;
- int in_libcall = 0, dead_libcall = 0;
int ndead = 0;
timevar_push (TV_DELETE_TRIVIALLY_DEAD);
if (!INSN_P (insn))
continue;
- /* Don't delete any insns that are part of a libcall block unless
- we can delete the whole libcall block.
-
- Flow or loop might get confused if we did that. Remember
- that we are scanning backwards. */
- if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
- {
- in_libcall = 1;
- live_insn = 1;
- dead_libcall = dead_libcall_p (insn, counts);
- }
- else if (in_libcall)
- live_insn = ! dead_libcall;
- else
- live_insn = insn_live_p (insn, counts);
+ live_insn = insn_live_p (insn, counts);
/* If this is a dead insn, delete it and show registers in it aren't
being used. */
- if (! live_insn)
+ if (! live_insn && dbg_cnt (delete_trivial_dead))
{
count_reg_usage (insn, counts, NULL_RTX, -1);
delete_insn_and_edges (insn);
ndead++;
}
-
- if (in_libcall && find_reg_note (insn, REG_LIBCALL, NULL_RTX))
- {
- in_libcall = 0;
- dead_libcall = 0;
- }
}
if (dump_file && ndead)
permitted to change the mode of CC_SRC to a compatible mode. This
returns VOIDmode if no equivalent assignments were found.
Otherwise it returns the mode which CC_SRC should wind up with.
+ ORIG_BB should be the same as BB in the outermost cse_cc_succs call,
+ but is passed unmodified down to recursive calls in order to prevent
+ endless recursion.
The main complexity in this function is handling the mode issues.
We may have more than one duplicate which we can eliminate, and we
try to find a mode which will work for multiple duplicates. */
static enum machine_mode
-cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode)
+cse_cc_succs (basic_block bb, basic_block orig_bb, rtx cc_reg, rtx cc_src,
+ bool can_change_mode)
{
bool found_equiv;
enum machine_mode mode;
continue;
if (EDGE_COUNT (e->dest->preds) != 1
- || e->dest == EXIT_BLOCK_PTR)
+ || e->dest == EXIT_BLOCK_PTR
+ /* Avoid endless recursion on unreachable blocks. */
+ || e->dest == orig_bb)
continue;
end = NEXT_INSN (BB_END (e->dest));
{
enum machine_mode submode;
- submode = cse_cc_succs (e->dest, cc_reg, cc_src, false);
+ submode = cse_cc_succs (e->dest, orig_bb, cc_reg, cc_src, false);
if (submode != VOIDmode)
{
gcc_assert (submode == mode);
newreg);
}
- delete_insn (insns[i]);
+ delete_insn_and_edges (insns[i]);
}
return mode;
the basic block. */
orig_mode = GET_MODE (cc_src);
- mode = cse_cc_succs (bb, cc_reg, cc_src, true);
+ mode = cse_cc_succs (bb, bb, cc_reg, cc_src, true);
if (mode != VOIDmode)
{
gcc_assert (mode == GET_MODE (cc_src));
if (dump_file)
dump_flow_info (dump_file, dump_flags);
- reg_scan (get_insns (), max_reg_num ());
-
tem = cse_main (get_insns (), max_reg_num ());
- if (tem)
- rebuild_jump_labels (get_insns ());
- if (purge_all_dead_edges ())
- delete_unreachable_blocks ();
-
- delete_trivially_dead_insns (get_insns (), max_reg_num ());
/* If we are not running more CSE passes, then we are no longer
expecting CSE to be run. But always rerun it in a cheap mode. */
cse_not_expected = !flag_rerun_cse_after_loop && !flag_gcse;
- if (tem)
- delete_dead_jumptables ();
+ if (tem == 2)
+ {
+ timevar_push (TV_JUMP);
+ rebuild_jump_labels (get_insns ());
+ cleanup_cfg (0);
+ timevar_pop (TV_JUMP);
+ }
+ else if (tem == 1 || optimize > 1)
+ cleanup_cfg (0);
- if (tem || optimize > 1)
- cleanup_cfg (CLEANUP_EXPENSIVE);
return 0;
}
-struct tree_opt_pass pass_cse =
+struct rtl_opt_pass pass_cse =
{
+ {
+ RTL_PASS,
"cse1", /* name */
gate_handle_cse, /* gate */
rest_of_handle_cse, /* execute */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
+ TODO_df_finish | TODO_verify_rtl_sharing |
TODO_dump_func |
- TODO_ggc_collect, /* todo_flags_finish */
- 's' /* letter */
+ TODO_ggc_collect |
+ TODO_verify_flow, /* todo_flags_finish */
+ }
};
bypassed safely. */
cse_condition_code_reg ();
- purge_all_dead_edges ();
delete_trivially_dead_insns (get_insns (), max_reg_num ());
- if (tem)
+ if (tem == 2)
{
timevar_push (TV_JUMP);
rebuild_jump_labels (get_insns ());
- delete_dead_jumptables ();
- cleanup_cfg (CLEANUP_EXPENSIVE);
+ cleanup_cfg (0);
timevar_pop (TV_JUMP);
}
- reg_scan (get_insns (), max_reg_num ());
+ else if (tem == 1)
+ cleanup_cfg (0);
+
cse_not_expected = 1;
return 0;
}
-struct tree_opt_pass pass_cse2 =
+struct rtl_opt_pass pass_cse2 =
{
+ {
+ RTL_PASS,
"cse2", /* name */
gate_handle_cse2, /* gate */
rest_of_handle_cse2, /* execute */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
+ TODO_df_finish | TODO_verify_rtl_sharing |
TODO_dump_func |
- TODO_ggc_collect, /* todo_flags_finish */
- 't' /* letter */
+ TODO_ggc_collect |
+ TODO_verify_flow /* todo_flags_finish */
+ }
};