/* Common subexpression elimination for GNU compiler.
- Copyright (C) 1987, 88, 89, 92-7, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1987, 88, 89, 92-7, 1998, 1999 Free Software Foundation, Inc.
This file is part of GNU CC.
#include "config.h"
-/* Must precede rtl.h for FFS. */
-#include <stdio.h>
+/* stdio.h must precede rtl.h for FFS. */
+#include "system.h"
+#include <setjmp.h>
#include "rtl.h"
+#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
+#include "function.h"
#include "expr.h"
-
-#include <setjmp.h>
+#include "toplev.h"
+#include "output.h"
+#include "hashtab.h"
+#include "ggc.h"
/* The basic idea of common subexpression elimination is to go
through the code, keeping a record of expressions that would
expressions encountered with the cheapest equivalent expression.
It is too complicated to keep track of the different possibilities
- when control paths merge; so, at each label, we forget all that is
- known and start fresh. This can be described as processing each
- basic block separately. Note, however, that these are not quite
- the same as the basic blocks found by a later pass and used for
- data flow analysis and register packing. We do not need to start fresh
- after a conditional jump instruction if there is no label there.
+ when control paths merge in this code; so, at each label, we forget all
+ that is known and start fresh. This can be described as processing each
+ extended basic block separately. We have a separate pass to perform
+ global CSE.
+
+ Note CSE can turn a conditional or computed jump into a nop or
+ an unconditional jump. When this occurs we arrange to run the jump
+ optimizer after CSE to delete the unreachable code.
We use two data structures to record the equivalent expressions:
a hash table for most expressions, and several vectors together
static rtx this_insn;
-/* Index by register number, gives the quantity number
- of the register's current contents. */
-
-static int *reg_qty;
-
/* Index by register number, gives the number of the next (or
previous) register in the chain of registers sharing the same
value.
static int *reg_next_eqv;
static int *reg_prev_eqv;
-/* Index by register number, gives the number of times
- that register has been altered in the current basic block. */
+struct cse_reg_info {
+ /* The number of times the register has been altered in the current
+ basic block. */
+ int reg_tick;
+
+ /* The next cse_reg_info structure in the free or used list. */
+ struct cse_reg_info* next;
-static int *reg_tick;
+ /* The REG_TICK value at which rtx's containing this register are
+ valid in the hash table. If this does not equal the current
+ reg_tick value, such expressions existing in the hash table are
+ invalid. */
+ int reg_in_table;
-/* Index by register number, gives the reg_tick value at which
- rtx's containing this register are valid in the hash table.
- If this does not equal the current reg_tick value, such expressions
- existing in the hash table are invalid.
- If this is -1, no expressions containing this register have been
- entered in the table. */
+ /* The quantity number of the register's current contents. */
+ int reg_qty;
-static int *reg_in_table;
+ /* Search key */
+ int regno;
+};
+
+/* A free list of cse_reg_info entries. */
+static struct cse_reg_info *cse_reg_info_free_list;
+
+/* A used list of cse_reg_info entries. */
+static struct cse_reg_info *cse_reg_info_used_list;
+static struct cse_reg_info *cse_reg_info_used_list_end;
+
+/* A mapping from registers to cse_reg_info data structures. */
+static hash_table_t cse_reg_info_tree;
+
+/* The last lookup we did into the cse_reg_info_tree. This allows us
+ to cache repeated lookups. */
+static int cached_regno;
+static struct cse_reg_info *cached_cse_reg_info;
/* A HARD_REG_SET containing all the hard registers for which there is
currently a REG expression in the hash table. Note the difference
static HARD_REG_SET regs_invalidated_by_call;
-/* Two vectors of ints:
- one containing max_reg -1's; the other max_reg + 500 (an approximation
- for max_qty) elements where element i contains i.
- These are used to initialize various other vectors fast. */
-
-static int *all_minus_one;
-static int *consec_ints;
-
/* CUID of insn that starts the basic block currently being cse-processed. */
static int cse_basic_block_start;
#define HASH(X, M) \
(GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
- ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS \
+ ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X))) % NBUCKETS \
: canon_hash (X, M) % NBUCKETS)
/* Determine whether register number N is considered a fixed register for CSE.
: 2) \
: notreg_cost(X))
+/* Get the info associated with register N. */
+
+#define GET_CSE_REG_INFO(N) \
+ (((N) == cached_regno && cached_cse_reg_info) \
+ ? cached_cse_reg_info : get_cse_reg_info ((N)))
+
+/* Get the number of times this register has been updated in this
+ basic block. */
+
+#define REG_TICK(N) ((GET_CSE_REG_INFO (N))->reg_tick)
+
+/* Get the point at which REG was recorded in the table. */
+
+#define REG_IN_TABLE(N) ((GET_CSE_REG_INFO (N))->reg_in_table)
+
+/* Get the quantity number for REG. */
+
+#define REG_QTY(N) ((GET_CSE_REG_INFO (N))->reg_qty)
+
/* Determine if the quantity number for register X represents a valid index
into the `qty_...' variables. */
-#define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))
+#define REGNO_QTY_VALID_P(N) (REG_QTY (N) != (N))
+
+#ifdef ADDRESS_COST
+/* The ADDRESS_COST macro does not deal with ADDRESSOF nodes. But,
+ during CSE, such nodes are present. Using an ADDRESSOF node which
+ refers to the address of a REG is a good thing because we can then
+ turn (MEM (ADDRESSOF (REG))) into just plain REG. */
+#define CSE_ADDRESS_COST(RTX) \
+ ((GET_CODE (RTX) == ADDRESSOF && REG_P (XEXP ((RTX), 0))) \
+ ? -1 : ADDRESS_COST(RTX))
+#endif
static struct table_elt *table[NBUCKETS];
#define FIXED_BASE_PLUS_P(X) \
((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
- || (X) == arg_pointer_rtx \
+ || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
|| (X) == virtual_stack_vars_rtx \
|| (X) == virtual_incoming_args_rtx \
|| (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
&& (XEXP (X, 0) == frame_pointer_rtx \
|| XEXP (X, 0) == hard_frame_pointer_rtx \
- || XEXP (X, 0) == arg_pointer_rtx \
+ || (XEXP (X, 0) == arg_pointer_rtx \
+ && fixed_regs[ARG_POINTER_REGNUM]) \
|| XEXP (X, 0) == virtual_stack_vars_rtx \
|| XEXP (X, 0) == virtual_incoming_args_rtx)) \
|| GET_CODE (X) == ADDRESSOF)
|| (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
&& (XEXP (X, 0) == frame_pointer_rtx \
|| XEXP (X, 0) == hard_frame_pointer_rtx \
- || XEXP (X, 0) == arg_pointer_rtx \
+ || (XEXP (X, 0) == arg_pointer_rtx \
+ && fixed_regs[ARG_POINTER_REGNUM]) \
|| XEXP (X, 0) == virtual_stack_vars_rtx \
|| XEXP (X, 0) == virtual_incoming_args_rtx)) \
|| (X) == stack_pointer_rtx \
static void invalidate PROTO((rtx, enum machine_mode));
static int cse_rtx_varies_p PROTO((rtx));
static void remove_invalid_refs PROTO((int));
+static void remove_invalid_subreg_refs PROTO((int, int, enum machine_mode));
static void rehash_using_reg PROTO((rtx));
static void invalidate_memory PROTO((void));
static void invalidate_for_call PROTO((void));
static void record_jump_equiv PROTO((rtx, int));
static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
rtx, rtx, int));
-static void cse_insn PROTO((rtx, int));
+static void cse_insn PROTO((rtx, rtx));
static int note_mem_written PROTO((rtx));
static void invalidate_from_clobbers PROTO((rtx));
static rtx cse_process_notes PROTO((rtx, rtx));
static void cse_set_around_loop PROTO((rtx, rtx, rtx));
static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
static void count_reg_usage PROTO((rtx, int *, rtx, int));
-
-extern int rtx_equal_function_value_matters;
+extern void dump_class PROTO((struct table_elt*));
+static void check_fold_consts PROTO((PTR));
+static struct cse_reg_info* get_cse_reg_info PROTO((int));
+static unsigned int hash_cse_reg_info PROTO((hash_table_entry_t));
+static int cse_reg_info_equal_p PROTO((hash_table_entry_t,
+ hash_table_entry_t));
+
+static void flush_hash_table PROTO((void));
\f
+/* Dump the expressions in the equivalence class indicated by CLASSP.
+   This function is used only for debugging. */
+void
+dump_class (classp)
+ struct table_elt *classp;
+{
+ struct table_elt *elt;
+
+ fprintf (stderr, "Equivalence chain for ");
+ print_rtl (stderr, classp->exp);
+ fprintf (stderr, ": \n");
+
+ /* Walk the whole class from its canonical first element, following
+    the next_same_value links, printing each member's expression. */
+ for (elt = classp->first_same_value; elt; elt = elt->next_same_value)
+ {
+ print_rtl (stderr, elt->exp);
+ fprintf (stderr, "\n");
+ }
+}
+
/* Return an estimate of the cost of computing rtx X.
One use is in cse, to decide which expression to keep in the hash table.
Another is in rtl generation, to pick the cheapest way to multiply.
int
rtx_cost (x, outer_code)
rtx x;
- enum rtx_code outer_code;
+ enum rtx_code outer_code ATTRIBUTE_UNUSED;
{
register int i, j;
register enum rtx_code code;
- register char *fmt;
+ register const char *fmt;
register int total;
if (x == 0)
#ifdef RTX_COSTS
RTX_COSTS (x, code, outer_code);
#endif
+#ifdef CONST_COSTS
CONST_COSTS (x, code, outer_code);
+#endif
+
+ default:
+#ifdef DEFAULT_RTX_COSTS
+ DEFAULT_RTX_COSTS(x, code, outer_code);
+#endif
+ break;
}
/* Sum the costs of the sub-rtx's, plus cost of this operation,
return total;
}
\f
+/* Return the cse_reg_info structure for register number REGNO, creating
+   one if it does not yet exist.  A new entry is taken from the free list
+   when possible (else xmalloc'd) and initialized with reg_tick 0,
+   reg_in_table -1 (nothing recorded in the hash table yet) and reg_qty
+   equal to REGNO.  The result is remembered in cached_regno /
+   cached_cse_reg_info, so repeated lookups of the same register (the
+   common case, exploited by the GET_CSE_REG_INFO macro) are fast. */
+static struct cse_reg_info *
+get_cse_reg_info (regno)
+ int regno;
+{
+ struct cse_reg_info *cri;
+ struct cse_reg_info **entry;
+ struct cse_reg_info temp;
+
+ /* See if we already have this entry. */
+ temp.regno = regno;
+ entry = (struct cse_reg_info **) find_hash_table_entry (cse_reg_info_tree,
+ &temp, TRUE);
+
+ if (*entry)
+ cri = *entry;
+ else
+ {
+ /* Get a new cse_reg_info structure. */
+ if (cse_reg_info_free_list)
+ {
+ cri = cse_reg_info_free_list;
+ cse_reg_info_free_list = cri->next;
+ }
+ else
+ cri = (struct cse_reg_info *) xmalloc (sizeof (struct cse_reg_info));
+
+ /* Initialize it. */
+ cri->reg_tick = 0;
+ cri->reg_in_table = -1;
+ cri->reg_qty = regno;
+ cri->regno = regno;
+ /* Thread the entry onto the used list so new_basic_block can
+    recycle every entry onto the free list in one splice. */
+ cri->next = cse_reg_info_used_list;
+ cse_reg_info_used_list = cri;
+ if (!cse_reg_info_used_list_end)
+ cse_reg_info_used_list_end = cri;
+
+ *entry = cri;
+ }
+
+ /* Cache this lookup; we tend to be looking up information about the
+ same register several times in a row. */
+ cached_regno = regno;
+ cached_cse_reg_info = cri;
+
+ return cri;
+}
+
+/* Hash function for the cse_reg_info hash table: an entry hashes to
+   its register number. */
+static unsigned int
+hash_cse_reg_info (el_ptr)
+ hash_table_entry_t el_ptr;
+{
+ return ((const struct cse_reg_info *) el_ptr)->regno;
+}
+
+/* Equality predicate for the cse_reg_info hash table: two entries are
+   equal when they describe the same register number. */
+static int
+cse_reg_info_equal_p (el_ptr1, el_ptr2)
+ hash_table_entry_t el_ptr1;
+ hash_table_entry_t el_ptr2;
+{
+ return (((const struct cse_reg_info *) el_ptr1)->regno
+ == ((const struct cse_reg_info *) el_ptr2)->regno);
+}
+
/* Clear the hash table and initialize each register with its own quantity,
for a new basic block. */
next_qty = max_reg;
- bzero ((char *) reg_tick, max_reg * sizeof (int));
+ if (cse_reg_info_tree)
+ {
+ delete_hash_table (cse_reg_info_tree);
+ if (cse_reg_info_used_list)
+ {
+ cse_reg_info_used_list_end->next = cse_reg_info_free_list;
+ cse_reg_info_free_list = cse_reg_info_used_list;
+ cse_reg_info_used_list = cse_reg_info_used_list_end = 0;
+ }
+ cached_cse_reg_info = 0;
+ }
+
+ cse_reg_info_tree = create_hash_table (0, hash_cse_reg_info,
+ cse_reg_info_equal_p);
- bcopy ((char *) all_minus_one, (char *) reg_in_table,
- max_reg * sizeof (int));
- bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
CLEAR_HARD_REG_SET (hard_regs_in_table);
/* The per-quantity values used to be initialized here, but it is
if (next_qty >= max_qty)
abort ();
- q = reg_qty[reg] = next_qty++;
+ q = REG_QTY (reg) = next_qty++;
qty_first_reg[q] = reg;
qty_last_reg[q] = reg;
qty_const[q] = qty_const_insn[q] = 0;
register int new, old;
{
register int lastr, firstr;
- register int q = reg_qty[old];
+ register int q = REG_QTY (old);
/* Nothing should become eqv until it has a "non-invalid" qty number. */
if (! REGNO_QTY_VALID_P (old))
abort ();
- reg_qty[new] = q;
+ REG_QTY (new) = q;
firstr = qty_first_reg[q];
lastr = qty_last_reg[q];
delete_reg_equiv (reg)
register int reg;
{
- register int q = reg_qty[reg];
+ register int q = REG_QTY (reg);
register int p, n;
/* If invalid, do nothing. */
else
qty_first_reg[q] = n;
- reg_qty[reg] = reg;
+ REG_QTY (reg) = reg;
}
/* Remove any invalid expressions from the hash table
{
register enum rtx_code code;
register int i, j;
- register char *fmt;
+ register const char *fmt;
register int changed = 0;
if (x == 0)
for (i = regno; i < endregno; i++)
{
- if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
+ if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
remove_invalid_refs (i);
- reg_in_table[i] = reg_tick[i];
+ REG_IN_TABLE (i) = REG_TICK (i);
+ }
+
+ return 0;
+ }
+
+ /* If this is a SUBREG, we don't want to discard other SUBREGs of the same
+ pseudo if they don't use overlapping words. We handle only pseudos
+ here for simplicity. */
+ if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
+ {
+ int i = REGNO (SUBREG_REG (x));
+
+ if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
+ {
+ /* If reg_tick has been incremented more than once since
+ reg_in_table was last set, that means that the entire
+ register has been set before, so discard anything memorized
+ for the entire register, including all SUBREG expressions. */
+ if (REG_IN_TABLE (i) != REG_TICK (i) - 1)
+ remove_invalid_refs (i);
+ else
+ remove_invalid_subreg_refs (i, SUBREG_WORD (x), GET_MODE (x));
}
+ REG_IN_TABLE (i) = REG_TICK (i);
return 0;
}
wrong mode for that equivalence, don't do anything here. */
if (REGNO_QTY_VALID_P (regno)
- && qty_mode[reg_qty[regno]] != GET_MODE (x))
+ && qty_mode[REG_QTY (regno)] != GET_MODE (x))
return 0;
if (modified || ! REGNO_QTY_VALID_P (regno))
}
make_new_qty (regno);
- qty_mode[reg_qty[regno]] = GET_MODE (x);
+ qty_mode[REG_QTY (regno)] = GET_MODE (x);
return 1;
}
else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
&& ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
{
+ int regno = REGNO (SUBREG_REG (x));
+
insert_regs (SUBREG_REG (x), NULL_PTR, 0);
- mention_regs (SUBREG_REG (x));
+ /* Mention_regs checks if REG_TICK is exactly one larger than
+ REG_IN_TABLE to find out if there was only a single preceding
+ invalidation - for the SUBREG - or another one, which would be
+ for the full register. Since we don't invalidate the SUBREG
+ here first, we might have to bump up REG_TICK so that mention_regs
+ will do the right thing. */
+ if (REG_IN_TABLE (regno) >= 0
+ && REG_TICK (regno) == REG_IN_TABLE (regno) + 1)
+ REG_TICK (regno)++;
+ mention_regs (x);
return 1;
}
else
{
register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
GET_MODE (x));
+ /* If we are looking for a CONST_INT, the mode doesn't really matter, as
+ long as we are narrowing. So if we looked in vain for a mode narrower
+ than word_mode before, look for word_mode now. */
+ if (p == 0 && code == CONST_INT
+ && GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (word_mode))
+ {
+ x = copy_rtx (x);
+ PUT_MODE (x, word_mode);
+ p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS, word_mode);
+ }
+
if (p == 0)
return 0;
if (elt->is_const && classp && GET_CODE (classp->exp) == REG
&& GET_CODE (x) != REG)
{
- qty_const[reg_qty[REGNO (classp->exp)]]
- = gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
- qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
+ qty_const[REG_QTY (REGNO (classp->exp))]
+ = gen_lowpart_if_possible (qty_mode[REG_QTY (REGNO (classp->exp))], x);
+ qty_const_insn[REG_QTY (REGNO (classp->exp))] = this_insn;
}
- else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]]
+ else if (GET_CODE (x) == REG && classp && ! qty_const[REG_QTY (REGNO (x))]
&& ! elt->is_const)
{
register struct table_elt *p;
{
if (p->is_const && GET_CODE (p->exp) != REG)
{
- qty_const[reg_qty[REGNO (x)]]
+ qty_const[REG_QTY (REGNO (x))]
= gen_lowpart_if_possible (GET_MODE (x), p->exp);
- qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
+ qty_const_insn[REG_QTY (REGNO (x))] = this_insn;
break;
}
}
}
- else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
- && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
- qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
+ else if (GET_CODE (x) == REG && qty_const[REG_QTY (REGNO (x))]
+ && GET_MODE (x) == qty_mode[REG_QTY (REGNO (x))])
+ qty_const_insn[REG_QTY (REGNO (x))] = this_insn;
/* If this is a constant with symbolic value,
and it has a term with an explicit integer value,
}
}
\f
+
+/* Flush the entire hash table. */
+
+static void
+flush_hash_table ()
+{
+ int i;
+ struct table_elt *p;
+
+ /* Re-fetch table[i] on every iteration rather than following
+    next_same_hash: each removal unlinks entries from the chain, so
+    the head is the only pointer guaranteed to stay valid. */
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = table[i])
+ {
+ /* Note that invalidate can remove elements
+ after P in the current hash chain. */
+ if (GET_CODE (p->exp) == REG)
+ invalidate (p->exp, p->mode)
+ else
+ remove_from_table (p, i);
+ }
+}
+
+
/* Remove from the hash table, or mark as invalid,
all expressions whose values could be altered by storing in X.
X is a register, a subreg, or a memory reference with nonvarying address
register unsigned hash = HASH (x, GET_MODE (x));
/* Remove REGNO from any quantity list it might be on and indicate
- that it's value might have changed. If it is a pseudo, remove its
+ that its value might have changed. If it is a pseudo, remove its
entry from the hash table.
For a hard register, we do the first two actions above for any
overlap these registers. */
delete_reg_equiv (regno);
- reg_tick[regno]++;
+ REG_TICK (regno)++;
if (regno >= FIRST_PSEUDO_REGISTER)
{
struct table_elt *elt;
- while (elt = lookup_for_remove (x, hash, GET_MODE (x)))
+ while ((elt = lookup_for_remove (x, hash, GET_MODE (x))))
remove_from_table (elt, hash);
}
else
in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
delete_reg_equiv (i);
- reg_tick[i]++;
+ REG_TICK (i)++;
}
if (in_table)
tendregno
= tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
if (tendregno > regno && tregno < endregno)
- remove_from_table (p, hash);
+ remove_from_table (p, hash);
}
}
return;
}
+ /* If X is a parallel, invalidate all of its elements. */
+
+ if (GET_CODE (x) == PARALLEL)
+ {
+ for (i = XVECLEN (x, 0) - 1; i >= 0 ; --i)
+ invalidate (XVECEXP (x, 0, i), VOIDmode);
+ return;
+ }
+
+ /* If X is an expr_list, this is part of a disjoint return value;
+ extract the location in question ignoring the offset. */
+
+ if (GET_CODE (x) == EXPR_LIST)
+ {
+ invalidate (XEXP (x, 0), VOIDmode);
+ return;
+ }
+
/* X is not a register; it must be a memory reference with
a nonvarying address. Remove all hash table elements
that refer to overlapping pieces of memory. */
remove_from_table (p, i);
}
}
+
+/* Likewise, but for the invalidation of a SUBREG of register REGNO
+   whose SUBREG_WORD is WORD and whose mode is MODE: remove every hash
+   table entry that refers to REGNO, except plain REG entries and
+   SUBREGs of REGNO whose word range does not overlap WORD .. END. */
+static void
+remove_invalid_subreg_refs (regno, word, mode)
+ int regno;
+ int word;
+ enum machine_mode mode;
+{
+ register int i;
+ register struct table_elt *p, *next;
+ /* Last word covered by the invalidated subreg. */
+ int end = word + (GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD;
+
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = next)
+ {
+ rtx exp;
+ next = p->next_same_hash;
+
+ exp = p->exp;
+ /* Discard the entry when it mentions REGNO and is neither a plain
+    REG nor a SUBREG of REGNO lying entirely outside WORD .. END. */
+ if (GET_CODE (p->exp) != REG
+ && (GET_CODE (exp) != SUBREG
+ || GET_CODE (SUBREG_REG (exp)) != REG
+ || REGNO (SUBREG_REG (exp)) != regno
+ || (((SUBREG_WORD (exp)
+ + (GET_MODE_SIZE (GET_MODE (exp)) - 1) / UNITS_PER_WORD)
+ >= word)
+ && SUBREG_WORD (exp) <= end))
+ && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
+ remove_from_table (p, i);
+ }
+}
\f
/* Recompute the hash codes of any valid entries in the hash table that
reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
rehash_using_reg (x)
rtx x;
{
- int i;
+ unsigned int i;
struct table_elt *p, *next;
unsigned hash;
valid entries in the table, we have no work to do. */
if (GET_CODE (x) != REG
- || reg_in_table[REGNO (x)] < 0
- || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
+ || REG_IN_TABLE (REGNO (x)) < 0
+ || REG_IN_TABLE (REGNO (x)) != REG_TICK (REGNO (x)))
return;
/* Scan all hash chains looking for valid entries that mention X.
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
{
delete_reg_equiv (regno);
- if (reg_tick[regno] >= 0)
- reg_tick[regno]++;
+ if (REG_TICK (regno) >= 0)
+ REG_TICK (regno)++;
in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
}
{
next = p->next_same_hash;
- if (p->in_memory)
- {
- remove_from_table (p, hash);
- continue;
- }
-
if (GET_CODE (p->exp) != REG
|| REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
continue;
register int i, j;
register unsigned hash = 0;
register enum rtx_code code;
- register char *fmt;
+ register const char *fmt;
/* repeat is used to turn tail-recursion into iteration. */
repeat:
/* On some machines, we can't record any non-fixed hard register,
because extending its life will cause reload problems. We
- consider ap, fp, and sp to be fixed for this purpose.
+ consider ap, fp, and sp to be fixed for this purpose.
+
+ We also consider CCmode registers to be fixed for this purpose;
+ failure to do so leads to failure to simplify 0<100 type of
+ conditionals.
+
On all machines, we can't record any global registers. */
if (regno < FIRST_PSEUDO_REGISTER
&& regno != FRAME_POINTER_REGNUM
&& regno != HARD_FRAME_POINTER_REGNUM
&& regno != ARG_POINTER_REGNUM
- && regno != STACK_POINTER_REGNUM)))
+ && regno != STACK_POINTER_REGNUM
+ && GET_MODE_CLASS (GET_MODE (x)) != MODE_CC)))
{
do_not_record = 1;
return 0;
}
- hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
+ hash += ((unsigned) REG << 7) + (unsigned) REG_QTY (regno);
return hash;
}
+ /* We handle SUBREG of a REG specially because the underlying
+ reg changes its hash value with every value change; we don't
+ want to have to forget unrelated subregs when one subreg changes. */
+ case SUBREG:
+ {
+ if (GET_CODE (SUBREG_REG (x)) == REG)
+ {
+ hash += (((unsigned) SUBREG << 7)
+ + REGNO (SUBREG_REG (x)) + SUBREG_WORD (x));
+ return hash;
+ }
+ break;
+ }
+
case CONST_INT:
{
unsigned HOST_WIDE_INT tem = INTVAL (x);
if (GET_MODE (x) != VOIDmode)
for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
{
- unsigned tem = XINT (x, i);
+ unsigned HOST_WIDE_INT tem = XWINT (x, i);
hash += tem;
}
else
/* Assume there is only one rtx object for any given label. */
case LABEL_REF:
hash
- += ((unsigned) LABEL_REF << 7) + (unsigned HOST_WIDE_INT) XEXP (x, 0);
+ += ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
return hash;
case SYMBOL_REF:
hash
- += ((unsigned) SYMBOL_REF << 7) + (unsigned HOST_WIDE_INT) XSTR (x, 0);
+ += ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
return hash;
case MEM:
register unsigned tem = XINT (x, i);
hash += tem;
}
- else if (fmt[i] == '0')
+ else if (fmt[i] == '0' || fmt[i] == 't')
/* unused */;
else
abort ();
in all the places that search a hash table chain for an equivalent
for a given value. A possible equivalent that has different structure
has its hash code computed from different data. Whether the hash code
- is the same as that of the the given value is pure luck. */
+ is the same as that of the given value is pure luck. */
static int
exp_equiv_p (x, y, validate, equal_values)
{
register int i, j;
register enum rtx_code code;
- register char *fmt;
+ register const char *fmt;
/* Note: it is incorrect to assume an expression is equivalent to itself
if VALIDATE is nonzero. */
equivalent. We only have to validate if Y is a register. */
if (CONSTANT_P (x) && GET_CODE (y) == REG
&& REGNO_QTY_VALID_P (REGNO (y))
- && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
- && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
- && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
+ && GET_MODE (y) == qty_mode[REG_QTY (REGNO (y))]
+ && rtx_equal_p (x, qty_const[REG_QTY (REGNO (y))])
+ && (! validate || REG_IN_TABLE (REGNO (y)) == REG_TICK (REGNO (y))))
return 1;
if (CONSTANT_P (y) && code == REG
&& REGNO_QTY_VALID_P (REGNO (x))
- && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
- && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
+ && GET_MODE (x) == qty_mode[REG_QTY (REGNO (x))]
+ && rtx_equal_p (y, qty_const[REG_QTY (REGNO (x))]))
return 1;
return 0;
equivalent. If there are and we are not to validate, they
are equivalent. Otherwise, ensure all regs are up-to-date. */
- if (reg_qty[REGNO (x)] != reg_qty[regno])
+ if (REG_QTY (REGNO (x)) != REG_QTY (regno))
return 0;
if (! validate)
return 1;
for (i = regno; i < endregno; i++)
- if (reg_in_table[i] != reg_tick[i])
+ if (REG_IN_TABLE (i) != REG_TICK (i))
return 0;
return 1;
break;
case '0':
+ case 't':
break;
default:
{
register int i;
register enum rtx_code code;
- register char *fmt;
+ register const char *fmt;
repeat:
if (x == y)
start = 0;
end = 0;
+ if (flag_pic && GET_CODE (base) == PLUS
+ && XEXP (base, 0) == pic_offset_table_rtx)
+ base = XEXP (base, 1);
+
/* Registers with nonvarying addresses usually have constant equivalents;
but the frame pointer register is also possible. */
if (GET_CODE (base) == REG
&& qty_const != 0
&& REGNO_QTY_VALID_P (REGNO (base))
- && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
- && qty_const[reg_qty[REGNO (base)]] != 0)
- base = qty_const[reg_qty[REGNO (base)]];
+ && qty_mode[REG_QTY (REGNO (base))] == GET_MODE (base)
+ && qty_const[REG_QTY (REGNO (base))] != 0)
+ base = qty_const[REG_QTY (REGNO (base))];
else if (GET_CODE (base) == PLUS
&& GET_CODE (XEXP (base, 1)) == CONST_INT
&& GET_CODE (XEXP (base, 0)) == REG
&& qty_const != 0
&& REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
- && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
+ && (qty_mode[REG_QTY (REGNO (XEXP (base, 0)))]
== GET_MODE (XEXP (base, 0)))
- && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
+ && qty_const[REG_QTY (REGNO (XEXP (base, 0)))])
{
start = INTVAL (XEXP (base, 1));
- base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
+ base = qty_const[REG_QTY (REGNO (XEXP (base, 0)))];
}
/* This can happen as the result of virtual register instantiation,
if the initial offset is too large to be a valid address. */
&& GET_CODE (XEXP (base, 1)) == REG
&& qty_const != 0
&& REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
- && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
+ && (qty_mode[REG_QTY (REGNO (XEXP (base, 0)))]
== GET_MODE (XEXP (base, 0)))
- && qty_const[reg_qty[REGNO (XEXP (base, 0))]]
+ && qty_const[REG_QTY (REGNO (XEXP (base, 0)))]
&& REGNO_QTY_VALID_P (REGNO (XEXP (base, 1)))
- && (qty_mode[reg_qty[REGNO (XEXP (base, 1))]]
+ && (qty_mode[REG_QTY (REGNO (XEXP (base, 1)))]
== GET_MODE (XEXP (base, 1)))
- && qty_const[reg_qty[REGNO (XEXP (base, 1))]])
+ && qty_const[REG_QTY (REGNO (XEXP (base, 1)))])
{
- rtx tem = qty_const[reg_qty[REGNO (XEXP (base, 1))]];
- base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
+ rtx tem = qty_const[REG_QTY (REGNO (XEXP (base, 1)))];
+ base = qty_const[REG_QTY (REGNO (XEXP (base, 0)))];
/* One of the two values must be a constant. */
if (GET_CODE (base) != CONST_INT)
if (GET_CODE (x) == REG
&& REGNO_QTY_VALID_P (REGNO (x))
- && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
- && qty_const[reg_qty[REGNO (x)]] != 0)
+ && GET_MODE (x) == qty_mode[REG_QTY (REGNO (x))]
+ && qty_const[REG_QTY (REGNO (x))] != 0)
return 0;
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == REG
&& REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
&& (GET_MODE (XEXP (x, 0))
- == qty_mode[reg_qty[REGNO (XEXP (x, 0))]])
- && qty_const[reg_qty[REGNO (XEXP (x, 0))]])
+ == qty_mode[REG_QTY (REGNO (XEXP (x, 0)))])
+ && qty_const[REG_QTY (REGNO (XEXP (x, 0)))])
return 0;
/* This can happen as the result of virtual register instantiation, if
&& GET_CODE (XEXP (x, 1)) == REG
&& REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
&& (GET_MODE (XEXP (x, 0))
- == qty_mode[reg_qty[REGNO (XEXP (x, 0))]])
- && qty_const[reg_qty[REGNO (XEXP (x, 0))]]
+ == qty_mode[REG_QTY (REGNO (XEXP (x, 0)))])
+ && qty_const[REG_QTY (REGNO (XEXP (x, 0)))]
&& REGNO_QTY_VALID_P (REGNO (XEXP (x, 1)))
&& (GET_MODE (XEXP (x, 1))
- == qty_mode[reg_qty[REGNO (XEXP (x, 1))]])
- && qty_const[reg_qty[REGNO (XEXP (x, 1))]])
+ == qty_mode[REG_QTY (REGNO (XEXP (x, 1)))])
+ && qty_const[REG_QTY (REGNO (XEXP (x, 1)))])
return 0;
return rtx_varies_p (x);
{
register int i;
register enum rtx_code code;
- register char *fmt;
+ register const char *fmt;
if (x == 0)
return x;
|| ! REGNO_QTY_VALID_P (REGNO (x)))
return x;
- first = qty_first_reg[reg_qty[REGNO (x)]];
+ first = qty_first_reg[REG_QTY (REGNO (x))];
return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
: REGNO_REG_CLASS (first) == NO_REGS ? x
- : gen_rtx_REG (qty_mode[reg_qty[REGNO (x)]], first));
+ : gen_rtx_REG (qty_mode[REG_QTY (REGNO (x))], first));
}
default:
&& (((REGNO (new) < FIRST_PSEUDO_REGISTER)
!= (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
|| (insn_code = recog_memoized (insn)) < 0
- || insn_n_dups[insn_code] > 0))
+ || insn_data[insn_code].n_dups > 0))
validate_change (insn, &XEXP (x, i), new, 1);
else
XEXP (x, i) = new;
rtx insn;
rtx *loc;
{
- struct table_elt *elt, *p;
+ struct table_elt *elt;
rtx addr = *loc;
- int our_cost;
+#ifdef ADDRESS_COST
+ struct table_elt *p;
int found_better = 1;
+#endif
int save_do_not_record = do_not_record;
int save_hash_arg_in_memory = hash_arg_in_memory;
int save_hash_arg_in_struct = hash_arg_in_struct;
if (1
#ifdef ADDRESS_COST
- && (ADDRESS_COST (folded) < ADDRESS_COST (addr)
- || (ADDRESS_COST (folded) == ADDRESS_COST (addr)
+ && (CSE_ADDRESS_COST (folded) < CSE_ADDRESS_COST (addr)
+ || (CSE_ADDRESS_COST (folded) == CSE_ADDRESS_COST (addr)
&& rtx_cost (folded, MEM) > rtx_cost (addr, MEM)))
#else
&& rtx_cost (folded, MEM) < rtx_cost (addr, MEM)
#ifndef ADDRESS_COST
if (elt)
{
- our_cost = elt->cost;
+ int our_cost = elt->cost;
/* Find the lowest cost below ours that works. */
for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
while (found_better)
{
- int best_addr_cost = ADDRESS_COST (*loc);
+ int best_addr_cost = CSE_ADDRESS_COST (*loc);
int best_rtx_cost = (elt->cost + 1) >> 1;
struct table_elt *best_elt = elt;
found_better = 0;
for (p = elt->first_same_value; p; p = p->next_same_value)
- if (! p->flag
- && (GET_CODE (p->exp) == REG
- || exp_equiv_p (p->exp, p->exp, 1, 0))
- && (ADDRESS_COST (p->exp) < best_addr_cost
- || (ADDRESS_COST (p->exp) == best_addr_cost
- && (p->cost + 1) >> 1 > best_rtx_cost)))
+ if (! p->flag)
{
- found_better = 1;
- best_addr_cost = ADDRESS_COST (p->exp);
- best_rtx_cost = (p->cost + 1) >> 1;
- best_elt = p;
+ if ((GET_CODE (p->exp) == REG
+ || exp_equiv_p (p->exp, p->exp, 1, 0))
+ && (CSE_ADDRESS_COST (p->exp) < best_addr_cost
+ || (CSE_ADDRESS_COST (p->exp) == best_addr_cost
+ && (p->cost + 1) >> 1 > best_rtx_cost)))
+ {
+ found_better = 1;
+ best_addr_cost = CSE_ADDRESS_COST (p->exp);
+ best_rtx_cost = (p->cost + 1) >> 1;
+ best_elt = p;
+ }
}
if (found_better)
while (found_better)
{
- int best_addr_cost = ADDRESS_COST (*loc);
+ int best_addr_cost = CSE_ADDRESS_COST (*loc);
int best_rtx_cost = (COST (*loc) + 1) >> 1;
struct table_elt *best_elt = elt;
rtx best_rtx = *loc;
{
rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
- if ((ADDRESS_COST (new) < best_addr_cost
- || (ADDRESS_COST (new) == best_addr_cost
+ if ((CSE_ADDRESS_COST (new) < best_addr_cost
+ || (CSE_ADDRESS_COST (new) == best_addr_cost
&& (COST (new) + 1) >> 1 > best_rtx_cost)))
{
found_better = 1;
- best_addr_cost = ADDRESS_COST (new);
+ best_addr_cost = CSE_ADDRESS_COST (new);
best_rtx_cost = (COST (new) + 1) >> 1;
best_elt = p;
best_rtx = new;
abort ();
}
- /* Clear the bits that don't belong in our mode,
- unless they and our sign bit are all one.
- So we get either a reasonable negative value or a reasonable
- unsigned value for this mode. */
- if (width < HOST_BITS_PER_WIDE_INT
- && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
- != ((HOST_WIDE_INT) (-1) << (width - 1))))
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
+ val = trunc_int_for_mode (val, mode);
return GEN_INT (val);
}
set_float_handler (NULL_PTR);
- /* Clear the bits that don't belong in our mode,
- unless they and our sign bit are all one.
- So we get either a reasonable negative value or a reasonable
- unsigned value for this mode. */
- if (width < HOST_BITS_PER_WIDE_INT
- && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
- != ((HOST_WIDE_INT) (-1) << (width - 1))))
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
-
- /* If this would be an entire word for the target, but is not for
- the host, then sign-extend on the host so that the number will look
- the same way on the host that it would on the target.
-
- For example, when building a 64 bit alpha hosted 32 bit sparc
- targeted compiler, then we want the 32 bit unsigned value -1 to be
- represented as a 64 bit value -1, and not as 0x00000000ffffffff.
- The later confuses the sparc backend. */
-
- if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
- && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
- val |= ((HOST_WIDE_INT) (-1) << width);
+ val = trunc_int_for_mode (val, mode);
return GEN_INT (val);
}
if (GET_CODE (op1) == AND)
{
if (rtx_equal_p (op0, XEXP (op1, 0)))
- return cse_gen_binary (AND, mode, op0, gen_rtx_NOT (mode, XEXP (op1, 1)));
+ return cse_gen_binary (AND, mode, op0,
+ gen_rtx_NOT (mode, XEXP (op1, 1)));
if (rtx_equal_p (op0, XEXP (op1, 1)))
- return cse_gen_binary (AND, mode, op0, gen_rtx_NOT (mode, XEXP (op1, 0)));
+ return cse_gen_binary (AND, mode, op0,
+ gen_rtx_NOT (mode, XEXP (op1, 0)));
}
break;
return gen_rtx_MULT (mode, op0,
CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
- return gen_rtx_MULT (mode, op0,
- CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
+ return
+ gen_rtx_MULT (mode, op0,
+ CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
}
}
case ROTATE:
/* Rotating ~0 always results in ~0. */
if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
- && INTVAL (op0) == GET_MODE_MASK (mode)
+ && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
&& ! side_effects_p (op1))
return op0;
case SMAX:
if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
- && (INTVAL (op1)
+ && ((unsigned HOST_WIDE_INT) INTVAL (op1)
== (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
&& ! side_effects_p (op0))
return op1;
abort ();
}
- /* Clear the bits that don't belong in our mode, unless they and our sign
- bit are all one. So we get either a reasonable negative value or a
- reasonable unsigned value for this mode. */
- if (width < HOST_BITS_PER_WIDE_INT
- && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
- != ((HOST_WIDE_INT) (-1) << (width - 1))))
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
-
- /* If this would be an entire word for the target, but is not for
- the host, then sign-extend on the host so that the number will look
- the same way on the host that it would on the target.
-
- For example, when building a 64 bit alpha hosted 32 bit sparc
- targeted compiler, then we want the 32 bit unsigned value -1 to be
- represented as a 64 bit value -1, and not as 0x00000000ffffffff.
- The later confuses the sparc backend. */
-
- if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
- && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
- val |= ((HOST_WIDE_INT) (-1) << width);
+ val = trunc_int_for_mode (val, mode);
return GEN_INT (val);
}
return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
+struct cfc_args
+{
+ /* Input */
+ rtx op0, op1;
+ /* Output */
+ int equal, op0lt, op1lt;
+};
+
+/* Compare the floating point CONST_DOUBLEs in DATA->op0 and DATA->op1
+   and record the outcome in DATA->equal, DATA->op0lt and DATA->op1lt.
+   DATA is really a struct cfc_args *; the PTR signature is required
+   because this runs via do_float_handler, which traps floating point
+   exceptions raised by the comparison.  */
+static void
+check_fold_consts (data)
+ PTR data;
+{
+ struct cfc_args * args = (struct cfc_args *) data;
+ REAL_VALUE_TYPE d0, d1;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
+ REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
+ args->equal = REAL_VALUES_EQUAL (d0, d1);
+ args->op0lt = REAL_VALUES_LESS (d0, d1);
+ args->op1lt = REAL_VALUES_LESS (d1, d0);
+}
+
/* Like simplify_binary_operation except used for relational operators.
MODE is the mode of the operands, not that of the result. If MODE
is VOIDmode, both operands must also be VOIDmode and we compare the
else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
{
- REAL_VALUE_TYPE d0, d1;
- jmp_buf handler;
+ struct cfc_args args;
+
+ /* Setup input for check_fold_consts() */
+ args.op0 = op0;
+ args.op1 = op1;
- if (setjmp (handler))
+ if (do_float_handler(check_fold_consts, (PTR) &args) == 0)
+ /* We got an exception from check_fold_consts() */
return 0;
- set_float_handler (handler);
- REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
- REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
- equal = REAL_VALUES_EQUAL (d0, d1);
- op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
- op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
- set_float_handler (NULL_PTR);
+ /* Receive output from check_fold_consts() */
+ equal = args.equal;
+ op0lt = op0ltu = args.op0lt;
+ op1lt = op1ltu = args.op1lt;
}
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
/* Unsigned values are never greater than the largest
unsigned value. */
if (GET_CODE (op1) == CONST_INT
- && INTVAL (op1) == GET_MODE_MASK (mode)
+ && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
&& INTEGRAL_MODE_P (mode))
return const_true_rtx;
break;
case GTU:
if (GET_CODE (op1) == CONST_INT
- && INTVAL (op1) == GET_MODE_MASK (mode)
+ && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
&& INTEGRAL_MODE_P (mode))
return const0_rtx;
break;
&& rtx_equal_p (XEXP (op0, 1), op1)
&& rtx_equal_p (XEXP (op0, 0), op2))
return op2;
+ else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
+ {
+ rtx temp;
+ temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
+ XEXP (op0, 0), XEXP (op0, 1));
+ /* See if any simplifications were possible. */
+ if (temp == const0_rtx)
+ return op2;
+ else if (temp == const1_rtx)
+ return op1;
+ }
break;
default:
{
register enum rtx_code code;
register enum machine_mode mode;
- register char *fmt;
+ register const char *fmt;
register int i;
rtx new = 0;
int copied = 0;
if (GET_CODE (addr) == REG
&& REGNO_QTY_VALID_P (REGNO (addr))
- && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
- && qty_const[reg_qty[REGNO (addr)]] != 0)
- addr = qty_const[reg_qty[REGNO (addr)]];
+ && GET_MODE (addr) == qty_mode[REG_QTY (REGNO (addr))]
+ && qty_const[REG_QTY (REGNO (addr))] != 0)
+ addr = qty_const[REG_QTY (REGNO (addr))];
/* If address is constant, split it into a base and integer offset. */
if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
/* Indicate this is a constant. This isn't a
valid form of CONST, but it will only be used
to fold the next insns and then discarded, so
- it should be safe. */
+ it should be safe.
+
+ Note this expression must be explicitly discarded,
+ by cse_insn, else it may end up in a REG_EQUAL note
+ and "escape" to cause problems elsewhere. */
return gen_rtx_CONST (GET_MODE (new), new);
}
}
/* This is the same as calling equiv_constant; it is duplicated
here for speed. */
if (REGNO_QTY_VALID_P (REGNO (arg))
- && qty_const[reg_qty[REGNO (arg)]] != 0
- && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
- && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
+ && qty_const[REG_QTY (REGNO (arg))] != 0
+ && GET_CODE (qty_const[REG_QTY (REGNO (arg))]) != REG
+ && GET_CODE (qty_const[REG_QTY (REGNO (arg))]) != PLUS)
const_arg
= gen_lowpart_if_possible (GET_MODE (arg),
- qty_const[reg_qty[REGNO (arg)]]);
+ qty_const[REG_QTY (REGNO (arg))]);
break;
case CONST:
}
}
- else if (fmt[i] == 'E')
- /* Don't try to fold inside of a vector of expressions.
- Doing nothing is harmless. */
- ;
+ else
+ {
+ if (fmt[i] == 'E')
+ /* Don't try to fold inside of a vector of expressions.
+ Doing nothing is harmless. */
+ {;}
+ }
/* If a commutative operation, place a constant integer as the second
operand unless the first operand is also a constant integer. Otherwise,
&& (folded_arg0 == folded_arg1
|| (GET_CODE (folded_arg0) == REG
&& GET_CODE (folded_arg1) == REG
- && (reg_qty[REGNO (folded_arg0)]
- == reg_qty[REGNO (folded_arg1)]))
+ && (REG_QTY (REGNO (folded_arg0))
+ == REG_QTY (REGNO (folded_arg1))))
|| ((p0 = lookup (folded_arg0,
(safe_hash (folded_arg0, mode_arg0)
% NBUCKETS), mode_arg0))
(we only check the reverse if not floating-point). */
else if (GET_CODE (folded_arg0) == REG)
{
- int qty = reg_qty[REGNO (folded_arg0)];
+ int qty = REG_QTY (REGNO (folded_arg0));
if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
&& (comparison_dominates_p (qty_comparison_code[qty], code)
&& rtx_equal_p (qty_comparison_const[qty],
const_arg1))
|| (GET_CODE (folded_arg1) == REG
- && (reg_qty[REGNO (folded_arg1)]
+ && (REG_QTY (REGNO (folded_arg1))
== qty_comparison_qty[qty]))))
return (comparison_dominates_p (qty_comparison_code[qty],
code)
hence not save anything) or be incorrect. */
if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT
&& INTVAL (const_arg1) < 0
- && - INTVAL (const_arg1) >= 0
+ /* This used to test
+
+ - INTVAL (const_arg1) >= 0
+
+ But the Sun V5.0 compilers mis-compiled that test. So
+ instead we test for the problematic value in a more direct
+ manner and hope the Sun compilers get it correct. */
+ && INTVAL (const_arg1) !=
+ ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
&& GET_CODE (folded_arg1) == REG)
{
rtx new_const = GEN_INT (- INTVAL (const_arg1));
identical powers of two with post decrement. */
if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
- && (0
-#if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
- || exact_log2 (INTVAL (const_arg1)) >= 0
-#endif
-#if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
- || exact_log2 (- INTVAL (const_arg1)) >= 0
-#endif
- ))
+ && ((HAVE_PRE_INCREMENT
+ && exact_log2 (INTVAL (const_arg1)) >= 0)
+ || (HAVE_POST_INCREMENT
+ && exact_log2 (INTVAL (const_arg1)) >= 0)
+ || (HAVE_PRE_DECREMENT
+ && exact_log2 (- INTVAL (const_arg1)) >= 0)
+ || (HAVE_POST_DECREMENT
+ && exact_log2 (- INTVAL (const_arg1)) >= 0)))
break;
/* Compute the code used to compose the constants. For example,
const_arg1 ? const_arg1 : folded_arg1,
const_arg2 ? const_arg2 : XEXP (x, 2));
break;
+
+ case 'x':
+ /* Always eliminate CONSTANT_P_RTX at this stage. */
+ if (code == CONSTANT_P_RTX)
+ return (const_arg0 ? const1_rtx : const0_rtx);
+ break;
}
return new ? new : x;
{
if (GET_CODE (x) == REG
&& REGNO_QTY_VALID_P (REGNO (x))
- && qty_const[reg_qty[REGNO (x)]])
- x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
+ && qty_const[REG_QTY (REGNO (x))])
+ x = gen_lowpart_if_possible (GET_MODE (x), qty_const[REG_QTY (REGNO (x))]);
- if (x != 0 && CONSTANT_P (x))
+ if (x == 0 || CONSTANT_P (x))
return x;
/* If X is a MEM, try to fold it outside the context of any insn to see if
new = gen_rtx_MEM (mode, plus_constant (XEXP (x, 0), offset));
if (! memory_address_p (mode, XEXP (new, 0)))
return 0;
- MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
- MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
+ MEM_COPY_ATTRIBUTES (new, x);
return new;
}
else
/* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
we know that they are also equal in the smaller mode (this is also
true for all smaller modes whether or not there is a SUBREG, but
- is not worth testing for with no SUBREG. */
+ is not worth testing for with no SUBREG). */
/* Note that GET_MODE (op0) may not equal MODE. */
if (code == EQ && GET_CODE (op0) == SUBREG
op0_elt->in_struct = op0_in_struct;
}
- qty_comparison_code[reg_qty[REGNO (op0)]] = code;
+ qty_comparison_code[REG_QTY (REGNO (op0))] = code;
if (GET_CODE (op1) == REG)
{
/* Look it up again--in case op0 and op1 are the same. */
op1_elt->in_struct = op1_in_struct;
}
- qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
- qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
+ qty_comparison_qty[REG_QTY (REGNO (op0))] = REG_QTY (REGNO (op1));
+ qty_comparison_const[REG_QTY (REGNO (op0))] = 0;
}
else
{
- qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
- qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
+ qty_comparison_qty[REG_QTY (REGNO (op0))] = -1;
+ qty_comparison_const[REG_QTY (REGNO (op0))] = op1;
}
return;
Then install the new sources and destinations in the table
of available values.
- If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
- the insn. */
+ If LIBCALL_INSN is nonzero, don't record any equivalence made in
+ the insn. It means that INSN is inside libcall block. In this
+ case LIBCALL_INSN is the corresponding insn with REG_LIBCALL. */
/* Data on one SET contained in the instruction. */
unsigned dest_hash;
/* The SET_DEST, with SUBREG, etc., stripped. */
rtx inner_dest;
- /* Place where the pointer to the INNER_DEST was found. */
- rtx *inner_dest_loc;
/* Nonzero if the SET_SRC is in memory. */
char src_in_memory;
/* Nonzero if the SET_SRC is in a structure. */
};
static void
-cse_insn (insn, in_libcall_block)
+cse_insn (insn, libcall_insn)
rtx insn;
- int in_libcall_block;
+ rtx libcall_insn;
{
register rtx x = PATTERN (insn);
register int i;
rtx tem;
register int n_sets = 0;
+#ifdef HAVE_cc0
/* Records what this insn does to set CC0. */
rtx this_insn_cc0 = 0;
enum machine_mode this_insn_cc0_mode = VOIDmode;
+#endif
rtx src_eqv = 0;
struct table_elt *src_eqv_elt = 0;
- int src_eqv_volatile;
- int src_eqv_in_memory;
- int src_eqv_in_struct;
- unsigned src_eqv_hash;
+ int src_eqv_volatile = 0;
+ int src_eqv_in_memory = 0;
+ int src_eqv_in_struct = 0;
+ unsigned src_eqv_hash = 0;
- struct set *sets;
+ struct set *sets = NULL_PTR;
this_insn = insn;
&& ((REGNO (new) < FIRST_PSEUDO_REGISTER)
!= (REGNO (src) < FIRST_PSEUDO_REGISTER)))
|| (insn_code = recog_memoized (insn)) < 0
- || insn_n_dups[insn_code] > 0)
+ || insn_data[insn_code].n_dups > 0)
validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
else
SET_SRC (sets[i].rtl) = new;
the current contents will be tested and will always be valid. */
while (1)
{
- rtx trial;
+ rtx trial, old_src;
/* Skip invalid entries. */
while (elt && GET_CODE (elt->exp) != REG
insert the substitution here and we will delete and re-emit
the insn later. */
+ /* Keep track of the original SET_SRC so that we can fix notes
+ on libcall instructions. */
+ old_src = SET_SRC (sets[i].rtl);
+
if (n_sets == 1 && dest == pc_rtx
&& (trial == pc_rtx
|| (GET_CODE (trial) == LABEL_REF
/* Look for a substitution that makes a valid insn. */
else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
{
+ /* If we just made a substitution inside a libcall, then we
+ need to make the same substitution in any notes attached
+ to the RETVAL insn. */
+ if (libcall_insn
+ && (GET_CODE (old_src) == REG
+ || GET_CODE (old_src) == SUBREG
+ || GET_CODE (old_src) == MEM))
+ replace_rtx (REG_NOTES (libcall_insn), old_src,
+ canon_reg (SET_SRC (sets[i].rtl), insn));
+
/* The result of apply_change_group can be ignored; see
canon_reg. */
their lifetimes will likely abut instead of overlapping. */
if (GET_CODE (dest) == REG
&& REGNO_QTY_VALID_P (REGNO (dest))
- && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
- && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
+ && qty_mode[REG_QTY (REGNO (dest))] == GET_MODE (dest)
+ && qty_first_reg[REG_QTY (REGNO (dest))] != REGNO (dest)
&& GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
/* Don't do this if the original insn had a hard reg as
- SET_SRC. */
+ SET_SRC or SET_DEST. */
&& (GET_CODE (sets[i].src) != REG
- || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
+ || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER)
+ && (GET_CODE (dest) != REG || REGNO (dest) >= FIRST_PSEUDO_REGISTER))
/* We can't call canon_reg here because it won't do anything if
SRC is a hard register. */
{
- int first = qty_first_reg[reg_qty[REGNO (src)]];
-
- src = SET_SRC (sets[i].rtl)
- = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
- : gen_rtx_REG (GET_MODE (src), first);
-
- /* If we had a constant that is cheaper than what we are now
- setting SRC to, use that constant. We ignored it when we
- thought we could make this into a no-op. */
- if (src_const && COST (src_const) < COST (src)
- && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
- src = src_const;
+ int first = qty_first_reg[REG_QTY (REGNO (src))];
+ rtx new_src
+ = (first >= FIRST_PSEUDO_REGISTER
+ ? regno_reg_rtx[first] : gen_rtx_REG (GET_MODE (src), first));
+
+ /* We must use validate-change even for this, because this
+ might be a special no-op instruction, suitable only to
+ tag notes onto. */
+ if (validate_change (insn, &SET_SRC (sets[i].rtl), new_src, 0))
+ {
+ src = new_src;
+ /* If we had a constant that is cheaper than what we are now
+ setting SRC to, use that constant. We ignored it when we
+ thought we could make this into a no-op. */
+ if (src_const && COST (src_const) < COST (src)
+ && validate_change (insn, &SET_SRC (sets[i].rtl), src_const,
+ 0))
+ src = src_const;
+ }
}
/* If we made a change, recompute SRC values. */
equivalent constant, we want to add a REG_NOTE. We don't want
to write a REG_EQUAL note for a constant pseudo since verifying that
that pseudo hasn't been eliminated is a pain. Such a note also
- won't help anything. */
+ won't help anything.
+
+ Avoid a REG_EQUAL note for (CONST (MINUS (LABEL_REF) (LABEL_REF)))
+ which can be created for a reference to a compile time computable
+ entry in a jump table. */
+
if (n_sets == 1 && src_const && GET_CODE (dest) == REG
- && GET_CODE (src_const) != REG)
+ && GET_CODE (src_const) != REG
+ && ! (GET_CODE (src_const) == CONST
+ && GET_CODE (XEXP (src_const, 0)) == MINUS
+ && GET_CODE (XEXP (XEXP (src_const, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (src_const, 0), 1)) == LABEL_REF))
{
tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+ /* Make sure that the rtx is not shared with any other insn. */
+ src_const = copy_rtx (src_const);
+
/* Record the actual constant value in a REG_EQUAL note, making
a new one if one does not already exist. */
if (tem)
the last set for this quantity was for this register. */
if (REGNO_QTY_VALID_P (REGNO (dest))
- && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
+ && qty_const[REG_QTY (REGNO (dest))] == const0_rtx)
{
/* See if we previously had a REG_WAS_0 note. */
rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
- rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
+ rtx const_insn = qty_const_insn[REG_QTY (REGNO (dest))];
if ((tem = single_set (const_insn)) != 0
&& rtx_equal_p (SET_DEST (tem), dest))
if (note)
XEXP (note, 0) = const_insn;
else
- REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_WAS_0,
- const_insn,
- REG_NOTES (insn));
+ REG_NOTES (insn)
+ = gen_rtx_INSN_LIST (REG_WAS_0, const_insn,
+ REG_NOTES (insn));
}
}
}
/* Now deal with the destination. */
do_not_record = 0;
- sets[i].inner_dest_loc = &SET_DEST (sets[0].rtl);
/* Look within any SIGN_EXTRACT or ZERO_EXTRACT
to the MEM or REG within it. */
|| GET_CODE (dest) == ZERO_EXTRACT
|| GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == STRICT_LOW_PART)
- {
- sets[i].inner_dest_loc = &XEXP (dest, 0);
- dest = XEXP (dest, 0);
- }
+ dest = XEXP (dest, 0);
sets[i].inner_dest = dest;
the insn. */
else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
{
+ /* One less use of the label this insn used to jump to. */
+ if (JUMP_LABEL (insn) != 0)
+ --LABEL_NUSES (JUMP_LABEL (insn));
PUT_CODE (insn, NOTE);
NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
NOTE_SOURCE_FILE (insn) = 0;
cse_jumps_altered = 1;
- /* One less use of the label this insn used to jump to. */
- if (JUMP_LABEL (insn) != 0)
- --LABEL_NUSES (JUMP_LABEL (insn));
/* No more processing for this set. */
sets[i].rtl = 0;
}
it. If it was a computed branch, delete it and re-emit. */
else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
{
- rtx p;
-
/* If this is not in the format for a simple branch and
we are the only SET in it, re-emit it. */
if (! simplejump_p (insn) && n_sets == 1)
rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
JUMP_LABEL (new) = XEXP (src, 0);
LABEL_NUSES (XEXP (src, 0))++;
- delete_insn (insn);
insn = new;
}
else
Until the right place is found, might as well do this here. */
INSN_CODE (insn) = -1;
- /* Now that we've converted this jump to an unconditional jump,
- there is dead code after it. Delete the dead code until we
- reach a BARRIER, the end of the function, or a label. Do
- not delete NOTEs except for NOTE_INSN_DELETED since later
- phases assume these notes are retained. */
-
- p = insn;
-
- while (NEXT_INSN (p) != 0
- && GET_CODE (NEXT_INSN (p)) != BARRIER
- && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
- {
- if (GET_CODE (NEXT_INSN (p)) != NOTE
- || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
- delete_insn (NEXT_INSN (p));
- else
- p = NEXT_INSN (p);
- }
-
- /* If we don't have a BARRIER immediately after INSN, put one there.
- Much code assumes that there are no NOTEs between a JUMP_INSN and
- BARRIER. */
-
- if (NEXT_INSN (insn) == 0
- || GET_CODE (NEXT_INSN (insn)) != BARRIER)
- emit_barrier_before (NEXT_INSN (insn));
-
- /* We might have two BARRIERs separated by notes. Delete the second
- one if so. */
+ never_reached_warning (insn);
- if (p != insn && NEXT_INSN (p) != 0
- && GET_CODE (NEXT_INSN (p)) == BARRIER)
- delete_insn (NEXT_INSN (p));
+ /* Now emit a BARRIER after the unconditional jump. Do not bother
+ deleting any unreachable code, let jump/flow do that. */
+ if (NEXT_INSN (insn) != 0
+ && GET_CODE (NEXT_INSN (insn)) != BARRIER)
+ emit_barrier_after (insn);
cse_jumps_altered = 1;
sets[i].rtl = 0;
enum machine_mode mode
= GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
- if (sets[i].src_elt == 0)
+ /* Don't put a hard register source into the table if this is
+ the last insn of a libcall. */
+ if (sets[i].src_elt == 0
+ && (GET_CODE (src) != REG
+ || REGNO (src) >= FIRST_PSEUDO_REGISTER
+ || ! find_reg_note (insn, REG_RETVAL, NULL_RTX)))
{
register struct table_elt *elt;
invalidate (XEXP (dest, 0), GET_MODE (dest));
}
+ /* A volatile ASM invalidates everything. */
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
+ && MEM_VOLATILE_P (PATTERN (insn)))
+ flush_hash_table ();
+
/* Make sure registers mentioned in destinations
are safe for use in an expression to be inserted.
This removes from the hash table
we are going to hash the SET_DEST values unconditionally. */
for (i = 0; i < n_sets; i++)
- if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
- mention_regs (SET_DEST (sets[i].rtl));
+ {
+ if (sets[i].rtl)
+ {
+ rtx x = SET_DEST (sets[i].rtl);
+
+ if (GET_CODE (x) != REG)
+ mention_regs (x);
+ else
+ {
+ /* We used to rely on all references to a register becoming
+ inaccessible when a register changes to a new quantity,
+ since that changes the hash code. However, that is not
+ safe, since after NBUCKETS new quantities we get a
+ hash 'collision' of a register with its own invalid
+ entries. And since SUBREGs have been changed not to
+ change their hash code with the hash code of the register,
+ it wouldn't work any longer at all. So we have to check
+ for any invalid references lying around now.
+ This code is similar to the REG case in mention_regs,
+ but it knows that reg_tick has been incremented, and
+ it leaves reg_in_table as -1 . */
+ register int regno = REGNO (x);
+ register int endregno
+ = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (regno, GET_MODE (x)));
+ int i;
+
+ for (i = regno; i < endregno; i++)
+ {
+ if (REG_IN_TABLE (i) >= 0)
+ {
+ remove_invalid_refs (i);
+ REG_IN_TABLE (i) = -1;
+ }
+ }
+ }
+ }
+ }
/* We may have just removed some of the src_elt's from the hash table.
So replace each one with the current head of the same class. */
if (sets[i].rtl)
{
register rtx dest = SET_DEST (sets[i].rtl);
+ rtx inner_dest = sets[i].inner_dest;
register struct table_elt *elt;
/* Don't record value if we are not supposed to risk allocating
since we might delete the libcall. Things should have been set
up so we won't want to reuse such a value, but we play it safe
here. */
- || in_libcall_block
+ || libcall_insn
/* If we didn't put a REG_EQUAL value or a source into the hash
table, there is no point is recording DEST. */
|| sets[i].src_elt == 0
sets[i].dest_hash = HASH (dest, GET_MODE (dest));
}
- elt = insert (dest, sets[i].src_elt,
- sets[i].dest_hash, GET_MODE (dest));
+ if (GET_CODE (inner_dest) == MEM
+ && GET_CODE (XEXP (inner_dest, 0)) == ADDRESSOF)
+ /* Given (SET (MEM (ADDRESSOF (X))) Y) we don't want to say
+ that (MEM (ADDRESSOF (X))) is equivalent to Y.
+ Consider the case in which the address of the MEM is
+ passed to a function, which alters the MEM. Then, if we
+ later use Y instead of the MEM we'll miss the update. */
+ elt = insert (dest, 0, sets[i].dest_hash, GET_MODE (dest));
+ else
+ elt = insert (dest, sets[i].src_elt,
+ sets[i].dest_hash, GET_MODE (dest));
+
elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
&& (! RTX_UNCHANGING_P (sets[i].inner_dest)
|| FIXED_BASE_PLUS_P (XEXP (sets[i].inner_dest,
then be used in the sequel and we may be changing a two-operand insn
into a three-operand insn.
- Also do not do this if we are operating on a copy of INSN. */
+ Also do not do this if we are operating on a copy of INSN.
+
+ Also don't do this if INSN ends a libcall; this would cause an unrelated
+ register to be set in the middle of a libcall, and we then get bad code
+ if the libcall is deleted. */
if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
&& NEXT_INSN (PREV_INSN (insn)) == insn
&& GET_CODE (SET_SRC (sets[0].rtl)) == REG
&& REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
&& REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
- && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
- == REGNO (SET_DEST (sets[0].rtl))))
+ && (qty_first_reg[REG_QTY (REGNO (SET_SRC (sets[0].rtl)))]
+ == REGNO (SET_DEST (sets[0].rtl)))
+ && ! find_reg_note (insn, REG_RETVAL, NULL_RTX))
{
rtx prev = PREV_INSN (insn);
while (prev && GET_CODE (prev) == NOTE)
prev_insn = insn;
}
\f
-/* Remove from the ahsh table all expressions that reference memory. */
+/* Remove from the hash table all expressions that reference memory. */
static void
invalidate_memory ()
{
&& GET_CODE (XEXP (addr, 0)) == REG
&& REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
{
- if (reg_tick[STACK_POINTER_REGNUM] >= 0)
- reg_tick[STACK_POINTER_REGNUM]++;
+ if (REG_TICK (STACK_POINTER_REGNUM) >= 0)
+ REG_TICK (STACK_POINTER_REGNUM)++;
/* This should be *very* rare. */
if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
rtx object;
{
enum rtx_code code = GET_CODE (x);
- char *fmt = GET_RTX_FORMAT (code);
+ const char *fmt = GET_RTX_FORMAT (code);
int i;
switch (code)
}
case REG:
- i = reg_qty[REGNO (x)];
+ i = REG_QTY (REGNO (x));
/* Return a constant or a constant register. */
if (REGNO_QTY_VALID_P (REGNO (x))
if (last_jump_equiv_class)
for (p = last_jump_equiv_class->first_same_value; p;
p = p->next_same_value)
- if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
- || (GET_CODE (p->exp) == SUBREG
- && GET_CODE (SUBREG_REG (p->exp)) == REG))
- invalidate (p->exp, VOIDmode);
- else if (GET_CODE (p->exp) == STRICT_LOW_PART
- || GET_CODE (p->exp) == ZERO_EXTRACT)
- invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
+ {
+ if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
+ || (GET_CODE (p->exp) == SUBREG
+ && GET_CODE (SUBREG_REG (p->exp)) == REG))
+ invalidate (p->exp, VOIDmode);
+ else if (GET_CODE (p->exp) == STRICT_LOW_PART
+ || GET_CODE (p->exp) == ZERO_EXTRACT)
+ invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
+ }
/* Process insns starting after LOOP_START until we hit a CALL_INSN or
a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
invalidate_for_call ();
}
+ invalidate_from_clobbers (PATTERN (insn));
note_stores (PATTERN (insn), invalidate_skipped_set);
}
}
static void
cse_check_loop_start (x, set)
rtx x;
- rtx set;
+ rtx set ATTRIBUTE_UNUSED;
{
if (cse_check_loop_start_value == 0
|| GET_CODE (x) == CC0 || GET_CODE (x) == PC)
if (cse_check_loop_start_value
&& validate_change (insn, &SET_SRC (x),
src_elt->exp, 0))
- emit_insn_after (gen_move_insn (src_elt->exp,
- SET_DEST (set)),
- p);
+ {
+ /* If this creates new pseudos, this is unsafe,
+ because the regno of new pseudo is unsuitable
+ to index into reg_qty when cse_insn processes
+ the new insn. Therefore, if a new pseudo was
+ created, discard this optimization. */
+ int nregs = max_reg_num ();
+ rtx move
+ = gen_move_insn (src_elt->exp, SET_DEST (set));
+ if (nregs != max_reg_num ())
+ {
+ if (! validate_change (insn, &SET_SRC (x),
+ SET_SRC (set), 0))
+ abort ();
+ }
+ else
+ emit_insn_after (move, p);
+ }
break;
}
}
path_size--;
}
+ /* If the first instruction is marked with QImode, that means we've
+ already processed this block. Our caller will look at DATA->LAST
+ to figure out where to go next. We want to return the next block
+ in the instruction stream, not some branched-to block somewhere
+ else. We accomplish this by pretending our called forbid us to
+ follow jumps, or skip blocks. */
+ if (GET_MODE (insn) == QImode)
+ follow_jumps = skip_blocks = 0;
+
/* Scan to end of this basic block. */
while (p && GET_CODE (p) != CODE_LABEL)
{
max_insn_uid = get_max_uid ();
- all_minus_one = (int *) alloca (nregs * sizeof (int));
- consec_ints = (int *) alloca (nregs * sizeof (int));
-
- for (i = 0; i < nregs; i++)
- {
- all_minus_one[i] = -1;
- consec_ints[i] = i;
- }
-
reg_next_eqv = (int *) alloca (nregs * sizeof (int));
reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
- reg_qty = (int *) alloca (nregs * sizeof (int));
- reg_in_table = (int *) alloca (nregs * sizeof (int));
- reg_tick = (int *) alloca (nregs * sizeof (int));
#ifdef LOAD_EXTEND_OP
|| global_regs[i])
SET_HARD_REG_BIT (regs_invalidated_by_call, i);
+ if (ggc_p)
+ ggc_push_context ();
+
/* Loop over basic blocks.
Compute the maximum number of qty's needed for each basic block
(which is 2 for each SET). */
max_qty = val.nsets * 2;
if (file)
- fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
+ fnotice (file, ";; Processing block from %d to %d, %d sets.\n",
INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
val.nsets);
cse_jumps_altered |= old_cse_jumps_altered;
}
+ if (ggc_p)
+ ggc_collect ();
+
#ifdef USE_C_ALLOCA
alloca (0);
#endif
}
+ if (ggc_p)
+ ggc_pop_context ();
+
/* Tell refers_to_mem_p that qty_const info is not available. */
qty_const = 0;
{
register rtx insn;
int to_usage = 0;
- int in_libcall_block = 0;
+ rtx libcall_insn = NULL_RTX;
int num_insns = 0;
/* Each of these arrays is undefined before max_reg, so only allocate
qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
- qty_mode= (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
+ qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg)
+ * sizeof (enum machine_mode));
qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
qty_comparison_code
for (insn = from; insn != to; insn = NEXT_INSN (insn))
{
- register enum rtx_code code;
- int i;
- struct table_elt *p, *next;
+ register enum rtx_code code = GET_CODE (insn);
- /* If we have processed 1,000 insns, flush the hash table to avoid
- extreme quadratic behavior.
+ /* If we have processed 1,000 insns, flush the hash table to
+ avoid extreme quadratic behavior. We must not include NOTEs
+ in the count since there may be more of them when generating
+ debugging information. If we clear the table at different
+ times, code generated with -g -O might be different than code
+ generated with -O but not -g.
??? This is a real kludge and needs to be done some other way.
Perhaps for 2.9. */
- if (num_insns++ > 1000)
+ if (code != NOTE && num_insns++ > 1000)
{
- for (i = 0; i < NBUCKETS; i++)
- for (p = table[i]; p; p = next)
- {
- next = p->next_same_hash;
-
- if (GET_CODE (p->exp) == REG)
- invalidate (p->exp, p->mode);
- else
- remove_from_table (p, i);
- }
-
+ flush_hash_table ();
num_insns = 0;
}
}
}
- code = GET_CODE (insn);
if (GET_MODE (insn) == QImode)
PUT_MODE (insn, VOIDmode);
if (GET_RTX_CLASS (code) == 'i')
{
+ rtx p;
+
/* Process notes first so we have all notes in canonical forms when
looking for duplicate operations. */
its destination is the result of the block and hence should be
recorded. */
- if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
- in_libcall_block = 1;
+ if ((p = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
+ libcall_insn = XEXP (p, 0);
else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
- in_libcall_block = 0;
+ libcall_insn = NULL_RTX;
- cse_insn (insn, in_libcall_block);
+ cse_insn (insn, libcall_insn);
}
/* If INSN is now an unconditional jump, skip to the end of our
insn = NEXT_INSN (to);
- if (LABEL_NUSES (to) == 0)
- insn = delete_insn (to);
-
/* If TO was the last insn in the function, we are done. */
if (insn == 0)
return 0;
int incr;
{
enum rtx_code code;
- char *fmt;
+ const char *fmt;
int i, j;
if (x == 0)
case CONST_DOUBLE:
case SYMBOL_REF:
case LABEL_REF:
- case CLOBBER:
+ return;
+
+ case CLOBBER:
+ /* If we are clobbering a MEM, mark any registers inside the address
+ as being used. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ count_reg_usage (XEXP (XEXP (x, 0), 0), counts, NULL_RTX, incr);
return;
case SET:
case EXPR_LIST:
case INSN_LIST:
if (REG_NOTE_KIND (x) == REG_EQUAL
- || GET_CODE (XEXP (x,0)) == USE)
+ || (REG_NOTE_KIND (x) != REG_NONNEG && GET_CODE (XEXP (x,0)) == USE))
count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
return;
/* Scan all the insns and delete any that are dead; i.e., they store a register
that is never used or they copy a register to itself.
- This is used to remove insns made obviously dead by cse. It improves the
- heuristics in loop since it won't try to move dead invariants out of loops
- or make givs for dead quantities. The remaining passes of the compilation
- are also sped up. */
+ This is used to remove insns made obviously dead by cse, loop or other
+ optimizations. It improves the heuristics in loop since it won't try to
+ move dead invariants out of loops or make givs for dead quantities. The
+ remaining passes of the compilation are also sped up. */
void
-delete_dead_from_cse (insns, nreg)
+delete_trivially_dead_insns (insns, nreg)
rtx insns;
int nreg;
{
int *counts = (int *) alloca (nreg * sizeof (int));
rtx insn, prev;
+#ifdef HAVE_cc0
rtx tem;
+#endif
int i;
- int in_libcall = 0;
+ int in_libcall = 0, dead_libcall = 0;
/* First count the number of times each register is used. */
bzero ((char *) counts, sizeof (int) * nreg);
/* Go from the last insn to the first and delete insns that only set unused
registers or copy a register to itself. As we delete an insn, remove
- usage counts for registers it uses. */
- for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
+ usage counts for registers it uses.
+
+ The first jump optimization pass may leave a real insn as the last
+ insn in the function. We must not skip that insn or we may end
+ up deleting code that is not really dead. */
+ insn = get_last_insn ();
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ insn = prev_real_insn (insn);
+
+ for ( ; insn; insn = prev)
{
int live_insn = 0;
+ rtx note;
prev = prev_real_insn (insn);
- /* Don't delete any insns that are part of a libcall block.
+ /* Don't delete any insns that are part of a libcall block unless
+ we can delete the whole libcall block.
+
Flow or loop might get confused if we did that. Remember
that we are scanning backwards. */
if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
- in_libcall = 1;
+ {
+ in_libcall = 1;
+ live_insn = 1;
+ dead_libcall = 0;
- if (in_libcall)
- live_insn = 1;
+ /* See if there's a REG_EQUAL note on this insn and try to
+ replace the source with the REG_EQUAL expression.
+
+ We assume that insns with REG_RETVALs can only be reg->reg
+ copies at this point. */
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+ if (note)
+ {
+ rtx set = single_set (insn);
+ if (set
+ && validate_change (insn, &SET_SRC (set), XEXP (note, 0), 0))
+ {
+ remove_note (insn,
+ find_reg_note (insn, REG_RETVAL, NULL_RTX));
+ dead_libcall = 1;
+ }
+ }
+ }
+ else if (in_libcall)
+ live_insn = ! dead_libcall;
else if (GET_CODE (PATTERN (insn)) == SET)
{
if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
|| REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
|| counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
- || side_effects_p (SET_SRC (PATTERN (insn))))
+ || side_effects_p (SET_SRC (PATTERN (insn)))
+ /* An ADDRESSOF expression can turn into a use of the
+ internal arg pointer, so always consider the
+ internal arg pointer live. If it is truly dead,
+ flow will delete the initializing insn. */
+ || (SET_DEST (PATTERN (insn))
+ == current_function_internal_arg_pointer))
live_insn = 1;
}
else if (GET_CODE (PATTERN (insn)) == PARALLEL)
else if (GET_CODE (SET_DEST (elt)) != REG
|| REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
|| counts[REGNO (SET_DEST (elt))] != 0
- || side_effects_p (SET_SRC (elt)))
+ || side_effects_p (SET_SRC (elt))
+ /* An ADDRESSOF expression can turn into a use of the
+ internal arg pointer, so always consider the
+ internal arg pointer live. If it is truly dead,
+ flow will delete the initializing insn. */
+ || (SET_DEST (elt)
+ == current_function_internal_arg_pointer))
live_insn = 1;
}
else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
}
if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
- in_libcall = 0;
+ {
+ in_libcall = 0;
+ dead_libcall = 0;
+ }
}
}