/* Global common subexpression elimination/Partial redundancy elimination
and global constant/copy propagation for GNU compiler.
- Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004
+ Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is part of GCC.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
/* TODO
- reordering of memory allocation and freeing to be more space efficient
#include "cselib.h"
#include "intl.h"
#include "obstack.h"
+#include "timevar.h"
+#include "tree-pass.h"
+#include "hashtab.h"
/* Propagate flow information through back edges and thus enable PRE's
moving loop invariant calculations out of loops.
the result of the expression is copied to a new register, and the redundant
expression is deleted by replacing it with this new register. Classic GCSE
doesn't have this problem as much as it computes the reaching defs of
- each register in each block and thus can try to use an existing register.
-
- **********************
-
- A fair bit of simplicity is created by creating small functions for simple
- tasks, even when the function is only called in one place. This may
- measurably slow things down [or may not] by creating more function call
- overhead than is necessary. The source is laid out so that it's trivial
- to make the affected functions inline so that one can measure what speed
- up, if any, can be achieved, and maybe later when things settle things can
- be rearranged.
-
- Help stamp out big monolithic functions! */
+ each register in each block and thus can try to use an existing
+ register. */
\f
/* GCSE global vars. */
-/* -dG dump file. */
-static FILE *gcse_file;
-
/* Note whether or not we should run jump optimization after gcse. We
want to do this for two cases.
* If we added any labels via edge splitting. */
static int run_jump_opt_after_gcse;
-/* Bitmaps are normally not included in debugging dumps.
- However it's useful to be able to print them from GDB.
- We could create special functions for this, but it's simpler to
- just allow passing stderr to the dump_foo fns. Since stderr can
- be a macro, we store a copy here. */
-static FILE *debug_stderr;
-
/* An obstack for our working variables. */
static struct obstack gcse_obstack;
/* Get the cuid of an insn. */
#ifdef ENABLE_CHECKING
-#define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
+#define INSN_CUID(INSN) \
+ (gcc_assert (INSN_UID (INSN) <= max_uid), uid_cuid[INSN_UID (INSN)])
#else
#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
#endif
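For reference, the checked variant of INSN_CUID relies on the comma operator: gcc_assert is evaluated for its side effect and the array element is the value of the whole expression. A minimal standalone sketch of the same pattern, with the standard assert standing in for gcc_assert and the names CHECKED_LOOKUP, table and limit purely illustrative (not from gcse.c):

#include <assert.h>

static int table[128];
static int limit = 128;

/* Checked lookup: the assertion fires on an out-of-range index, and
   the comma operator makes table[(I)] the value of the macro.  */
#define CHECKED_LOOKUP(I) (assert ((I) < limit), table[(I)])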
{
/* The next setting of this register. */
struct reg_set *next;
- /* The insn where it was set. */
- rtx insn;
+ /* The index of the block where it was set. */
+ int bb_index;
} reg_set;
static reg_set **reg_set_table;
/* This is a list of expressions which are MEMs and will be used by load
or store motion.
Load motion tracks MEMs which aren't killed by
- anything except itself. (ie, loads and stores to a single location).
+ anything except itself. (i.e., loads and stores to a single location).
We can then allow movement of these MEM refs with a little special
allowance. (all stores copy the same value to the reaching reg used
for the loads). This means all values used to store into memory must have
/* Head of the list of load/store memory refs. */
static struct ls_expr * pre_ldst_mems = NULL;
+/* Hashtable for the load/store memory refs. */
+static htab_t pre_ldst_table = NULL;
+
/* Bitmap containing one bit for each register in the program.
Used when performing GCSE to track which registers have been set since
the start of the basic block. */
/* Array, indexed by basic block number for a list of insns which modify
memory within that block. */
static rtx * modify_mem_list;
-bitmap modify_mem_list_set;
+static bitmap modify_mem_list_set;
/* This array parallels modify_mem_list, but is kept canonicalized. */
static rtx * canon_modify_mem_list;
-bitmap canon_modify_mem_list_set;
+
+/* Bitmap indexed by block numbers to record which blocks contain
+ function calls. */
+static bitmap blocks_with_calls;
+
/* Various variables for statistics gathering. */
/* Memory used in a pass.
static int gcse_subst_count;
/* Number of copy instructions created. */
static int gcse_create_count;
-/* Number of constants propagated. */
-static int const_prop_count;
-/* Number of copys propagated. */
-static int copy_prop_count;
+/* Number of local constants propagated. */
+static int local_const_prop_count;
+/* Number of local copies propagated. */
+static int local_copy_prop_count;
+/* Number of global constants propagated. */
+static int global_const_prop_count;
+/* Number of global copies propagated. */
+static int global_copy_prop_count;
\f
/* For available exprs */
static sbitmap *ae_kill, *ae_gen;
-
-/* Objects of this type are passed around by the null-pointer check
- removal routines. */
-struct null_pointer_info
-{
- /* The basic block being processed. */
- basic_block current_block;
- /* The first register to be handled in this pass. */
- unsigned int min_reg;
- /* One greater than the last register to be handled in this pass. */
- unsigned int max_reg;
- sbitmap *nonnull_local;
- sbitmap *nonnull_killed;
-};
\f
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *grealloc (void *, size_t);
static void *gcse_alloc (unsigned long);
-static void alloc_gcse_mem (rtx);
+static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void alloc_reg_set_mem (int);
static void free_reg_set_mem (void);
static void record_one_set (int, rtx);
-static void replace_one_set (int, rtx, rtx);
static void record_set_info (rtx, rtx, void *);
-static void compute_sets (rtx);
+static void compute_sets (void);
static void hash_scan_insn (rtx, struct hash_table *, int);
static void hash_scan_set (rtx, rtx, struct hash_table *);
static void hash_scan_clobber (rtx, rtx, struct hash_table *);
struct hash_table *);
static void insert_set_in_table (rtx, rtx, struct hash_table *);
static unsigned int hash_expr (rtx, enum machine_mode, int *, int);
-static unsigned int hash_expr_1 (rtx, enum machine_mode, int *);
-static unsigned int hash_string_1 (const char *);
static unsigned int hash_set (int, int);
static int expr_equiv_p (rtx, rtx);
static void record_last_reg_set_info (rtx, int);
static void free_hash_table (struct hash_table *);
static void compute_hash_table_work (struct hash_table *);
static void dump_hash_table (FILE *, const char *, struct hash_table *);
-static struct expr *lookup_expr (rtx, struct hash_table *);
static struct expr *lookup_set (unsigned int, struct hash_table *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int cprop_insn (rtx, int);
static int cprop (int);
static void find_implicit_sets (void);
-static int one_cprop_pass (int, int, int);
-static bool constprop_register (rtx, rtx, rtx, int);
+static int one_cprop_pass (int, bool, bool);
+static bool constprop_register (rtx, rtx, rtx, bool);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (rtx, edge);
static int bypass_block (basic_block, rtx, rtx);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
-static bool do_local_cprop (rtx, rtx, int, rtx*);
+static bool do_local_cprop (rtx, rtx, bool, rtx*);
static bool adjust_libcall_notes (rtx, rtx, rtx, rtx*);
-static void local_cprop_pass (int);
+static void local_cprop_pass (bool);
static bool is_too_expensive (const char *);
\f
/* Entry point for global common subexpression elimination.
- F is the first instruction in the function. */
+ F is the first instruction in the function. Return nonzero if a
+ change is made. */
-int
-gcse_main (rtx f, FILE *file)
+static int
+gcse_main (rtx f ATTRIBUTE_UNUSED)
{
int changed, pass;
/* Bytes used at start of pass. */
/* Assume that we do not need to run jump optimizations after gcse. */
run_jump_opt_after_gcse = 0;
- /* For calling dump_foo fns from gdb. */
- debug_stderr = stderr;
- gcse_file = file;
-
/* Identify the basic block information for this function, including
successors and predecessors. */
max_gcse_regno = max_reg_num ();
- if (file)
- dump_flow_info (file);
+ if (dump_file)
+ dump_flow_info (dump_file, dump_flags);
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= 1 || is_too_expensive (_("GCSE disabled")))
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+ || is_too_expensive (_("GCSE disabled")))
return 0;
gcc_obstack_init (&gcse_obstack);
information about memory sets when we build the hash tables. */
alloc_reg_set_mem (max_gcse_regno);
- compute_sets (f);
+ compute_sets ();
pass = 0;
initial_bytes_used = bytes_used;
while (changed && pass < MAX_GCSE_PASSES)
{
changed = 0;
- if (file)
- fprintf (file, "GCSE pass %d\n\n", pass + 1);
+ if (dump_file)
+ fprintf (dump_file, "GCSE pass %d\n\n", pass + 1);
/* Initialize bytes_used to the space for the pred/succ lists,
and the reg_set_table data. */
/* Each pass may create new registers, so recalculate each time. */
max_gcse_regno = max_reg_num ();
- alloc_gcse_mem (f);
+ alloc_gcse_mem ();
/* Don't allow constant propagation to modify jumps
during this pass. */
- changed = one_cprop_pass (pass + 1, 0, 0);
+ timevar_push (TV_CPROP1);
+ changed = one_cprop_pass (pass + 1, false, false);
+ timevar_pop (TV_CPROP1);
if (optimize_size)
/* Do nothing. */ ;
else
{
+ timevar_push (TV_PRE);
changed |= one_pre_gcse_pass (pass + 1);
/* We may have just created new basic blocks. Release and
recompute various things which are sized on the number of
}
free_reg_set_mem ();
alloc_reg_set_mem (max_reg_num ());
- compute_sets (f);
+ compute_sets ();
run_jump_opt_after_gcse = 1;
+ timevar_pop (TV_PRE);
}
if (max_pass_bytes < bytes_used)
for space, we don't run the partial redundancy algorithms). */
if (optimize_size)
{
+ timevar_push (TV_HOIST);
max_gcse_regno = max_reg_num ();
- alloc_gcse_mem (f);
+ alloc_gcse_mem ();
changed |= one_code_hoisting_pass ();
free_gcse_mem ();
if (max_pass_bytes < bytes_used)
max_pass_bytes = bytes_used;
+ timevar_pop (TV_HOIST);
}
- if (file)
+ if (dump_file)
{
- fprintf (file, "\n");
- fflush (file);
+ fprintf (dump_file, "\n");
+ fflush (dump_file);
}
obstack_free (&gcse_obstack, gcse_obstack_bottom);
conditional jumps. */
max_gcse_regno = max_reg_num ();
- alloc_gcse_mem (f);
+ alloc_gcse_mem ();
/* This time, go ahead and allow cprop to alter jumps. */
- one_cprop_pass (pass + 1, 1, 0);
+ timevar_push (TV_CPROP2);
+ one_cprop_pass (pass + 1, true, false);
+ timevar_pop (TV_CPROP2);
free_gcse_mem ();
- if (file)
+ if (dump_file)
{
- fprintf (file, "GCSE of %s: %d basic blocks, ",
+ fprintf (dump_file, "GCSE of %s: %d basic blocks, ",
current_function_name (), n_basic_blocks);
- fprintf (file, "%d pass%s, %d bytes\n\n",
+ fprintf (dump_file, "%d pass%s, %d bytes\n\n",
pass, pass > 1 ? "es" : "", max_pass_bytes);
}
allocate_reg_info (max_reg_num (), FALSE, FALSE);
if (!optimize_size && flag_gcse_sm)
- store_motion ();
+ {
+ timevar_push (TV_LSM);
+ store_motion ();
+ timevar_pop (TV_LSM);
+ }
/* Record where pseudo-registers are set. */
return run_jump_opt_after_gcse;
This is called at the start of each pass. */
static void
-alloc_gcse_mem (rtx f)
+alloc_gcse_mem (void)
{
int i;
+ basic_block bb;
rtx insn;
/* Find the largest UID and create a mapping from UIDs to CUIDs.
CUIDs are like UIDs except they increase monotonically, have no gaps,
- and only apply to real insns. */
+ and only apply to real insns.
+ (Actually, there are gaps, for insns that are not inside a basic block,
+ but we should never see those anyway, so this is OK.) */
max_uid = get_max_uid ();
uid_cuid = gcalloc (max_uid + 1, sizeof (int));
- for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
- {
- if (INSN_P (insn))
- uid_cuid[INSN_UID (insn)] = i++;
- else
- uid_cuid[INSN_UID (insn)] = i;
- }
+ i = 0;
+ FOR_EACH_BB (bb)
+ FOR_BB_INSNS (bb, insn)
+ {
+ if (INSN_P (insn))
+ uid_cuid[INSN_UID (insn)] = i++;
+ else
+ uid_cuid[INSN_UID (insn)] = i;
+ }
/* Create a table mapping cuids to insns. */
max_cuid = i;
cuid_insn = gcalloc (max_cuid + 1, sizeof (rtx));
- for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
- if (INSN_P (insn))
- CUID_INSN (i++) = insn;
+ i = 0;
+ FOR_EACH_BB (bb)
+ FOR_BB_INSNS (bb, insn)
+ if (INSN_P (insn))
+ CUID_INSN (i++) = insn;
/* Allocate vars to track sets of regs. */
- reg_set_bitmap = BITMAP_XMALLOC ();
+ reg_set_bitmap = BITMAP_ALLOC (NULL);
/* Allocate vars to track sets of regs, memory per block. */
reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno);
basic block. */
modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
- modify_mem_list_set = BITMAP_XMALLOC ();
- canon_modify_mem_list_set = BITMAP_XMALLOC ();
+ modify_mem_list_set = BITMAP_ALLOC (NULL);
+ blocks_with_calls = BITMAP_ALLOC (NULL);
}
/* Free memory allocated by alloc_gcse_mem. */
free (uid_cuid);
free (cuid_insn);
- BITMAP_XFREE (reg_set_bitmap);
+ BITMAP_FREE (reg_set_bitmap);
sbitmap_vector_free (reg_set_in_block);
free_modify_mem_tables ();
- BITMAP_XFREE (modify_mem_list_set);
- BITMAP_XFREE (canon_modify_mem_list_set);
+ BITMAP_FREE (modify_mem_list_set);
+ BITMAP_FREE (blocks_with_calls);
}
\f
/* Compute the local properties of each recorded expression.
obstack_free (&reg_set_obstack, NULL);
}
-/* An OLD_INSN that used to set REGNO was replaced by NEW_INSN.
- Update the corresponding `reg_set_table' entry accordingly.
- We assume that NEW_INSN is not already recorded in reg_set_table[regno]. */
-
-static void
-replace_one_set (int regno, rtx old_insn, rtx new_insn)
-{
- struct reg_set *reg_info;
- if (regno >= reg_set_table_size)
- return;
- for (reg_info = reg_set_table[regno]; reg_info; reg_info = reg_info->next)
- if (reg_info->insn == old_insn)
- {
- reg_info->insn = new_insn;
- break;
- }
-}
-
/* Record REGNO in the reg_set table. */
static void
new_reg_info = obstack_alloc (&reg_set_obstack, sizeof (struct reg_set));
bytes_used += sizeof (struct reg_set);
- new_reg_info->insn = insn;
+ new_reg_info->bb_index = BLOCK_NUM (insn);
new_reg_info->next = reg_set_table[regno];
reg_set_table[regno] = new_reg_info;
}
`reg_set_table' for further documentation. */
static void
-compute_sets (rtx f)
+compute_sets (void)
{
+ basic_block bb;
rtx insn;
- for (insn = f; insn != 0; insn = NEXT_INSN (insn))
- if (INSN_P (insn))
- note_stores (PATTERN (insn), record_set_info, insn);
+ FOR_EACH_BB (bb)
+ FOR_BB_INSNS (bb, insn)
+ if (INSN_P (insn))
+ note_stores (PATTERN (insn), record_set_info, insn);
}
\f
/* Hash table support. */
static int
want_to_gcse_p (rtx x)
{
+#ifdef STACK_REGS
+ /* On register stack architectures, don't GCSE constants from the
+ constant pool, as the benefits are often swamped by the overhead
+ of shuffling the register stack between basic blocks. */
+ if (IS_STACK_MODE (GET_MODE (x)))
+ x = avoid_constant_pool_reference (x);
+#endif
+
switch (GET_CODE (x))
{
case REG:
{
while (GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == ZERO_EXTRACT
- || GET_CODE (dest) == SIGN_EXTRACT
|| GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
load_killed_in_block_p (basic_block bb, int uid_limit, rtx x, int avail_p)
{
rtx list_entry = modify_mem_list[bb->index];
+
+ /* If this is a read-only MEM, we aren't going to be changing it. */
+ if (MEM_READONLY_P (x))
+ return 0;
+
while (list_entry)
{
rtx setter;
MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
indicating if a volatile operand is found or if the expression contains
something we don't want to insert in the table. HASH_TABLE_SIZE is
- the current size of the hash table to be probed.
-
- ??? One might want to merge this with canon_hash. Later. */
+ the current size of the hash table to be probed. */
static unsigned int
hash_expr (rtx x, enum machine_mode mode, int *do_not_record_p,
*do_not_record_p = 0;
- hash = hash_expr_1 (x, mode, do_not_record_p);
+ hash = hash_rtx (x, mode, do_not_record_p,
+ NULL, /*have_reg_qty=*/false);
return hash % hash_table_size;
}
-/* Hash a string. Just add its bytes up. */
-
-static inline unsigned
-hash_string_1 (const char *ps)
-{
- unsigned hash = 0;
- const unsigned char *p = (const unsigned char *) ps;
-
- if (p)
- while (*p)
- hash += *p++;
-
- return hash;
-}
-
-/* Subroutine of hash_expr to do the actual work. */
-
-static unsigned int
-hash_expr_1 (rtx x, enum machine_mode mode, int *do_not_record_p)
-{
- int i, j;
- unsigned hash = 0;
- enum rtx_code code;
- const char *fmt;
-
- if (x == 0)
- return hash;
-
- /* Used to turn recursion into iteration. We can't rely on GCC's
- tail-recursion elimination since we need to keep accumulating values
- in HASH. */
- repeat:
-
- code = GET_CODE (x);
- switch (code)
- {
- case REG:
- hash += ((unsigned int) REG << 7) + REGNO (x);
- return hash;
-
- case CONST_INT:
- hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
- + (unsigned int) INTVAL (x));
- return hash;
-
- case CONST_DOUBLE:
- /* This is like the general case, except that it only counts
- the integers representing the constant. */
- hash += (unsigned int) code + (unsigned int) GET_MODE (x);
- if (GET_MODE (x) != VOIDmode)
- for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
- hash += (unsigned int) XWINT (x, i);
- else
- hash += ((unsigned int) CONST_DOUBLE_LOW (x)
- + (unsigned int) CONST_DOUBLE_HIGH (x));
- return hash;
-
- case CONST_VECTOR:
- {
- int units;
- rtx elt;
-
- units = CONST_VECTOR_NUNITS (x);
-
- for (i = 0; i < units; ++i)
- {
- elt = CONST_VECTOR_ELT (x, i);
- hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
- }
-
- return hash;
- }
-
- /* Assume there is only one rtx object for any given label. */
- case LABEL_REF:
- /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
- differences and differences between each stage's debugging dumps. */
- hash += (((unsigned int) LABEL_REF << 7)
- + CODE_LABEL_NUMBER (XEXP (x, 0)));
- return hash;
-
- case SYMBOL_REF:
- {
- /* Don't hash on the symbol's address to avoid bootstrap differences.
- Different hash values may cause expressions to be recorded in
- different orders and thus different registers to be used in the
- final assembler. This also avoids differences in the dump files
- between various stages. */
- unsigned int h = 0;
- const unsigned char *p = (const unsigned char *) XSTR (x, 0);
-
- while (*p)
- h += (h << 7) + *p++; /* ??? revisit */
-
- hash += ((unsigned int) SYMBOL_REF << 7) + h;
- return hash;
- }
-
- case MEM:
- if (MEM_VOLATILE_P (x))
- {
- *do_not_record_p = 1;
- return 0;
- }
-
- hash += (unsigned int) MEM;
- /* We used alias set for hashing, but this is not good, since the alias
- set may differ in -fprofile-arcs and -fbranch-probabilities compilation
- causing the profiles to fail to match. */
- x = XEXP (x, 0);
- goto repeat;
-
- case PRE_DEC:
- case PRE_INC:
- case POST_DEC:
- case POST_INC:
- case PC:
- case CC0:
- case CALL:
- case UNSPEC_VOLATILE:
- *do_not_record_p = 1;
- return 0;
-
- case ASM_OPERANDS:
- if (MEM_VOLATILE_P (x))
- {
- *do_not_record_p = 1;
- return 0;
- }
- else
- {
- /* We don't want to take the filename and line into account. */
- hash += (unsigned) code + (unsigned) GET_MODE (x)
- + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
- + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
- + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);
-
- if (ASM_OPERANDS_INPUT_LENGTH (x))
- {
- for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
- {
- hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
- GET_MODE (ASM_OPERANDS_INPUT (x, i)),
- do_not_record_p)
- + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
- (x, i)));
- }
-
- hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
- x = ASM_OPERANDS_INPUT (x, 0);
- mode = GET_MODE (x);
- goto repeat;
- }
- return hash;
- }
-
- default:
- break;
- }
-
- hash += (unsigned) code + (unsigned) GET_MODE (x);
- for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
- {
- if (fmt[i] == 'e')
- {
- /* If we are about to do the last recursive call
- needed at this level, change it into iteration.
- This function is called enough to be worth it. */
- if (i == 0)
- {
- x = XEXP (x, i);
- goto repeat;
- }
-
- hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
- if (*do_not_record_p)
- return 0;
- }
-
- else if (fmt[i] == 'E')
- for (j = 0; j < XVECLEN (x, i); j++)
- {
- hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
- if (*do_not_record_p)
- return 0;
- }
-
- else if (fmt[i] == 's')
- hash += hash_string_1 (XSTR (x, i));
- else if (fmt[i] == 'i')
- hash += (unsigned int) XINT (x, i);
- else
- abort ();
- }
-
- return hash;
-}
-
/* Hash a set of register REGNO.
Sets are hashed on the register that is set. This simplifies the PRE copy
return hash % hash_table_size;
}
-/* Return nonzero if exp1 is equivalent to exp2.
- ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
+/* Return nonzero if exp1 is equivalent to exp2. */
static int
expr_equiv_p (rtx x, rtx y)
{
- int i, j;
- enum rtx_code code;
- const char *fmt;
-
- if (x == y)
- return 1;
-
- if (x == 0 || y == 0)
- return 0;
-
- code = GET_CODE (x);
- if (code != GET_CODE (y))
- return 0;
-
- /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
- if (GET_MODE (x) != GET_MODE (y))
- return 0;
-
- switch (code)
- {
- case PC:
- case CC0:
- case CONST_INT:
- return 0;
-
- case LABEL_REF:
- return XEXP (x, 0) == XEXP (y, 0);
-
- case SYMBOL_REF:
- return XSTR (x, 0) == XSTR (y, 0);
-
- case REG:
- return REGNO (x) == REGNO (y);
-
- case MEM:
- /* Can't merge two expressions in different alias sets, since we can
- decide that the expression is transparent in a block when it isn't,
- due to it being set with the different alias set. */
- if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
- return 0;
-
- /* A volatile mem should not be considered equivalent to any other. */
- if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
- return 0;
- break;
-
- /* For commutative operations, check both orders. */
- case PLUS:
- case MULT:
- case AND:
- case IOR:
- case XOR:
- case NE:
- case EQ:
- return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
- && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
- || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
- && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
-
- case ASM_OPERANDS:
- /* We don't use the generic code below because we want to
- disregard filename and line numbers. */
-
- /* A volatile asm isn't equivalent to any other. */
- if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
- return 0;
-
- if (GET_MODE (x) != GET_MODE (y)
- || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
- || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
- ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
- || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
- || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
- return 0;
-
- if (ASM_OPERANDS_INPUT_LENGTH (x))
- {
- for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
- if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
- ASM_OPERANDS_INPUT (y, i))
- || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
- ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
- return 0;
- }
-
- return 1;
-
- default:
- break;
- }
-
- /* Compare the elements. If any pair of corresponding elements
- fail to match, return 0 for the whole thing. */
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- switch (fmt[i])
- {
- case 'e':
- if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
- return 0;
- break;
-
- case 'E':
- if (XVECLEN (x, i) != XVECLEN (y, i))
- return 0;
- for (j = 0; j < XVECLEN (x, i); j++)
- if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
- return 0;
- break;
-
- case 's':
- if (strcmp (XSTR (x, i), XSTR (y, i)))
- return 0;
- break;
-
- case 'i':
- if (XINT (x, i) != XINT (y, i))
- return 0;
- break;
-
- case 'w':
- if (XWINT (x, i) != XWINT (y, i))
- return 0;
- break;
-
- case '0':
- break;
-
- default:
- abort ();
- }
- }
-
- return 1;
+ return exp_equiv_p (x, y, 0, true);
}
/* Insert expression X in INSN in the hash TABLE.
unsigned int hash;
struct expr *cur_expr, *last_expr = NULL;
struct occr *antic_occr, *avail_occr;
- struct occr *last_occr = NULL;
hash = hash_expr (x, mode, &do_not_record_p, table->size);
{
antic_occr = cur_expr->antic_occr;
- /* Search for another occurrence in the same basic block. */
- while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
- {
- /* If an occurrence isn't found, save a pointer to the end of
- the list. */
- last_occr = antic_occr;
- antic_occr = antic_occr->next;
- }
+ if (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
+ antic_occr = NULL;
if (antic_occr)
/* Found another instance of the expression in the same basic block.
/* First occurrence of this expression in this basic block. */
antic_occr = gcse_alloc (sizeof (struct occr));
bytes_used += sizeof (struct occr);
- /* First occurrence of this expression in any block? */
- if (cur_expr->antic_occr == NULL)
- cur_expr->antic_occr = antic_occr;
- else
- last_occr->next = antic_occr;
-
antic_occr->insn = insn;
- antic_occr->next = NULL;
+ antic_occr->next = cur_expr->antic_occr;
antic_occr->deleted_p = 0;
+ cur_expr->antic_occr = antic_occr;
}
}
{
avail_occr = cur_expr->avail_occr;
- /* Search for another occurrence in the same basic block. */
- while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
+ if (avail_occr && BLOCK_NUM (avail_occr->insn) == BLOCK_NUM (insn))
{
- /* If an occurrence isn't found, save a pointer to the end of
- the list. */
- last_occr = avail_occr;
- avail_occr = avail_occr->next;
+ /* Found another instance of the expression in the same basic block.
+ Prefer this occurrence to the currently recorded one. We want
+ the last one in the block and the block is scanned from start
+ to end. */
+ avail_occr->insn = insn;
}
-
- if (avail_occr)
- /* Found another instance of the expression in the same basic block.
- Prefer this occurrence to the currently recorded one. We want
- the last one in the block and the block is scanned from start
- to end. */
- avail_occr->insn = insn;
else
{
/* First occurrence of this expression in this basic block. */
avail_occr = gcse_alloc (sizeof (struct occr));
bytes_used += sizeof (struct occr);
-
- /* First occurrence of this expression in any block? */
- if (cur_expr->avail_occr == NULL)
- cur_expr->avail_occr = avail_occr;
- else
- last_occr->next = avail_occr;
-
avail_occr->insn = insn;
- avail_occr->next = NULL;
+ avail_occr->next = cur_expr->avail_occr;
avail_occr->deleted_p = 0;
+ cur_expr->avail_occr = avail_occr;
}
}
}
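The occurrence lists above are now maintained by pushing new entries at the head instead of walking to the tail, and an existing same-block availability record is simply updated in place. A minimal standalone sketch of the push-at-head discipline, with an illustrative struct rather than the real struct occr:

#include <stdlib.h>

struct occ
{
  struct occ *next;   /* next occurrence in the chain */
  int insn_uid;       /* the insn recorded for it     */
};

/* Push a new occurrence at the head of *LIST, mirroring how
   insert_expr_in_table now links antic_occr/avail_occr entries.  */
static void
push_occ (struct occ **list, int insn_uid)
{
  struct occ *o = malloc (sizeof (struct occ));
  o->insn_uid = insn_uid;
  o->next = *list;
  *list = o;
}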
int found;
unsigned int hash;
struct expr *cur_expr, *last_expr = NULL;
- struct occr *cur_occr, *last_occr = NULL;
+ struct occr *cur_occr;
- if (GET_CODE (x) != SET
- || ! REG_P (SET_DEST (x)))
- abort ();
+ gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));
hash = hash_set (REGNO (SET_DEST (x)), table->size);
/* Now record the occurrence. */
cur_occr = cur_expr->avail_occr;
- /* Search for another occurrence in the same basic block. */
- while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
+ if (cur_occr && BLOCK_NUM (cur_occr->insn) == BLOCK_NUM (insn))
{
- /* If an occurrence isn't found, save a pointer to the end of
- the list. */
- last_occr = cur_occr;
- cur_occr = cur_occr->next;
+ /* Found another instance of the expression in the same basic block.
+ Prefer this occurrence to the currently recorded one. We want
+ the last one in the block and the block is scanned from start
+ to end. */
+ cur_occr->insn = insn;
}
-
- if (cur_occr)
- /* Found another instance of the expression in the same basic block.
- Prefer this occurrence to the currently recorded one. We want the
- last one in the block and the block is scanned from start to end. */
- cur_occr->insn = insn;
else
{
/* First occurrence of this expression in this basic block. */
cur_occr = gcse_alloc (sizeof (struct occr));
bytes_used += sizeof (struct occr);
- /* First occurrence of this expression in any block? */
- if (cur_expr->avail_occr == NULL)
- cur_expr->avail_occr = cur_occr;
- else
- last_occr->next = cur_occr;
-
- cur_occr->insn = insn;
- cur_occr->next = NULL;
- cur_occr->deleted_p = 0;
+ cur_occr->insn = insn;
+ cur_occr->next = cur_expr->avail_occr;
+ cur_occr->deleted_p = 0;
+ cur_expr->avail_occr = cur_occr;
}
}
rtx dest = SET_DEST (pat);
rtx note;
- if (CALL_P (src))
+ if (GET_CODE (src) == CALL)
hash_scan_call (src, insn, table);
else if (REG_P (dest))
unsigned int regno = REGNO (dest);
rtx tmp;
- /* If this is a single set and we are doing constant propagation,
- see if a REG_NOTE shows this equivalent to a constant. */
- if (table->set_p && (note = find_reg_equal_equiv_note (insn)) != 0
- && gcse_constant_p (XEXP (note, 0)))
+ /* See if a REG_NOTE shows this is equivalent to a simpler expression.
+ This allows us to do a single GCSE pass and still eliminate
+ redundant constants, addresses or other expressions that are
+ constructed with multiple instructions. */
+ note = find_reg_equal_equiv_note (insn);
+ if (note != 0
+ && (table->set_p
+ ? gcse_constant_p (XEXP (note, 0))
+ : want_to_gcse_p (XEXP (note, 0))))
src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
/* Only record sets of pseudo-regs in the hash table. */
REG_EQUIV notes and if the argument slot is used somewhere
explicitly, it means address of parameter has been taken,
so we should not extend the lifetime of the pseudo. */
- && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
- || ! MEM_P (XEXP (note, 0))))
+ && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
{
/* An expression is not anticipatable if its operands are
modified before this insn or if this is not the only SET in
hash_scan_set (x, insn, table);
else if (GET_CODE (x) == CLOBBER)
hash_scan_clobber (x, insn, table);
- else if (CALL_P (x))
+ else if (GET_CODE (x) == CALL)
hash_scan_call (x, insn, table);
}
else if (GET_CODE (pat) == CLOBBER)
hash_scan_clobber (pat, insn, table);
- else if (CALL_P (pat))
+ else if (GET_CODE (pat) == CALL)
hash_scan_call (pat, insn, table);
}
while (GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == ZERO_EXTRACT
- || GET_CODE (dest) == SIGN_EXTRACT
|| GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
canon_modify_mem_list[bb] =
alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
- bitmap_set_bit (canon_modify_mem_list_set, bb);
}
/* Record memory modification information for INSN. We do not actually care
need to insert a pair of items, as canon_list_insert does. */
canon_modify_mem_list[bb] =
alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
- bitmap_set_bit (canon_modify_mem_list_set, bb);
+ bitmap_set_bit (blocks_with_calls, bb);
}
else
note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
??? hard-reg reg_set_in_block computation
could be moved to compute_sets since they currently don't change. */
- for (insn = BB_HEAD (current_bb);
- insn && insn != NEXT_INSN (BB_END (current_bb));
- insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (current_bb, insn)
{
if (! INSN_P (insn))
continue;
if (CALL_P (insn))
{
- bool clobbers_all = false;
-#ifdef NON_SAVING_SETJMP
- if (NON_SAVING_SETJMP
- && find_reg_note (insn, REG_SETJMP, NULL_RTX))
- clobbers_all = true;
-#endif
-
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- if (clobbers_all
- || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
record_last_reg_set_info (insn, regno);
mark_call (insn);
BB_HEAD (current_bb), table);
/* The next pass builds the hash table. */
-
- for (insn = BB_HEAD (current_bb), in_libcall_block = 0;
- insn && insn != NEXT_INSN (BB_END (current_bb));
- insn = NEXT_INSN (insn))
+ in_libcall_block = 0;
+ FOR_BB_INSNS (current_bb, insn)
if (INSN_P (insn))
{
if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
\f
/* Expression tracking support. */
-/* Lookup pattern PAT in the expression TABLE.
- The result is a pointer to the table entry, or NULL if not found. */
-
-static struct expr *
-lookup_expr (rtx pat, struct hash_table *table)
-{
- int do_not_record_p;
- unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
- table->size);
- struct expr *expr;
-
- if (do_not_record_p)
- return NULL;
-
- expr = table->table[hash];
-
- while (expr && ! expr_equiv_p (expr->expr, pat))
- expr = expr->next_same_hash;
-
- return expr;
-}
-
/* Lookup REGNO in the set TABLE. The result is a pointer to the
table entry, or NULL if not found. */
static void
clear_modify_mem_tables (void)
{
- int i;
+ unsigned i;
+ bitmap_iterator bi;
- EXECUTE_IF_SET_IN_BITMAP
- (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i));
+ EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
+ {
+ free_INSN_LIST_list (modify_mem_list + i);
+ free_insn_expr_list_list (canon_modify_mem_list + i);
+ }
bitmap_clear (modify_mem_list_set);
-
- EXECUTE_IF_SET_IN_BITMAP
- (canon_modify_mem_list_set, 0, i,
- free_insn_expr_list_list (canon_modify_mem_list + i));
- bitmap_clear (canon_modify_mem_list_set);
+ bitmap_clear (blocks_with_calls);
}
-/* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
+/* Release memory used by modify_mem_list_set. */
static void
free_modify_mem_tables (void)
while (GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == ZERO_EXTRACT
- || GET_CODE (dest) == SIGN_EXTRACT
|| GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
else if (MEM_P (dest))
record_last_mem_set_info (insn);
- if (CALL_P (SET_SRC (pat)))
+ if (GET_CODE (SET_SRC (pat)) == CALL)
mark_call (insn);
}
mark_set (x, insn);
else if (GET_CODE (x) == CLOBBER)
mark_clobber (x, insn);
- else if (CALL_P (x))
+ else if (GET_CODE (x) == CALL)
mark_call (insn);
}
else if (GET_CODE (pat) == CLOBBER)
mark_clobber (pat, insn);
- else if (CALL_P (pat))
+ else if (GET_CODE (pat) == CALL)
mark_call (insn);
}
else
{
for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
- SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
+ SET_BIT (bmap[r->bb_index], indx);
}
}
else
else
{
for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
- RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
+ RESET_BIT (bmap[r->bb_index], indx);
}
}
return;
case MEM:
- FOR_EACH_BB (bb)
+ if (! MEM_READONLY_P (x))
{
- rtx list_entry = canon_modify_mem_list[bb->index];
+ bitmap_iterator bi;
+ unsigned bb_index;
- while (list_entry)
+ /* First handle all the blocks with calls. We don't need to
+ do any list walking for them. */
+ EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
{
- rtx dest, dest_addr;
+ if (set_p)
+ SET_BIT (bmap[bb_index], indx);
+ else
+ RESET_BIT (bmap[bb_index], indx);
+ }
- if (CALL_P (XEXP (list_entry, 0)))
- {
- if (set_p)
- SET_BIT (bmap[bb->index], indx);
- else
- RESET_BIT (bmap[bb->index], indx);
- break;
- }
- /* LIST_ENTRY must be an INSN of some kind that sets memory.
- Examine each hunk of memory that is modified. */
+ /* Now iterate over the blocks which have memory modifications
+ but which do not have any calls. */
+ EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
+ blocks_with_calls,
+ 0, bb_index, bi)
+ {
+ rtx list_entry = canon_modify_mem_list[bb_index];
- dest = XEXP (list_entry, 0);
- list_entry = XEXP (list_entry, 1);
- dest_addr = XEXP (list_entry, 0);
+ while (list_entry)
+ {
+ rtx dest, dest_addr;
- if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
- x, rtx_addr_varies_p))
- {
- if (set_p)
- SET_BIT (bmap[bb->index], indx);
- else
- RESET_BIT (bmap[bb->index], indx);
- break;
- }
- list_entry = XEXP (list_entry, 1);
- }
+ /* LIST_ENTRY must be an INSN of some kind that sets memory.
+ Examine each hunk of memory that is modified. */
+
+ dest = XEXP (list_entry, 0);
+ list_entry = XEXP (list_entry, 1);
+ dest_addr = XEXP (list_entry, 0);
+
+ if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
+ x, rtx_addr_varies_p))
+ {
+ if (set_p)
+ SET_BIT (bmap[bb_index], indx);
+ else
+ RESET_BIT (bmap[bb_index], indx);
+ break;
+ }
+ list_entry = XEXP (list_entry, 1);
+ }
+ }
}
x = XEXP (x, 0);
validate_change (insn, &SET_SRC (set), src, 0);
}
- /* If there is already a NOTE, update the expression in it with our
- replacement. */
- if (note != 0)
+ /* If there is already a REG_EQUAL note, update the expression in it
+ with our replacement. */
+ if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
have a note, and have no special SET, add a REG_EQUAL note to not
lose information. */
if (!success && note == 0 && set != 0
- && GET_CODE (XEXP (set, 0)) != ZERO_EXTRACT
- && GET_CODE (XEXP (set, 0)) != SIGN_EXTRACT)
+ && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
}
We don't allow that. Remove that note. This code ought
not to happen, because previous code ought to synthesize
reg-reg move, but be on the safe side. */
- if (note && REG_P (XEXP (note, 0)))
+ if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
remove_note (insn, note);
return success;
struct expr *set1 = 0;
/* Loops are not possible here. To get a loop we would need two sets
- available at the start of the block containing INSN. ie we would
+ available at the start of the block containing INSN. i.e. we would
need two sets like this available at the start of the block:
(set (reg X) (reg Y))
if (set == 0)
break;
- if (GET_CODE (set->expr) != SET)
- abort ();
+ gcc_assert (GET_CODE (set->expr) == SET);
src = SET_SRC (set->expr);
if (! REG_P (src))
break;
- /* Follow the copy chain, ie start another iteration of the loop
+ /* Follow the copy chain, i.e. start another iteration of the loop
and see if we have an available copy into SRC. */
regno = REGNO (src);
}
run_jump_opt_after_gcse = 1;
- const_prop_count++;
- if (gcse_file != NULL)
+ global_const_prop_count++;
+ if (dump_file != NULL)
{
- fprintf (gcse_file,
- "CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
+ fprintf (dump_file,
+ "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
REGNO (from), INSN_UID (jump));
- print_rtl (gcse_file, src);
- fprintf (gcse_file, "\n");
+ print_rtl (dump_file, src);
+ fprintf (dump_file, "\n");
}
purge_dead_edges (bb);
}
static bool
-constprop_register (rtx insn, rtx from, rtx to, int alter_jumps)
+constprop_register (rtx insn, rtx from, rtx to, bool alter_jumps)
{
rtx sset;
}
/* Handle normal insns next. */
- if (GET_CODE (insn) == INSN
+ if (NONJUMP_INSN_P (insn)
&& try_replace_reg (from, to, insn))
return 1;
pat = set->expr;
/* ??? We might be able to handle PARALLELs. Later. */
- if (GET_CODE (pat) != SET)
- abort ();
+ gcc_assert (GET_CODE (pat) == SET);
src = SET_SRC (pat);
if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps))
{
changed = 1;
- const_prop_count++;
- if (gcse_file != NULL)
+ global_const_prop_count++;
+ if (dump_file != NULL)
{
- fprintf (gcse_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
- fprintf (gcse_file, "insn %d with constant ", INSN_UID (insn));
- print_rtl (gcse_file, src);
- fprintf (gcse_file, "\n");
+ fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
+ fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
+ print_rtl (dump_file, src);
+ fprintf (dump_file, "\n");
}
if (INSN_DELETED_P (insn))
return 1;
if (try_replace_reg (reg_used->reg_rtx, src, insn))
{
changed = 1;
- copy_prop_count++;
- if (gcse_file != NULL)
+ global_copy_prop_count++;
+ if (dump_file != NULL)
{
- fprintf (gcse_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
+ fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
regno, INSN_UID (insn));
- fprintf (gcse_file, " with reg %d\n", REGNO (src));
+ fprintf (dump_file, " with reg %d\n", REGNO (src));
}
/* The original insn setting reg_used may or may not now be
their REG_EQUAL notes need updating. */
static bool
-do_local_cprop (rtx x, rtx insn, int alter_jumps, rtx *libcall_sp)
+do_local_cprop (rtx x, rtx insn, bool alter_jumps, rtx *libcall_sp)
{
rtx newreg = NULL, newcnst = NULL;
rtx this_rtx = l->loc;
rtx note;
- if (l->in_libcall)
+ /* Don't CSE non-constant values out of libcall blocks. */
+ if (l->in_libcall && ! CONSTANT_P (this_rtx))
continue;
if (gcse_constant_p (this_rtx))
or fix delete_trivially_dead_insns to preserve the setting insn,
or make it delete the REG_EQUAL note, and fix up all passes that
require the REG_EQUAL note there. */
- if (!adjust_libcall_notes (x, newcnst, insn, libcall_sp))
- abort ();
- if (gcse_file != NULL)
+ bool adjusted;
+
+ adjusted = adjust_libcall_notes (x, newcnst, insn, libcall_sp);
+ gcc_assert (adjusted);
+
+ if (dump_file != NULL)
{
- fprintf (gcse_file, "LOCAL CONST-PROP: Replacing reg %d in ",
+ fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
REGNO (x));
- fprintf (gcse_file, "insn %d with constant ",
+ fprintf (dump_file, "insn %d with constant ",
INSN_UID (insn));
- print_rtl (gcse_file, newcnst);
- fprintf (gcse_file, "\n");
+ print_rtl (dump_file, newcnst);
+ fprintf (dump_file, "\n");
}
- const_prop_count++;
+ local_const_prop_count++;
return true;
}
else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
{
adjust_libcall_notes (x, newreg, insn, libcall_sp);
- if (gcse_file != NULL)
+ if (dump_file != NULL)
{
- fprintf (gcse_file,
+ fprintf (dump_file,
"LOCAL COPY-PROP: Replacing reg %d in insn %d",
REGNO (x), INSN_UID (insn));
- fprintf (gcse_file, " with reg %d\n", REGNO (newreg));
+ fprintf (dump_file, " with reg %d\n", REGNO (newreg));
}
- copy_prop_count++;
+ local_copy_prop_count++;
return true;
}
}
return true;
}
}
- XEXP (note, 0) = replace_rtx (XEXP (note, 0), oldreg, newval);
+ XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), oldreg, newval);
insn = end;
}
return true;
#define MAX_NESTED_LIBCALLS 9
+/* Do local const/copy propagation (i.e. within each basic block).
+ If ALTER_JUMPS is true, allow propagating into jump insns, which
+ could modify the CFG. */
+
static void
-local_cprop_pass (int alter_jumps)
+local_cprop_pass (bool alter_jumps)
{
+ basic_block bb;
rtx insn;
struct reg_use *reg_used;
rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
cselib_init (false);
libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
*libcall_sp = 0;
- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ FOR_EACH_BB (bb)
{
- if (INSN_P (insn))
+ FOR_BB_INSNS (bb, insn)
{
- rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
-
- if (note)
- {
- if (libcall_sp == libcall_stack)
- abort ();
- *--libcall_sp = XEXP (note, 0);
- }
- note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
- if (note)
- libcall_sp++;
- note = find_reg_equal_equiv_note (insn);
- do
+ if (INSN_P (insn))
{
- reg_use_count = 0;
- note_uses (&PATTERN (insn), local_cprop_find_used_regs, NULL);
- if (note)
- local_cprop_find_used_regs (&XEXP (note, 0), NULL);
+ rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
- for (reg_used = &reg_use_table[0]; reg_use_count > 0;
- reg_used++, reg_use_count--)
- if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
- libcall_sp))
- {
- changed = true;
+ if (note)
+ {
+ gcc_assert (libcall_sp != libcall_stack);
+ *--libcall_sp = XEXP (note, 0);
+ }
+ note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ if (note)
+ libcall_sp++;
+ note = find_reg_equal_equiv_note (insn);
+ do
+ {
+ reg_use_count = 0;
+ note_uses (&PATTERN (insn), local_cprop_find_used_regs,
+ NULL);
+ if (note)
+ local_cprop_find_used_regs (&XEXP (note, 0), NULL);
+
+ for (reg_used = &reg_use_table[0]; reg_use_count > 0;
+ reg_used++, reg_use_count--)
+ if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
+ libcall_sp))
+ {
+ changed = true;
+ break;
+ }
+ if (INSN_DELETED_P (insn))
break;
- }
- if (INSN_DELETED_P (insn))
- break;
+ }
+ while (reg_use_count);
}
- while (reg_use_count);
+ cselib_process_insn (insn);
}
- cselib_process_insn (insn);
+
+ /* Forget everything at the end of a basic block. Make sure we are
+ not inside a libcall, they should never cross basic blocks. */
+ cselib_clear_table ();
+ gcc_assert (libcall_sp == &libcall_stack[MAX_NESTED_LIBCALLS]);
}
+
cselib_finish ();
+
/* Global analysis may get into infinite loops for unreachable blocks. */
if (changed && alter_jumps)
{
delete_unreachable_blocks ();
free_reg_set_mem ();
alloc_reg_set_mem (max_reg_num ());
- compute_sets (get_insns ());
+ compute_sets ();
}
}
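As an illustration of the effect of local_cprop_pass, here is a hypothetical C-level analogue (illustrative only; the pass itself rewrites register uses in the RTL of a single basic block, using cselib to track values):

int
example (int c)
{
  int a = 42;       /* defining set: a <- (const_int 42)             */
  int b = a + c;    /* local const-prop rewrites this use to 42 + c  */
  return b;
}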
/* Note we start at block 1. */
if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
{
- if (gcse_file != NULL)
- fprintf (gcse_file, "\n");
+ if (dump_file != NULL)
+ fprintf (dump_file, "\n");
return 0;
}
start of the block]. */
reset_opr_set_tables ();
- for (insn = BB_HEAD (bb);
- insn != NULL && insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
{
changed |= cprop_insn (insn, alter_jumps);
}
}
- if (gcse_file != NULL)
- fprintf (gcse_file, "\n");
+ if (dump_file != NULL)
+ fprintf (dump_file, "\n");
return changed;
}
settle for the condition variable in the jump instruction being integral.
We prefer to be able to record the value of a user variable, rather than
the value of a temporary used in a condition. This could be solved by
- recording the value of *every* register scaned by canonicalize_condition,
+ recording the value of *every* register scanned by canonicalize_condition,
but this would require some code reorganization. */
rtx
fis_get_condition (rtx jump)
{
- rtx cond, set, tmp, insn, earliest;
- bool reverse;
-
- if (! any_condjump_p (jump))
- return NULL_RTX;
-
- set = pc_set (jump);
- cond = XEXP (SET_SRC (set), 0);
-
- /* If this branches to JUMP_LABEL when the condition is false,
- reverse the condition. */
- reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
- && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump));
-
- /* Use canonicalize_condition to do the dirty work of manipulating
- MODE_CC values and COMPARE rtx codes. */
- tmp = canonicalize_condition (jump, cond, reverse, &earliest, NULL_RTX,
- false);
- if (!tmp)
- return NULL_RTX;
-
- /* Verify that the given condition is valid at JUMP by virtue of not
- having been modified since EARLIEST. */
- for (insn = earliest; insn != jump; insn = NEXT_INSN (insn))
- if (INSN_P (insn) && modified_in_p (tmp, insn))
- break;
- if (insn == jump)
- return tmp;
-
- /* The condition was modified. See if we can get a partial result
- that doesn't follow all the reversals. Perhaps combine can fold
- them together later. */
- tmp = XEXP (tmp, 0);
- if (!REG_P (tmp) || GET_MODE_CLASS (GET_MODE (tmp)) != MODE_INT)
- return NULL_RTX;
- tmp = canonicalize_condition (jump, cond, reverse, &earliest, tmp,
- false);
- if (!tmp)
- return NULL_RTX;
-
- /* For sanity's sake, re-validate the new result. */
- for (insn = earliest; insn != jump; insn = NEXT_INSN (insn))
- if (INSN_P (insn) && modified_in_p (tmp, insn))
- return NULL_RTX;
-
- return tmp;
+ return get_condition (jump, NULL, false, true);
}
/* Check the comparison COND to see if we can safely form an implicit set from
count = 0;
FOR_EACH_BB (bb)
/* Check for more than one successor. */
- if (bb->succ && bb->succ->succ_next)
+ if (EDGE_COUNT (bb->succs) > 1)
{
cond = fis_get_condition (BB_END (bb));
dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
: FALLTHRU_EDGE (bb)->dest;
- if (dest && ! dest->pred->pred_next
+ if (dest && single_pred_p (dest)
&& dest != EXIT_BLOCK_PTR)
{
new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
XEXP (cond, 1));
implicit_sets[dest->index] = new;
- if (gcse_file)
+ if (dump_file)
{
- fprintf(gcse_file, "Implicit set of reg %d in ",
+ fprintf(dump_file, "Implicit set of reg %d in ",
REGNO (XEXP (cond, 0)));
- fprintf(gcse_file, "basic block %d\n", dest->index);
+ fprintf(dump_file, "basic block %d\n", dest->index);
}
count++;
}
}
}
- if (gcse_file)
- fprintf (gcse_file, "Found %d implicit sets\n", count);
+ if (dump_file)
+ fprintf (dump_file, "Found %d implicit sets\n", count);
}
/* Perform one copy/constant propagation pass.
perform conditional jump bypassing optimizations. */
static int
-one_cprop_pass (int pass, int cprop_jumps, int bypass_jumps)
+one_cprop_pass (int pass, bool cprop_jumps, bool bypass_jumps)
{
int changed = 0;
- const_prop_count = 0;
- copy_prop_count = 0;
+ global_const_prop_count = local_const_prop_count = 0;
+ global_copy_prop_count = local_copy_prop_count = 0;
local_cprop_pass (cprop_jumps);
/* Determine implicit sets. */
- implicit_sets = xcalloc (last_basic_block, sizeof (rtx));
+ implicit_sets = XCNEWVEC (rtx, last_basic_block);
find_implicit_sets ();
alloc_hash_table (max_cuid, &set_hash_table, 1);
free (implicit_sets);
implicit_sets = NULL;
- if (gcse_file)
- dump_hash_table (gcse_file, "SET", &set_hash_table);
+ if (dump_file)
+ dump_hash_table (dump_file, "SET", &set_hash_table);
if (set_hash_table.n_elems > 0)
{
alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
free_hash_table (&set_hash_table);
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
+ fprintf (dump_file, "CPROP of %s, pass %d: %d bytes needed, ",
current_function_name (), pass, bytes_used);
- fprintf (gcse_file, "%d const props, %d copy props\n\n",
- const_prop_count, copy_prop_count);
+ fprintf (dump_file, "%d local const props, %d local copy props, ",
+ local_const_prop_count, local_copy_prop_count);
+ fprintf (dump_file, "%d global const props, %d global copy props\n\n",
+ global_const_prop_count, global_copy_prop_count);
}
/* Global analysis may get into infinite loops for unreachable blocks. */
if (changed && cprop_jumps)
if (set == 0)
break;
- if (GET_CODE (set->expr) != SET)
- abort ();
+ gcc_assert (GET_CODE (set->expr) == SET);
src = SET_SRC (set->expr);
if (gcse_constant_p (src))
bypass_block (basic_block bb, rtx setcc, rtx jump)
{
rtx insn, note;
- edge e, enext, edest;
+ edge e, edest;
int i, change;
int may_be_loop_header;
+ unsigned removed_p;
+ edge_iterator ei;
insn = (setcc != NULL) ? setcc : jump;
find_used_regs (&XEXP (note, 0), NULL);
may_be_loop_header = false;
- for (e = bb->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, bb->preds)
if (e->flags & EDGE_DFS_BACK)
{
may_be_loop_header = true;
}
change = 0;
- for (e = bb->pred; e; e = enext)
+ for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
{
- enext = e->pred_next;
+ removed_p = 0;
+
if (e->flags & EDGE_COMPLEX)
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
/* We can't redirect edges from new basic blocks. */
if (e->src->index >= bypass_last_basic_block)
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
/* The irreducible loops created by redirecting of edges entering the
loop from outside would decrease effectiveness of some of the following
optimizations, so prevent this. */
if (may_be_loop_header
&& !(e->flags & EDGE_DFS_BACK))
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
for (i = 0; i < reg_use_count; i++)
{
{
dest = BLOCK_FOR_INSN (XEXP (new, 0));
/* Don't bypass edges containing instructions. */
- for (edest = bb->succ; edest; edest = edest->succ_next)
- if (edest->dest == dest && edest->insns.r)
- {
- dest = NULL;
- break;
- }
+ edest = find_edge (bb, dest);
+ if (edest && edest->insns.r)
+ dest = NULL;
}
else
dest = NULL;
branch. We would end up emitting the instruction on "both"
edges. */
- if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc))))
- {
- edge e2;
- for (e2 = e->src->succ; e2; e2 = e2->succ_next)
- if (e2->dest == dest)
- {
- dest = NULL;
- break;
- }
- }
+ if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
+ && find_edge (e->src, dest))
+ dest = NULL;
old_dest = e->dest;
if (dest != NULL
insert_insn_on_edge (copy_insn (pat), e);
}
- if (gcse_file != NULL)
+ if (dump_file != NULL)
{
- fprintf (gcse_file, "JUMP-BYPASS: Proved reg %d in jump_insn %d equals constant ",
+ fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
+ "in jump_insn %d equals constant ",
regno, INSN_UID (jump));
- print_rtl (gcse_file, SET_SRC (set->expr));
- fprintf (gcse_file, "\nBypass edge from %d->%d to %d\n",
+ print_rtl (dump_file, SET_SRC (set->expr));
+ fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
e->src->index, old_dest->index, dest->index);
}
change = 1;
+ removed_p = 1;
break;
}
}
+ if (!removed_p)
+ ei_next (&ei);
}
return change;
}
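The predecessor walk in bypass_block above advances the edge iterator only when the current edge was not redirected, so that removing an edge from the vector never skips the element that slides into its slot. A minimal standalone C sketch of that iteration discipline on a plain array (removal by overwriting with the last element; all names are illustrative, none are GCC APIs):

#include <stdio.h>

/* Drop negative entries of A[0..N-1] in place.  The index advances
   only when the current element is kept, mirroring how bypass_block
   calls ei_next only when no edge was removed.  */
static int
drop_negatives (int *a, int n)
{
  int i = 0;
  while (i < n)
    {
      if (a[i] < 0)
        a[i] = a[--n];  /* remove: overwrite with the last element,
                           then re-examine slot i  */
      else
        i++;
    }
  return n;
}

int
main (void)
{
  int a[] = { 3, -1, 4, -1, 5 };
  int n = drop_negatives (a, 5);

  for (int i = 0; i < n; i++)
    printf ("%d ", a[i]);
  printf ("\n");
  return 0;
}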
EXIT_BLOCK_PTR, next_bb)
{
/* Check for more than one predecessor. */
- if (bb->pred && bb->pred->pred_next)
+ if (!single_pred_p (bb))
{
setcc = NULL_RTX;
- for (insn = BB_HEAD (bb);
- insn != NULL && insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
- if (GET_CODE (insn) == INSN)
+ FOR_BB_INSNS (bb, insn)
+ if (NONJUMP_INSN_P (insn))
{
if (setcc)
break;
FOR_EACH_BB (bb)
{
edge e;
+ edge_iterator ei;
/* If the current block is the destination of an abnormal edge, we
kill all trapping expressions because we won't be able to properly
place the instruction on the edge. So make them neither
anticipatable nor transparent. This is fairly conservative. */
- for (e = bb->pred; e ; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, bb->preds)
if (e->flags & EDGE_ABNORMAL)
{
sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
}
- edge_list = pre_edge_lcm (gcse_file, expr_hash_table.n_elems, transp, comp, antloc,
+ edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
ae_kill, &pre_insert_map, &pre_delete_map);
sbitmap_vector_free (antloc);
antloc = NULL;
pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
{
edge pred;
-
- for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (pred, ei, bb->preds)
{
basic_block pred_bb = pred->src;
pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
{
int rval;
- char *visited = xcalloc (last_basic_block, 1);
+ char *visited = XCNEWVEC (char, last_basic_block);
rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
/* Otherwise, make a new insn to compute this expression and make sure the
insn will be recognized (this also adds any needed CLOBBERs). Copy the
expression to make sure we don't have any sharing issues. */
- else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
- abort ();
+ else
+ {
+ rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
+
+ if (insn_invalid_p (insn))
+ gcc_unreachable ();
+ }
+
pat = get_insns ();
end_sequence ();
rtx pat, pat_end;
pat = process_insert_insn (expr);
- if (pat == NULL_RTX || ! INSN_P (pat))
- abort ();
+ gcc_assert (pat && INSN_P (pat));
pat_end = pat;
while (NEXT_INSN (pat_end) != NULL_RTX)
instructions in the presence of non-call exceptions. */
if (JUMP_P (insn)
- || (GET_CODE (insn) == INSN
- && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))))
+ || (NONJUMP_INSN_P (insn)
+ && (!single_succ_p (bb)
+ || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
{
#ifdef HAVE_cc0
rtx note;
/* It should always be the case that we can put these instructions
anywhere in the basic block when performing PRE optimizations.
Check this. */
- if (GET_CODE (insn) == INSN && pre
- && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
- && !TEST_BIT (transp[bb->index], expr->bitmap_index))
- abort ();
+ gcc_assert (!NONJUMP_INSN_P (insn) || !pre
+ || TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ || TEST_BIT (transp[bb->index], expr->bitmap_index));
/* If this is a jump table, then we can't insert stuff here. Since
we know the previous real insn must be the tablejump, we insert
}
#endif
/* FIXME: What if something in cc0/jump uses value set in new insn? */
- new_insn = emit_insn_before (pat, insn);
+ new_insn = emit_insn_before_noloc (pat, insn);
}
/* Likewise if the last insn is a call, as will happen in the presence
of exception handling. */
else if (CALL_P (insn)
- && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))
+ && (!single_succ_p (bb)
+ || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
{
/* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
we search backward and place the instructions before the first
anywhere in the basic block when performing PRE optimizations.
Check this. */
- if (pre
- && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
- && !TEST_BIT (transp[bb->index], expr->bitmap_index))
- abort ();
+ gcc_assert (!pre
+ || TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ || TEST_BIT (transp[bb->index], expr->bitmap_index));
/* Since different machines initialize their parameter registers
in different orders, assume nothing. Collect the set of all
|| NOTE_INSN_BASIC_BLOCK_P (insn))
insn = NEXT_INSN (insn);
- new_insn = emit_insn_before (pat, insn);
+ new_insn = emit_insn_before_noloc (pat, insn);
}
else
- new_insn = emit_insn_after (pat, insn);
+ new_insn = emit_insn_after_noloc (pat, insn);
while (1)
{
gcse_create_count++;
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
+ fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
bb->index, INSN_UID (new_insn));
- fprintf (gcse_file, "copying expression %d to reg %d\n",
+ fprintf (dump_file, "copying expression %d to reg %d\n",
expr->bitmap_index, regno);
}
}
if (! occr->deleted_p)
continue;
- /* Insert this expression on this edge if if it would
+ /* Insert this expression on this edge if it would
reach the deleted occurrence in BB. */
if (!TEST_BIT (inserted[e], j))
{
handling this situation. This one is easiest for
now. */
- if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
+ if (eg->flags & EDGE_ABNORMAL)
insert_insn_end_bb (index_map[j], bb, 0);
else
{
insert_insn_on_edge (insn, eg);
}
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
+ fprintf (dump_file, "PRE/HOIST: edge (%d,%d), ",
bb->index,
INDEX_EDGE_SUCC_BB (edge_list, e)->index);
- fprintf (gcse_file, "copy expression %d\n",
+ fprintf (dump_file, "copy expression %d\n",
expr->bitmap_index);
}
int regno = REGNO (reg);
int indx = expr->bitmap_index;
rtx pat = PATTERN (insn);
- rtx set, new_insn;
+ rtx set, first_set, new_insn;
rtx old_reg;
int i;
/* This block matches the logic in hash_scan_insn. */
- if (GET_CODE (pat) == SET)
- set = pat;
- else if (GET_CODE (pat) == PARALLEL)
+ switch (GET_CODE (pat))
{
+ case SET:
+ set = pat;
+ break;
+
+ case PARALLEL:
/* Search through the parallel looking for the set whose
source was the expression that we're interested in. */
+ first_set = NULL_RTX;
set = NULL_RTX;
for (i = 0; i < XVECLEN (pat, 0); i++)
{
rtx x = XVECEXP (pat, 0, i);
- if (GET_CODE (x) == SET
- && expr_equiv_p (SET_SRC (x), expr->expr))
+ if (GET_CODE (x) == SET)
{
- set = x;
- break;
+ /* If the source was a REG_EQUAL or REG_EQUIV note, we
+ may not find an equivalent expression, but in this
+ case the PARALLEL will have a single set. */
+ if (first_set == NULL_RTX)
+ first_set = x;
+ if (expr_equiv_p (SET_SRC (x), expr->expr))
+ {
+ set = x;
+ break;
+ }
}
}
+
+ gcc_assert (first_set);
+ if (set == NULL_RTX)
+ set = first_set;
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else
- abort ();
if (REG_P (SET_DEST (set)))
{
new_insn = emit_insn_after (new_insn, insn);
/* Keep register set table up to date. */
- replace_one_set (REGNO (old_reg), insn, new_insn);
record_one_set (regno, insn);
}
else
gcse_create_count++;
- if (gcse_file)
- fprintf (gcse_file,
+ if (dump_file)
+ fprintf (dump_file,
"PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
BLOCK_NUM (insn), INSN_UID (new_insn), indx,
INSN_UID (insn), regno);
changed = 1;
gcse_subst_count++;
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file,
+ fprintf (dump_file,
"PRE: redundant insn %d (expression %d) in ",
INSN_UID (insn), indx);
- fprintf (gcse_file, "bb %d, reaching reg is %d\n",
+ fprintf (dump_file, "bb %d, reaching reg is %d\n",
bb->index, REGNO (expr->reaching_reg));
}
}
/* Compute a mapping from expression number (`bitmap_index') to
hash table entry. */
- index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
+ index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
for (i = 0; i < expr_hash_table.size; i++)
for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
index_map[expr->bitmap_index] = expr;
compute_hash_table (&expr_hash_table);
trim_ld_motion_mems ();
- if (gcse_file)
- dump_hash_table (gcse_file, "Expression", &expr_hash_table);
+ if (dump_file)
+ dump_hash_table (dump_file, "Expression", &expr_hash_table);
if (expr_hash_table.n_elems > 0)
{
}
free_ldst_mems ();
- remove_fake_edges ();
+ remove_fake_exit_edges ();
free_hash_table (&expr_hash_table);
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
+ fprintf (dump_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
current_function_name (), pass, bytes_used);
- fprintf (gcse_file, "%d substs, %d insns created\n",
+ fprintf (dump_file, "%d substs, %d insns created\n",
gcse_subst_count, gcse_create_count);
}
LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
because the following loop optimization pass requires them. */
-/* ??? This is very similar to the loop.c add_label_notes function. We
- could probably share code here. */
-
/* ??? If there was a jump optimization pass after gcse and before loop,
then we would not need to do this here, because jump would add the
necessary REG_LABEL notes. */
passes++;
}
- if (gcse_file)
- fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
+ if (dump_file)
+ fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
}
/* Top level routine to do the dataflow analysis needed by code hoisting. */
compute_transpout ();
compute_code_hoist_vbeinout ();
calculate_dominance_info (CDI_DOMINATORS);
- if (gcse_file)
- fprintf (gcse_file, "\n");
+ if (dump_file)
+ fprintf (dump_file, "\n");
}
/* Determine if the expression identified by EXPR_INDEX would
hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
{
edge pred;
+ edge_iterator ei;
int visited_allocated_locally = 0;
if (visited == NULL)
{
visited_allocated_locally = 1;
- visited = xcalloc (last_basic_block, 1);
+ visited = XCNEWVEC (char, last_basic_block);
}
- for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
+ FOR_EACH_EDGE (pred, ei, bb->preds)
{
basic_block pred_bb = pred->src;
/* Compute a mapping from expression number (`bitmap_index') to
hash table entry. */
- index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
+ index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
for (i = 0; i < expr_hash_table.size; i++)
for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
index_map[expr->bitmap_index] = expr;
insn_inserted_p = 0;
/* These tests should be the same as the tests above. */
- if (TEST_BIT (hoist_vbeout[bb->index], i))
+ if (TEST_BIT (hoist_exprs[bb->index], i))
{
/* We've found a potentially hoistable expression, now
we look at every block BB dominates to see if it
while (BLOCK_FOR_INSN (occr->insn) != dominated && occr)
occr = occr->next;
- /* Should never happen. */
- if (!occr)
- abort ();
-
+ gcc_assert (occr);
insn = occr->insn;
-
set = single_set (insn);
- if (! set)
- abort ();
+ gcc_assert (set);
/* Create a pseudo-reg to store the result of reaching
expressions into. Get the mode for the new pseudo
alloc_hash_table (max_cuid, &expr_hash_table, 0);
compute_hash_table (&expr_hash_table);
- if (gcse_file)
- dump_hash_table (gcse_file, "Code Hosting Expressions", &expr_hash_table);
+ if (dump_file)
+ dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
if (expr_hash_table.n_elems > 0)
{
load towards the exit, and we end up with no loads or stores of 'i'
in the loop. */
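+/* Hash an ls_expr node for pre_ldst_table, using the hash of its
+ MEM pattern. */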
+static hashval_t
+pre_ldst_expr_hash (const void *p)
+{
+ int do_not_record_p = 0;
+ const struct ls_expr *x = p;
+ return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
+}
+
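+/* Return nonzero if two ls_expr nodes refer to equivalent MEM patterns;
+ equality callback for pre_ldst_table. */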
+static int
+pre_ldst_expr_eq (const void *p1, const void *p2)
+{
+ const struct ls_expr *ptr1 = p1, *ptr2 = p2;
+ return expr_equiv_p (ptr1->pattern, ptr2->pattern);
+}
+
/* This will search the ldst list for a matching expression. If it
doesn't find one, we create one and initialize it. */
int do_not_record_p = 0;
struct ls_expr * ptr;
unsigned int hash;
+ void **slot;
+ struct ls_expr e;
- hash = hash_expr_1 (x, GET_MODE (x), & do_not_record_p);
+ hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
+ NULL, /*have_reg_qty=*/false);
- for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
- if (ptr->hash_index == hash && expr_equiv_p (ptr->pattern, x))
- return ptr;
+ e.pattern = x;
+ slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
+ if (*slot)
+ return (struct ls_expr *)*slot;
- ptr = xmalloc (sizeof (struct ls_expr));
+ ptr = XNEW (struct ls_expr);
ptr->next = pre_ldst_mems;
ptr->expr = NULL;
ptr->index = 0;
ptr->hash_index = hash;
pre_ldst_mems = ptr;
+ *slot = ptr;
return ptr;
}
static void
free_ldst_mems (void)
{
+ if (pre_ldst_table)
+ htab_delete (pre_ldst_table);
+ pre_ldst_table = NULL;
+
while (pre_ldst_mems)
{
struct ls_expr * tmp = pre_ldst_mems;
static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
- struct ls_expr * ptr;
-
- for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
- if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
- return ptr;
-
- return NULL;
+ struct ls_expr e;
+ void **slot;
+ if (!pre_ldst_table)
+ return NULL;
+ e.pattern = x;
+ slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
+ if (!slot || ((struct ls_expr *)*slot)->invalid)
+ return NULL;
+ return *slot;
}
/* Assign each element of the list of mems a monotonically increasing value. */
rtx insn;
pre_ldst_mems = NULL;
+ pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
+ pre_ldst_expr_eq, NULL);
FOR_EACH_BB (bb)
{
- for (insn = BB_HEAD (bb);
- insn && insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
{
else
{
*last = ptr->next;
+ htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
free_ldst_entry (ptr);
ptr = * last;
}
}
/* Show the world what we've found. */
- if (gcse_file && pre_ldst_mems != NULL)
- print_ldst_list (gcse_file);
+ if (dump_file && pre_ldst_mems != NULL)
+ print_ldst_list (dump_file);
}
/* This routine will take an expression which we are replacing with
if (expr->reaching_reg == src)
continue;
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "PRE: store updated with reaching reg ");
- print_rtl (gcse_file, expr->reaching_reg);
- fprintf (gcse_file, ":\n ");
- print_inline_rtx (gcse_file, insn, 8);
- fprintf (gcse_file, "\n");
+ fprintf (dump_file, "PRE: store updated with reaching reg ");
+ print_rtl (dump_file, expr->reaching_reg);
+ fprintf (dump_file, ":\n ");
+ print_inline_rtx (dump_file, insn, 8);
+ fprintf (dump_file, "\n");
}
copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat)));
case POST_DEC:
case POST_INC:
/* We do not run this function with arguments having side effects. */
- abort ();
+ gcc_unreachable ();
case PC:
case CC0: /*FIXME*/
if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
return;
+ /* Make sure that the SET_SRC of this store insn can be assigned to
+ a register, or we will fail later on in replace_store_insn, which
+ assumes that we can do this. But sometimes the target machine has
+ oddities like MEM read-modify-write instructions. See for example
+ PR24257. */
+ if (!can_assign_to_reg_p (SET_SRC (set)))
+ return;
+
ptr = ldst_entry (dest);
if (!ptr->pattern_regs)
ptr->pattern_regs = extract_mentioned_regs (dest);
max_gcse_regno);
sbitmap_vector_zero (reg_set_in_block, last_basic_block);
pre_ldst_mems = 0;
- last_set_in = xcalloc (max_gcse_regno, sizeof (int));
- already_set = xmalloc (sizeof (int) * max_gcse_regno);
+ pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
+ pre_ldst_expr_eq, NULL);
+ last_set_in = XCNEWVEC (int, max_gcse_regno);
+ already_set = XNEWVEC (int, max_gcse_regno);
/* Find all the stores we care about. */
FOR_EACH_BB (bb)
/* First compute the registers set in this block. */
regvec = last_set_in;
- for (insn = BB_HEAD (bb);
- insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
{
if (! INSN_P (insn))
continue;
if (CALL_P (insn))
{
- bool clobbers_all = false;
-#ifdef NON_SAVING_SETJMP
- if (NON_SAVING_SETJMP
- && find_reg_note (insn, REG_SETJMP, NULL_RTX))
- clobbers_all = true;
-#endif
-
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- if (clobbers_all
- || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
{
last_set_in[regno] = INSN_UID (insn);
SET_BIT (reg_set_in_block[bb->index], regno);
/* Now find the stores. */
memset (already_set, 0, sizeof (int) * max_gcse_regno);
regvec = already_set;
- for (insn = BB_HEAD (bb);
- insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
{
if (! INSN_P (insn))
continue;
if (CALL_P (insn))
{
- bool clobbers_all = false;
-#ifdef NON_SAVING_SETJMP
- if (NON_SAVING_SETJMP
- && find_reg_note (insn, REG_SETJMP, NULL_RTX))
- clobbers_all = true;
-#endif
-
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- if (clobbers_all
- || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
already_set[regno] = 1;
}
note_stores (pat, reg_clear_last_set, last_set_in);
if (CALL_P (insn))
{
- bool clobbers_all = false;
-#ifdef NON_SAVING_SETJMP
- if (NON_SAVING_SETJMP
- && find_reg_note (insn, REG_SETJMP, NULL_RTX))
- clobbers_all = true;
-#endif
-
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- if ((clobbers_all
- || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)
&& last_set_in[regno] == INSN_UID (insn))
last_set_in[regno] = 0;
}
#ifdef ENABLE_CHECKING
/* last_set_in should now be all-zero. */
for (regno = 0; regno < max_gcse_regno; regno++)
- if (last_set_in[regno] != 0)
- abort ();
+ gcc_assert (!last_set_in[regno]);
#endif
/* Clear temporary marks. */
if (!AVAIL_STORE_LIST (ptr))
{
*prev_next_ptr_ptr = ptr->next;
+ htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
free_ldst_entry (ptr);
}
else
ret = enumerate_ldsts ();
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
- print_ldst_list (gcse_file);
+ fprintf (dump_file, "ST_avail and ST_antic (shown under loads..)\n");
+ print_ldst_list (dump_file);
}
free (last_set_in);
/* Check if INSN kills the store pattern X (is aliased with it).
AFTER is true if we are checking the case when store X occurs
- after the insn. Return true if it it does. */
+ after the insn. Return true if it does. */
static bool
store_killed_in_insn (rtx x, rtx x_regs, rtx insn, int after)
rtx pat = PATTERN (insn);
rtx dest = SET_DEST (pat);
- if (GET_CODE (dest) == SIGN_EXTRACT
- || GET_CODE (dest) == ZERO_EXTRACT)
+ if (GET_CODE (dest) == ZERO_EXTRACT)
dest = XEXP (dest, 0);
/* Check for memory stores to aliased objects. */
if (TEST_BIT (ae_gen[bb->index], ptr->index))
{
rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
- if (gcse_file)
- fprintf (gcse_file, "Removing redundant store:\n");
+ if (dump_file)
+ fprintf (dump_file, "Removing redundant store:\n");
replace_store_insn (r, XEXP (st, 0), bb, ptr);
continue;
}
transp = sbitmap_vector_alloc (last_basic_block, num_stores);
sbitmap_vector_zero (transp, last_basic_block);
- regs_set_in_block = xmalloc (sizeof (int) * max_gcse_regno);
+ regs_set_in_block = XNEWVEC (int, max_gcse_regno);
FOR_EACH_BB (bb)
{
free (regs_set_in_block);
- if (gcse_file)
+ if (dump_file)
{
- dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
- dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
- dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
- dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_antloc", "", st_antloc, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_kill", "", ae_kill, last_basic_block);
+ dump_sbitmap_vector (dump_file, "Transpt", "", transp, last_basic_block);
+ dump_sbitmap_vector (dump_file, "st_avloc", "", ae_gen, last_basic_block);
}
}
before = NEXT_INSN (before);
}
- insn = emit_insn_after (insn, prev);
+ insn = emit_insn_after_noloc (insn, prev);
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
+ fprintf (dump_file, "STORE_MOTION insert store at start of BB %d:\n",
bb->index);
- print_inline_rtx (gcse_file, insn, 6);
- fprintf (gcse_file, "\n");
+ print_inline_rtx (dump_file, insn, 6);
+ fprintf (dump_file, "\n");
}
}
rtx reg, insn;
basic_block bb;
edge tmp;
+ edge_iterator ei;
/* We did all the deleted before this insert, so if we didn't delete a
store, then we haven't set the reaching reg yet either. */
insert it at the start of the BB, and reset the insert bits on the other
edges so we don't try to insert it on the other edges. */
bb = e->dest;
- for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
+ FOR_EACH_EDGE (tmp, ei, e->dest->preds)
if (!(tmp->flags & EDGE_FAKE))
{
int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
- if (index == EDGE_INDEX_NO_EDGE)
- abort ();
+
+ gcc_assert (index != EDGE_INDEX_NO_EDGE);
if (! TEST_BIT (pre_insert_map[index], expr->index))
break;
}
insertion vector for these edges, and insert at the start of the BB. */
if (!tmp && bb != EXIT_BLOCK_PTR)
{
- for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
+ FOR_EACH_EDGE (tmp, ei, e->dest->preds)
{
int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
RESET_BIT (pre_insert_map[index], expr->index);
return 0;
}
- /* We can't insert on this edge, so we'll insert at the head of the
- successors block. See Morgan, sec 10.5. */
- if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
- {
- insert_insn_start_bb (insn, bb);
- return 0;
- }
+ /* We can't put stores in the front of blocks pointed to by abnormal
+ edges since that may put a store where one didn't used to be. */
+ gcc_assert (!(e->flags & EDGE_ABNORMAL));
insert_insn_on_edge (insn, e);
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
+ fprintf (dump_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
e->src->index, e->dest->index);
- print_inline_rtx (gcse_file, insn, 6);
- fprintf (gcse_file, "\n");
+ print_inline_rtx (dump_file, insn, 6);
+ fprintf (dump_file, "\n");
}
return 1;
static void
remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr)
{
- edge *stack = xmalloc (sizeof (edge) * n_basic_blocks), act;
+ edge_iterator *stack, ei;
+ int sp;
+ edge act;
sbitmap visited = sbitmap_alloc (last_basic_block);
- int stack_top = 0;
rtx last, insn, note;
rtx mem = smexpr->pattern;
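+ /* Walk the successors of BB depth-first, using an explicit stack of
+ edge iterators. */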
+ stack = XNEWVEC (edge_iterator, n_basic_blocks);
+ sp = 0;
+ ei = ei_start (bb->succs);
+
sbitmap_zero (visited);
- act = bb->succ;
+ act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
while (1)
{
if (!act)
{
- if (!stack_top)
+ if (!sp)
{
free (stack);
sbitmap_free (visited);
return;
}
- act = stack[--stack_top];
+ act = ei_edge (stack[--sp]);
}
bb = act->dest;
if (bb == EXIT_BLOCK_PTR
|| TEST_BIT (visited, bb->index))
{
- act = act->succ_next;
+ if (!ei_end_p (ei))
+ ei_next (&ei);
+ act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
continue;
}
SET_BIT (visited, bb->index);
if (!note || !expr_equiv_p (XEXP (note, 0), mem))
continue;
- if (gcse_file)
- fprintf (gcse_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
+ if (dump_file)
+ fprintf (dump_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
INSN_UID (insn));
remove_note (insn, note);
}
- act = act->succ_next;
- if (bb->succ)
+
+ if (!ei_end_p (ei))
+ ei_next (&ei);
+ act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
+
+ if (EDGE_COUNT (bb->succs) > 0)
{
if (act)
- stack[stack_top++] = act;
- act = bb->succ;
+ stack[sp++] = ei;
+ ei = ei_start (bb->succs);
+ act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
}
}
}
static void
replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr)
{
- rtx insn, mem, note, set, ptr;
+ rtx insn, mem, note, set, ptr, pair;
mem = smexpr->pattern;
insn = gen_move_insn (reg, SET_SRC (single_set (del)));
insn = emit_insn_after (insn, del);
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file,
+ fprintf (dump_file,
"STORE_MOTION delete insn in BB %d:\n ", bb->index);
- print_inline_rtx (gcse_file, del, 6);
- fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
- print_inline_rtx (gcse_file, insn, 6);
- fprintf (gcse_file, "\n");
+ print_inline_rtx (dump_file, del, 6);
+ fprintf (dump_file, "\nSTORE MOTION replaced with insn:\n ");
+ print_inline_rtx (dump_file, insn, 6);
+ fprintf (dump_file, "\n");
}
for (ptr = ANTIC_STORE_LIST (smexpr); ptr; ptr = XEXP (ptr, 1))
XEXP (ptr, 0) = insn;
break;
}
+
+ /* Move the notes from the deleted insn to its replacement, and patch
+ up the LIBCALL notes. */
+ REG_NOTES (insn) = REG_NOTES (del);
+
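+ /* A REG_RETVAL note points at the first insn of a libcall sequence,
+ whose REG_LIBCALL note must now point back at the replacement insn,
+ and vice versa. */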
+ note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ if (note)
+ {
+ pair = XEXP (note, 0);
+ note = find_reg_note (pair, REG_LIBCALL, NULL_RTX);
+ XEXP (note, 0) = insn;
+ }
+ note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
+ if (note)
+ {
+ pair = XEXP (note, 0);
+ note = find_reg_note (pair, REG_RETVAL, NULL_RTX);
+ XEXP (note, 0) = insn;
+ }
+
delete_insn (del);
/* Now we must handle REG_EQUAL notes whose contents is equal to the mem;
if (!note || !expr_equiv_p (XEXP (note, 0), mem))
continue;
- if (gcse_file)
- fprintf (gcse_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
+ if (dump_file)
+ fprintf (dump_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
INSN_UID (insn));
remove_note (insn, note);
}
struct ls_expr * ptr;
int update_flow = 0;
- if (gcse_file)
+ if (dump_file)
{
- fprintf (gcse_file, "before store motion\n");
- print_rtl (gcse_file, get_insns ());
+ fprintf (dump_file, "before store motion\n");
+ print_rtl (dump_file, get_insns ());
}
init_alias_analysis ();
num_stores = compute_store_table ();
if (num_stores == 0)
{
+ htab_delete (pre_ldst_table);
+ pre_ldst_table = NULL;
sbitmap_vector_free (reg_set_in_block);
end_alias_analysis ();
return;
add_noreturn_fake_exit_edges ();
connect_infinite_loops_to_exit ();
- edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
+ edge_list = pre_edge_rev_lcm (num_stores, transp, ae_gen,
st_antloc, ae_kill, &pre_insert_map,
&pre_delete_map);
/* Now we want to insert the new stores which are going to be needed. */
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
{
+ /* If any of the edges we have above are abnormal, we can't move this
+ store. */
+ for (x = NUM_EDGES (edge_list) - 1; x >= 0; x--)
+ if (TEST_BIT (pre_insert_map[x], ptr->index)
+ && (INDEX_EDGE (edge_list, x)->flags & EDGE_ABNORMAL))
+ break;
+
+ if (x >= 0)
+ {
+ if (dump_file != NULL)
+ fprintf (dump_file,
+ "Can't replace store %d: abnormal edge from %d to %d\n",
+ ptr->index, INDEX_EDGE (edge_list, x)->src->index,
+ INDEX_EDGE (edge_list, x)->dest->index);
+ continue;
+ }
+
+ /* Now we want to insert the new stores which are going to be needed. */
+
FOR_EACH_BB (bb)
if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
delete_store (ptr, bb);
free_store_memory ();
free_edge_list (edge_list);
- remove_fake_edges ();
+ remove_fake_exit_edges ();
end_alias_analysis ();
}
\f
/* Entry point for jump bypassing optimization pass. */
-int
-bypass_jumps (FILE *file)
+static int
+bypass_jumps (void)
{
int changed;
if (current_function_calls_setjmp)
return 0;
- /* For calling dump_foo fns from gdb. */
- debug_stderr = stderr;
- gcse_file = file;
-
/* Identify the basic block information for this function, including
successors and predecessors. */
max_gcse_regno = max_reg_num ();
- if (file)
- dump_flow_info (file);
+ if (dump_file)
+ dump_flow_info (dump_file, dump_flags);
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= 1 || is_too_expensive (_ ("jump bypassing disabled")))
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+ || is_too_expensive (_ ("jump bypassing disabled")))
return 0;
gcc_obstack_init (&gcse_obstack);
information about memory sets when we build the hash tables. */
alloc_reg_set_mem (max_gcse_regno);
- compute_sets (get_insns ());
+ compute_sets ();
max_gcse_regno = max_reg_num ();
- alloc_gcse_mem (get_insns ());
- changed = one_cprop_pass (1, 1, 1);
+ alloc_gcse_mem ();
+ changed = one_cprop_pass (MAX_GCSE_PASSES + 2, true, true);
free_gcse_mem ();
- if (file)
+ if (dump_file)
{
- fprintf (file, "BYPASS of %s: %d basic blocks, ",
+ fprintf (dump_file, "BYPASS of %s: %d basic blocks, ",
current_function_name (), n_basic_blocks);
- fprintf (file, "%d bytes\n\n", bytes_used);
+ fprintf (dump_file, "%d bytes\n\n", bytes_used);
}
obstack_free (&gcse_obstack, NULL);
graceful degradation. */
if (n_edges > 20000 + n_basic_blocks * 4)
{
- if (warn_disabled_optimization)
- warning ("%s: %d basic blocks and %d edges/basic block",
- pass, n_basic_blocks, n_edges / n_basic_blocks);
+ warning (OPT_Wdisabled_optimization,
+ "%s: %d basic blocks and %d edges/basic block",
+ pass, n_basic_blocks, n_edges / n_basic_blocks);
return true;
}
* SBITMAP_SET_SIZE (max_reg_num ())
* sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
{
- if (warn_disabled_optimization)
- warning ("%s: %d basic blocks and %d registers",
- pass, n_basic_blocks, max_reg_num ());
+ warning (OPT_Wdisabled_optimization,
+ "%s: %d basic blocks and %d registers",
+ pass, n_basic_blocks, max_reg_num ());
return true;
}
return false;
}
-
-/* The following code implements gcse after reload, the purpose of this
- pass is to cleanup redundant loads generated by reload and other
- optimizations that come after gcse. It searches for simple inter-block
- redundancies and tries to eliminate them by adding moves and loads
- in cold places. */
-
-/* The following structure holds the information about the occurrences of
- the redundant instructions. */
-struct unoccr
-{
- struct unoccr *next;
- edge pred;
- rtx insn;
-};
-
-static bool reg_used_on_edge (rtx, edge);
-static rtx reg_set_between_after_reload_p (rtx, rtx, rtx);
-static rtx reg_used_between_after_reload_p (rtx, rtx, rtx);
-static rtx get_avail_load_store_reg (rtx);
-static bool is_jump_table_basic_block (basic_block);
-static bool bb_has_well_behaved_predecessors (basic_block);
-static struct occr* get_bb_avail_insn (basic_block, struct occr *);
-static void hash_scan_set_after_reload (rtx, rtx, struct hash_table *);
-static void compute_hash_table_after_reload (struct hash_table *);
-static void eliminate_partially_redundant_loads (basic_block,
- rtx,
- struct expr *);
-static void gcse_after_reload (void);
-static struct occr* get_bb_avail_insn (basic_block, struct occr *);
-void gcse_after_reload_main (rtx, FILE *);
-
-
-/* Check if register REG is used in any insn waiting to be inserted on E.
- Assumes no such insn can be a CALL_INSN; if so call reg_used_between_p
- with PREV(insn),NEXT(insn) instead of calling
- reg_overlap_mentioned_p. */
-
+\f
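+/* Gate for the jump bypassing pass: run it only when optimizing and
+ -fgcse is enabled. */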
static bool
-reg_used_on_edge (rtx reg, edge e)
+gate_handle_jump_bypass (void)
{
- rtx insn;
-
- for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
- if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
- return true;
-
- return false;
+ return optimize > 0 && flag_gcse;
}
-/* Return the insn that sets register REG or clobbers it in between
- FROM_INSN and TO_INSN (exclusive of those two).
- Just like reg_set_between but for hard registers and not pseudos. */
-
-static rtx
-reg_set_between_after_reload_p (rtx reg, rtx from_insn, rtx to_insn)
+/* Perform jump bypassing and control flow optimizations. */
+static unsigned int
+rest_of_handle_jump_bypass (void)
{
- rtx insn;
- int regno;
-
- if (! REG_P (reg))
- abort ();
- regno = REGNO (reg);
+ cleanup_cfg (CLEANUP_EXPENSIVE);
+ reg_scan (get_insns (), max_reg_num ());
- /* We are called after register allocation. */
- if (regno >= FIRST_PSEUDO_REGISTER)
- abort ();
-
- if (from_insn == to_insn)
- return NULL_RTX;
-
- for (insn = NEXT_INSN (from_insn);
- insn != to_insn;
- insn = NEXT_INSN (insn))
+ if (bypass_jumps ())
{
- if (INSN_P (insn))
- {
- if (FIND_REG_INC_NOTE (insn, reg)
- || (CALL_P (insn)
- && call_used_regs[regno])
- || find_reg_fusage (insn, CLOBBER, reg))
- return insn;
- }
- if (set_of (reg, insn) != NULL_RTX)
- return insn;
+ rebuild_jump_labels (get_insns ());
+ cleanup_cfg (CLEANUP_EXPENSIVE);
+ delete_trivially_dead_insns (get_insns (), max_reg_num ());
}
- return NULL_RTX;
-}
-
-/* Return the insn that uses register REG in between FROM_INSN and TO_INSN
- (exclusive of those two). Similar to reg_used_between but for hard
- registers and not pseudos. */
-
-static rtx
-reg_used_between_after_reload_p (rtx reg, rtx from_insn, rtx to_insn)
-{
- rtx insn;
- int regno;
-
- if (! REG_P (reg))
- return to_insn;
- regno = REGNO (reg);
-
- /* We are called after register allocation. */
- if (regno >= FIRST_PSEUDO_REGISTER)
- abort ();
- if (from_insn == to_insn)
- return NULL_RTX;
-
- for (insn = NEXT_INSN (from_insn);
- insn != to_insn;
- insn = NEXT_INSN (insn))
- if (INSN_P (insn)
- && (reg_overlap_mentioned_p (reg, PATTERN (insn))
- || (CALL_P (insn)
- && call_used_regs[regno])
- || find_reg_fusage (insn, USE, reg)
- || find_reg_fusage (insn, CLOBBER, reg)))
- return insn;
- return NULL_RTX;
-}
-
-/* Return the loaded/stored register of a load/store instruction. */
-
-static rtx
-get_avail_load_store_reg (rtx insn)
-{
- if (REG_P (SET_DEST (PATTERN (insn)))) /* A load. */
- return SET_DEST(PATTERN(insn));
- if (REG_P (SET_SRC (PATTERN (insn)))) /* A store. */
- return SET_SRC (PATTERN (insn));
- abort ();
+ return 0;
}
-/* Don't handle ABNORMAL edges or jump tables. */
-
-static bool
-is_jump_table_basic_block (basic_block bb)
-{
- rtx insn = BB_END (bb);
-
- if (JUMP_TABLE_DATA_P (insn))
- return true;
- return false;
-}
+struct tree_opt_pass pass_jump_bypass =
+{
+ "bypass", /* name */
+ gate_handle_jump_bypass, /* gate */
+ rest_of_handle_jump_bypass, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_BYPASS, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func |
+ TODO_ggc_collect | TODO_verify_flow, /* todo_flags_finish */
+ 'G' /* letter */
+};
-/* Return nonzero if the predecessors of BB are "well behaved". */
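+/* Gate for the GCSE pass: run it only when optimizing and -fgcse is
+ enabled. */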
static bool
-bb_has_well_behaved_predecessors (basic_block bb)
-{
- edge pred;
-
- if (! bb->pred)
- return false;
- for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
- if (((pred->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (pred))
- || is_jump_table_basic_block (pred->src))
- return false;
- return true;
-}
-
-
-/* Search for the occurrences of expression in BB. */
-
-static struct occr*
-get_bb_avail_insn (basic_block bb, struct occr *occr)
+gate_handle_gcse (void)
{
- for (; occr != NULL; occr = occr->next)
- if (BLOCK_FOR_INSN (occr->insn)->index == bb->index)
- return occr;
- return NULL;
-}
-
-/* Perform partial GCSE pass after reload, try to eliminate redundant loads
- created by the reload pass. We try to look for a full or partial
- redundant loads fed by one or more loads/stores in predecessor BBs,
- and try adding loads to make them fully redundant. We also check if
- it's worth adding loads to be able to delete the redundant load.
-
- Algorithm:
- 1. Build available expressions hash table:
- For each load/store instruction, if the loaded/stored memory didn't
- change until the end of the basic block add this memory expression to
- the hash table.
- 2. Perform Redundancy elimination:
- For each load instruction do the following:
- perform partial redundancy elimination, check if it's worth adding
- loads to make the load fully redundant. If so add loads and
- register copies and delete the load.
-
- Future enhancement:
- if loaded register is used/defined between load and some store,
- look for some other free register between load and all its stores,
- and replace load with a copy from this register to the loaded
- register. */
-
-
-/* This handles the case where several stores feed a partially redundant
- load. It checks if the redundancy elimination is possible and if it's
- worth it. */
-
-static void
-eliminate_partially_redundant_loads (basic_block bb, rtx insn,
- struct expr *expr)
-{
- edge pred;
- rtx avail_insn = NULL_RTX;
- rtx avail_reg;
- rtx dest, pat;
- struct occr *a_occr;
- struct unoccr *occr, *avail_occrs = NULL;
- struct unoccr *unoccr, *unavail_occrs = NULL;
- int npred_ok = 0;
- gcov_type ok_count = 0; /* Redundant load execution count. */
- gcov_type critical_count = 0; /* Execution count of critical edges. */
-
- /* The execution count of the loads to be added to make the
- load fully redundant. */
- gcov_type not_ok_count = 0;
- basic_block pred_bb;
-
- pat = PATTERN (insn);
- dest = SET_DEST (pat);
- /* Check that the loaded register is not used, set, or killed from the
- beginning of the block. */
- if (reg_used_between_after_reload_p (dest,
- PREV_INSN (BB_HEAD (bb)), insn)
- || reg_set_between_after_reload_p (dest,
- PREV_INSN (BB_HEAD (bb)), insn))
- return;
-
- /* Check potential for replacing load with copy for predecessors. */
- for (pred = bb->pred; pred; pred = pred->pred_next)
- {
- rtx next_pred_bb_end;
-
- avail_insn = NULL_RTX;
- pred_bb = pred->src;
- next_pred_bb_end = NEXT_INSN (BB_END (pred_bb));
- for (a_occr = get_bb_avail_insn (pred_bb, expr->avail_occr); a_occr;
- a_occr = get_bb_avail_insn (pred_bb, a_occr->next))
- {
- /* Check if the loaded register is not used. */
- avail_insn = a_occr->insn;
- if (! (avail_reg = get_avail_load_store_reg (avail_insn)))
- abort ();
- /* Make sure we can generate a move from register avail_reg to
- dest. */
- extract_insn (gen_move_insn (copy_rtx (dest),
- copy_rtx (avail_reg)));
- if (! constrain_operands (1)
- || reg_killed_on_edge (avail_reg, pred)
- || reg_used_on_edge (dest, pred))
- {
- avail_insn = NULL;
- continue;
- }
- if (! reg_set_between_after_reload_p (avail_reg, avail_insn,
- next_pred_bb_end))
- /* AVAIL_INSN remains non-null. */
- break;
- else
- avail_insn = NULL;
- }
- if (avail_insn != NULL_RTX)
- {
- npred_ok++;
- ok_count += pred->count;
- if (EDGE_CRITICAL_P (pred))
- critical_count += pred->count;
- occr = gmalloc (sizeof (struct unoccr));
- occr->insn = avail_insn;
- occr->pred = pred;
- occr->next = avail_occrs;
- avail_occrs = occr;
- }
- else
- {
- not_ok_count += pred->count;
- if (EDGE_CRITICAL_P (pred))
- critical_count += pred->count;
- unoccr = gmalloc (sizeof (struct unoccr));
- unoccr->insn = NULL_RTX;
- unoccr->pred = pred;
- unoccr->next = unavail_occrs;
- unavail_occrs = unoccr;
- }
- }
-
- if (npred_ok == 0 /* No load can be replaced by copy. */
- || (optimize_size && npred_ok > 1)) /* Prevent exploding the code. */
- goto cleanup;
-
- /* Check if it's worth applying the partial redundancy elimination. */
- if (ok_count < GCSE_AFTER_RELOAD_PARTIAL_FRACTION * not_ok_count)
- goto cleanup;
-
- if (ok_count < GCSE_AFTER_RELOAD_CRITICAL_FRACTION * critical_count)
- goto cleanup;
-
- /* Generate moves to the loaded register from where
- the memory is available. */
- for (occr = avail_occrs; occr; occr = occr->next)
- {
- avail_insn = occr->insn;
- pred = occr->pred;
- /* Set avail_reg to be the register having the value of the
- memory. */
- avail_reg = get_avail_load_store_reg (avail_insn);
- if (! avail_reg)
- abort ();
-
- insert_insn_on_edge (gen_move_insn (copy_rtx (dest),
- copy_rtx (avail_reg)),
- pred);
-
- if (gcse_file)
- fprintf (gcse_file,
- "GCSE AFTER reload generating move from %d to %d on \
- edge from %d to %d\n",
- REGNO (avail_reg),
- REGNO (dest),
- pred->src->index,
- pred->dest->index);
- }
-
- /* Regenerate loads where the memory is unavailable. */
- for (unoccr = unavail_occrs; unoccr; unoccr = unoccr->next)
- {
- pred = unoccr->pred;
- insert_insn_on_edge (copy_insn (PATTERN (insn)), pred);
-
- if (gcse_file)
- fprintf (gcse_file,
- "GCSE AFTER reload: generating on edge from %d to %d\
- a copy of load:\n",
- pred->src->index,
- pred->dest->index);
- }
-
- /* Delete the insn if it is not available in this block and mark it
- for deletion if it is available. If insn is available it may help
- discover additional redundancies, so mark it for later deletion.*/
- for (a_occr = get_bb_avail_insn (bb, expr->avail_occr);
- a_occr && (a_occr->insn != insn);
- a_occr = get_bb_avail_insn (bb, a_occr->next));
-
- if (!a_occr)
- delete_insn (insn);
- else
- a_occr->deleted_p = 1;
-
-cleanup:
-
- while (unavail_occrs)
- {
- struct unoccr *temp = unavail_occrs->next;
- free (unavail_occrs);
- unavail_occrs = temp;
- }
-
- while (avail_occrs)
- {
- struct unoccr *temp = avail_occrs->next;
- free (avail_occrs);
- avail_occrs = temp;
- }
+ return optimize > 0 && flag_gcse;
}
-/* Performing the redundancy elimination as described before. */
-static void
-gcse_after_reload (void)
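+/* Execute function for the GCSE pass: run the main GCSE optimization and
+ follow up with CSE and jump cleanups where needed. */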
+static unsigned int
+rest_of_handle_gcse (void)
{
- unsigned int i;
- rtx insn;
- basic_block bb;
- struct expr *expr;
- struct occr *occr;
-
- /* Note we start at block 1. */
-
- if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
- return;
-
- FOR_BB_BETWEEN (bb,
- ENTRY_BLOCK_PTR->next_bb->next_bb,
- EXIT_BLOCK_PTR,
- next_bb)
- {
- if (! bb_has_well_behaved_predecessors (bb))
- continue;
-
- /* Do not try this optimization on cold basic blocks. */
- if (probably_cold_bb_p (bb))
- continue;
-
- reset_opr_set_tables ();
-
- for (insn = BB_HEAD (bb);
- insn != NULL
- && insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
- {
- /* Is it a load - of the form (set (reg) (mem))? */
- if (GET_CODE (insn) == INSN
- && GET_CODE (PATTERN (insn)) == SET
- && REG_P (SET_DEST (PATTERN (insn)))
- && MEM_P (SET_SRC (PATTERN (insn))))
- {
- rtx pat = PATTERN (insn);
- rtx src = SET_SRC (pat);
- struct expr *expr;
-
- if (general_operand (src, GET_MODE (src))
- /* Is the expression recorded? */
- && (expr = lookup_expr (src, &expr_hash_table)) != NULL
- /* Are the operands unchanged since the start of the
- block? */
- && oprs_not_set_p (src, insn)
- && ! MEM_VOLATILE_P (src)
- && GET_MODE (src) != BLKmode
- && !(flag_non_call_exceptions && may_trap_p (src))
- && !side_effects_p (src))
- {
- /* We now have a load (insn) and an available memory at
- its BB start (expr). Try to remove the loads if it is
- redundant. */
- eliminate_partially_redundant_loads (bb, insn, expr);
- }
- }
+ int save_csb, save_cfj;
+ int tem2 = 0, tem;
- /* Keep track of everything modified by this insn. */
- if (INSN_P (insn))
- mark_oprs_set (insn);
- }
- }
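+ /* Run the main global CSE pass, then clean up any dead code and jump
+ labels it exposed. */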
+ tem = gcse_main (get_insns ());
+ rebuild_jump_labels (get_insns ());
+ delete_trivially_dead_insns (get_insns (), max_reg_num ());
- commit_edge_insertions ();
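+ /* Save the CSE path-following flags and disable them for the possible
+ CSE re-run below; they are restored before returning. */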
+ save_csb = flag_cse_skip_blocks;
+ save_cfj = flag_cse_follow_jumps;
+ flag_cse_skip_blocks = flag_cse_follow_jumps = 0;
- /* Go over the expression hash table and delete insns that were
- marked for later deletion. */
- for (i = 0; i < expr_hash_table.size; i++)
- {
- for (expr = expr_hash_table.table[i];
- expr != NULL;
- expr = expr->next_same_hash)
- for (occr = expr->avail_occr; occr; occr = occr->next)
- if (occr->deleted_p)
- delete_insn (occr->insn);
- }
-}
-
-/* Scan pattern PAT of INSN and add an entry to the hash TABLE.
- After reload we are interested in loads/stores only. */
-
-static void
-hash_scan_set_after_reload (rtx pat, rtx insn, struct hash_table *table)
-{
- rtx src = SET_SRC (pat);
- rtx dest = SET_DEST (pat);
-
- if (! MEM_P (src) && ! MEM_P (dest))
- return;
-
- if (REG_P (dest))
- {
- if (/* Don't GCSE something if we can't do a reg/reg copy. */
- can_copy_p (GET_MODE (dest))
- /* GCSE commonly inserts instruction after the insn. We can't
- do that easily for EH_REGION notes so disable GCSE on these
- for now. */
- && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
- /* Is SET_SRC something we want to gcse? */
- && general_operand (src, GET_MODE (src))
- /* Don't CSE a nop. */
- && ! set_noop_p (pat)
- && ! JUMP_P (insn))
- {
- /* An expression is not available if its operands are
- subsequently modified, including this insn. */
- if (oprs_available_p (src, insn))
- insert_expr_in_table (src, GET_MODE (dest), insn, 0, 1, table);
- }
- }
- else if (REG_P (src))
+ /* If -fexpensive-optimizations, re-run CSE to clean up things done
+ by gcse. */
+ if (flag_expensive_optimizations)
{
- /* Only record sets of pseudo-regs in the hash table. */
- if (/* Don't GCSE something if we can't do a reg/reg copy. */
- can_copy_p (GET_MODE (src))
- /* GCSE commonly inserts instruction after the insn. We can't
- do that easily for EH_REGION notes so disable GCSE on these
- for now. */
- && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
- /* Is SET_DEST something we want to gcse? */
- && general_operand (dest, GET_MODE (dest))
- /* Don't CSE a nop. */
- && ! set_noop_p (pat)
- &&! JUMP_P (insn)
- && ! (flag_float_store && FLOAT_MODE_P (GET_MODE (dest)))
- /* Check if the memory expression is killed after insn. */
- && ! load_killed_in_block_p (BLOCK_FOR_INSN (insn),
- INSN_CUID (insn) + 1,
- dest,
- 1)
- && oprs_unchanged_p (XEXP (dest, 0), insn, 1))
- {
- insert_expr_in_table (dest, GET_MODE (dest), insn, 0, 1, table);
- }
+ timevar_push (TV_CSE);
+ reg_scan (get_insns (), max_reg_num ());
+ tem2 = cse_main (get_insns (), max_reg_num ());
+ purge_all_dead_edges ();
+ delete_trivially_dead_insns (get_insns (), max_reg_num ());
+ timevar_pop (TV_CSE);
+ cse_not_expected = !flag_rerun_cse_after_loop;
}
-}
-
-
-/* Create hash table of memory expressions available at end of basic
- blocks. */
-
-static void
-compute_hash_table_after_reload (struct hash_table *table)
-{
- unsigned int i;
-
- table->set_p = 0;
-
- /* Initialize count of number of entries in hash table. */
- table->n_elems = 0;
- memset ((char *) table->table, 0,
- table->size * sizeof (struct expr *));
-
- /* While we compute the hash table we also compute a bit array of which
- registers are set in which blocks. */
- sbitmap_vector_zero (reg_set_in_block, last_basic_block);
-
- /* Re-cache any INSN_LIST nodes we have allocated. */
- clear_modify_mem_tables ();
-
- /* Some working arrays used to track first and last set in each block. */
- reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
- for (i = 0; i < max_gcse_regno; ++i)
- reg_avail_info[i].last_bb = NULL;
-
- FOR_EACH_BB (current_bb)
+ /* If gcse or cse altered any jumps, rerun jump optimizations to clean
+ things up. */
+ if (tem || tem2)
{
- rtx insn;
- unsigned int regno;
-
- /* First pass over the instructions records information used to
- determine when registers and memory are first and last set. */
- for (insn = BB_HEAD (current_bb);
- insn && insn != NEXT_INSN (BB_END (current_bb));
- insn = NEXT_INSN (insn))
- {
- if (! INSN_P (insn))
- continue;
-
- if (CALL_P (insn))
- {
- bool clobbers_all = false;
-
-#ifdef NON_SAVING_SETJMP
- if (NON_SAVING_SETJMP
- && find_reg_note (insn, REG_SETJMP, NULL_RTX))
- clobbers_all = true;
-#endif
-
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- if (clobbers_all
- || TEST_HARD_REG_BIT (regs_invalidated_by_call,
- regno))
- record_last_reg_set_info (insn, regno);
-
- mark_call (insn);
- }
-
- note_stores (PATTERN (insn), record_last_set_info, insn);
-
- if (GET_CODE (PATTERN (insn)) == SET)
- {
- rtx src, dest;
-
- src = SET_SRC (PATTERN (insn));
- dest = SET_DEST (PATTERN (insn));
- if (MEM_P (src) && auto_inc_p (XEXP (src, 0)))
- {
- regno = REGNO (XEXP (XEXP (src, 0), 0));
- record_last_reg_set_info (insn, regno);
- }
- if (MEM_P (dest) && auto_inc_p (XEXP (dest, 0)))
- {
- regno = REGNO (XEXP (XEXP (dest, 0), 0));
- record_last_reg_set_info (insn, regno);
- }
- }
- }
-
- /* The next pass builds the hash table. */
- for (insn = BB_HEAD (current_bb);
- insn && insn != NEXT_INSN (BB_END (current_bb));
- insn = NEXT_INSN (insn))
- if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SET)
- if (! find_reg_note (insn, REG_LIBCALL, NULL_RTX))
- hash_scan_set_after_reload (PATTERN (insn), insn, table);
+ timevar_push (TV_JUMP);
+ rebuild_jump_labels (get_insns ());
+ delete_dead_jumptables ();
+ cleanup_cfg (CLEANUP_EXPENSIVE);
+ timevar_pop (TV_JUMP);
}
- free (reg_avail_info);
- reg_avail_info = NULL;
+ flag_cse_skip_blocks = save_csb;
+ flag_cse_follow_jumps = save_cfj;
+ return 0;
}
+struct tree_opt_pass pass_gcse =
+{
+ "gcse1", /* name */
+ gate_handle_gcse, /* gate */
+ rest_of_handle_gcse, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_GCSE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func |
+ TODO_verify_flow | TODO_ggc_collect, /* todo_flags_finish */
+ 'G' /* letter */
+};
-/* Main entry point of the GCSE after reload - clean some redundant loads
- due to spilling. */
-
-void
-gcse_after_reload_main (rtx f, FILE* file)
-{
- gcse_subst_count = 0;
- gcse_create_count = 0;
-
- gcse_file = file;
-
- gcc_obstack_init (&gcse_obstack);
- bytes_used = 0;
-
- /* We need alias. */
- init_alias_analysis ();
-
- max_gcse_regno = max_reg_num ();
-
- alloc_reg_set_mem (max_gcse_regno);
- alloc_gcse_mem (f);
- alloc_hash_table (max_cuid, &expr_hash_table, 0);
- compute_hash_table_after_reload (&expr_hash_table);
-
- if (gcse_file)
- dump_hash_table (gcse_file, "Expression", &expr_hash_table);
-
- if (expr_hash_table.n_elems > 0)
- gcse_after_reload ();
-
- free_hash_table (&expr_hash_table);
-
- free_gcse_mem ();
- free_reg_set_mem ();
-
- /* We are finished with alias. */
- end_alias_analysis ();
-
- obstack_free (&gcse_obstack, NULL);
-}
#include "gt-gcse.h"