/* Global common subexpression elimination/Partial redundancy elimination
and global constant/copy propagation for GNU compiler.
Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
- 2006, 2007 Free Software Foundation, Inc.
+ 2006, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
static void record_one_set (int, rtx);
static void record_set_info (rtx, const_rtx, void *);
static void compute_sets (void);
-static void hash_scan_insn (rtx, struct hash_table *, int);
+static void hash_scan_insn (rtx, struct hash_table *);
static void hash_scan_set (rtx, rtx, struct hash_table *);
static void hash_scan_clobber (rtx, rtx, struct hash_table *);
static void hash_scan_call (rtx, rtx, struct hash_table *);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
-static bool do_local_cprop (rtx, rtx, bool, rtx*);
-static bool adjust_libcall_notes (rtx, rtx, rtx, rtx*);
+static bool do_local_cprop (rtx, rtx, bool);
static void local_cprop_pass (bool);
static bool is_too_expensive (const char *);
+
+#define GNEW(T) ((T *) gmalloc (sizeof (T)))
+#define GCNEW(T) ((T *) gcalloc (1, sizeof (T)))
+
+#define GNEWVEC(T, N) ((T *) gmalloc (sizeof (T) * (N)))
+#define GCNEWVEC(T, N) ((T *) gcalloc ((N), sizeof (T)))
+#define GRESIZEVEC(T, P, N) ((T *) grealloc ((void *) (P), sizeof (T) * (N)))
+
+#define GNEWVAR(T, S) ((T *) gmalloc ((S)))
+#define GCNEWVAR(T, S) ((T *) gcalloc (1, (S)))
+#define GRESIZEVAR(T, P, S) ((T *) grealloc ((P), (S)))
+
+#define GOBNEW(T) ((T *) gcse_alloc (sizeof (T)))
+#define GOBNEWVAR(T, S) ((T *) gcse_alloc ((S)))
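/* Usage sketch (illustrative, not part of the patch): each macro just
   pairs the untyped allocator with a cast, so call sites stay free of
   casts.  The local below is hypothetical and only shows the
   expansion.  */
#if 0
struct reg_set **tbl = GNEWVEC (struct reg_set *, 16);
/* ... expands to (struct reg_set **) gmalloc (sizeof (struct reg_set *) * 16).  */
#endif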
\f
/* Entry point for global common subexpression elimination.
/* We do not construct an accurate cfg in functions which call
setjmp, so just punt to be safe. */
- if (current_function_calls_setjmp)
+ if (cfun->calls_setjmp)
return 0;
/* Assume that we do not need to run jump optimizations after gcse. */
pass = 0;
initial_bytes_used = bytes_used;
max_pass_bytes = 0;
- gcse_obstack_bottom = gcse_alloc (1);
+ gcse_obstack_bottom = GOBNEWVAR (char, 1);
changed = 1;
while (changed && pass < MAX_GCSE_PASSES)
{
/* Don't allow constant propagation to modify jumps
during this pass. */
- timevar_push (TV_CPROP1);
- changed = one_cprop_pass (pass + 1, false, false);
- timevar_pop (TV_CPROP1);
+ if (dbg_cnt (cprop1))
+ {
+ timevar_push (TV_CPROP1);
+ changed = one_cprop_pass (pass + 1, false, false);
+ timevar_pop (TV_CPROP1);
+ }
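/* Aside (illustrative): dbg_cnt () consults a named debug counter
   (declared via DEBUG_COUNTER in dbg-cnt.def) and returns true until
   a user-supplied limit is reached, so a bad transformation can be
   bisected from the command line, e.g. with a hypothetical limit:
     gcc -O2 -fdbg-cnt=cprop1:7 ...  */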
- if (optimize_size)
- /* Do nothing. */ ;
- else
+ if (optimize_function_for_speed_p (cfun))
{
timevar_push (TV_PRE);
changed |= one_pre_gcse_pass (pass + 1);
if (changed)
{
free_modify_mem_tables ();
- modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
- canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
+ modify_mem_list = GCNEWVEC (rtx, last_basic_block);
+ canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
}
free_reg_set_mem ();
alloc_reg_set_mem (max_reg_num ());
for code size -- it rarely makes programs faster, and can make
them bigger if we did partial redundancy elimination (when optimizing
for space, we don't run the partial redundancy algorithms). */
- if (optimize_size)
+ if (optimize_function_for_size_p (cfun))
{
timevar_push (TV_HOIST);
max_gcse_regno = max_reg_num ();
/* Do one last pass of copy propagation, including cprop into
conditional jumps. */
- max_gcse_regno = max_reg_num ();
- alloc_gcse_mem ();
- /* This time, go ahead and allow cprop to alter jumps. */
- timevar_push (TV_CPROP2);
- one_cprop_pass (pass + 1, true, true);
- timevar_pop (TV_CPROP2);
- free_gcse_mem ();
+ if (dbg_cnt (cprop2))
+ {
+ max_gcse_regno = max_reg_num ();
+ alloc_gcse_mem ();
+
+ /* This time, go ahead and allow cprop to alter jumps. */
+ timevar_push (TV_CPROP2);
+ one_cprop_pass (pass + 1, true, true);
+ timevar_pop (TV_CPROP2);
+ free_gcse_mem ();
+ }
if (dump_file)
{
/* We are finished with alias. */
end_alias_analysis ();
- if (!optimize_size && flag_gcse_sm)
+ if (optimize_function_for_speed_p (cfun) && flag_gcse_sm)
{
timevar_push (TV_LSM);
store_motion ();
but we should never see those anyway, so this is OK.) */
max_uid = get_max_uid ();
- uid_cuid = gcalloc (max_uid + 1, sizeof (int));
+ uid_cuid = GCNEWVEC (int, max_uid + 1);
i = 0;
FOR_EACH_BB (bb)
FOR_BB_INSNS (bb, insn)
reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno);
/* Allocate array to keep a list of insns which modify memory in each
basic block. */
- modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
- canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
+ modify_mem_list = GCNEWVEC (rtx, last_basic_block);
+ canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
modify_mem_list_set = BITMAP_ALLOC (NULL);
blocks_with_calls = BITMAP_ALLOC (NULL);
}
alloc_reg_set_mem (int n_regs)
{
reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
- reg_set_table = gcalloc (reg_set_table_size, sizeof (struct reg_set *));
+ reg_set_table = GCNEWVEC (struct reg_set *, reg_set_table_size);
gcc_obstack_init (&reg_set_obstack);
}
{
int new_size = regno + REG_SET_TABLE_SLOP;
- reg_set_table = grealloc (reg_set_table,
- new_size * sizeof (struct reg_set *));
+ reg_set_table = GRESIZEVEC (struct reg_set *, reg_set_table, new_size);
memset (reg_set_table + reg_set_table_size, 0,
(new_size - reg_set_table_size) * sizeof (struct reg_set *));
reg_set_table_size = new_size;
}
- new_reg_info = obstack_alloc (&reg_set_obstack, sizeof (struct reg_set));
+ new_reg_info = XOBNEW (&reg_set_obstack, struct reg_set);
bytes_used += sizeof (struct reg_set);
new_reg_info->bb_index = BLOCK_NUM (insn);
new_reg_info->next = reg_set_table[regno];
if (! found)
{
- cur_expr = gcse_alloc (sizeof (struct expr));
+ cur_expr = GOBNEW (struct expr);
bytes_used += sizeof (struct expr);
if (table->table[hash] == NULL)
/* This is the first pattern that hashed to this index. */
else
{
/* First occurrence of this expression in this basic block. */
- antic_occr = gcse_alloc (sizeof (struct occr));
+ antic_occr = GOBNEW (struct occr);
bytes_used += sizeof (struct occr);
antic_occr->insn = insn;
antic_occr->next = cur_expr->antic_occr;
else
{
/* First occurrence of this expression in this basic block. */
- avail_occr = gcse_alloc (sizeof (struct occr));
+ avail_occr = GOBNEW (struct occr);
bytes_used += sizeof (struct occr);
avail_occr->insn = insn;
avail_occr->next = cur_expr->avail_occr;
if (! found)
{
- cur_expr = gcse_alloc (sizeof (struct expr));
+ cur_expr = GOBNEW (struct expr);
bytes_used += sizeof (struct expr);
if (table->table[hash] == NULL)
/* This is the first pattern that hashed to this index. */
else
{
/* First occurrence of this expression in this basic block. */
- cur_occr = gcse_alloc (sizeof (struct occr));
+ cur_occr = GOBNEW (struct occr);
bytes_used += sizeof (struct occr);
cur_occr->insn = insn;
unsigned int regno = REGNO (dest);
rtx tmp;
- /* See if a REG_NOTE shows this equivalent to a simpler expression.
+ /* See if a REG_EQUAL note shows this equivalent to a simpler expression.
+
This allows us to do a single GCSE pass and still eliminate
redundant constants, addresses or other expressions that are
- constructed with multiple instructions. */
+ constructed with multiple instructions.
+
+ However, keep the original SRC if INSN is a simple reg-reg move. In
+ this case, there will almost always be a REG_EQUAL note on the
+ insn that sets SRC. By recording the REG_EQUAL value here as SRC
+ for INSN, we miss copy propagation opportunities and we perform the
+ same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
+ do more than one PRE GCSE pass.
+
+ Note that this does not impede profitable constant propagations. We
+ "look through" reg-reg sets in lookup_avail_set. */
note = find_reg_equal_equiv_note (insn);
if (note != 0
+ && REG_NOTE_KIND (note) == REG_EQUAL
+ && !REG_P (src)
&& (table->set_p
? gcse_constant_p (XEXP (note, 0))
: want_to_gcse_p (XEXP (note, 0))))
are also in the PARALLEL. Later.
If SET_P is nonzero, this is for the assignment hash table,
- otherwise it is for the expression hash table.
- If IN_LIBCALL_BLOCK nonzero, we are in a libcall block, and should
- not record any expressions. */
+ otherwise it is for the expression hash table. */
static void
-hash_scan_insn (rtx insn, struct hash_table *table, int in_libcall_block)
+hash_scan_insn (rtx insn, struct hash_table *table)
{
rtx pat = PATTERN (insn);
int i;
- if (in_libcall_block)
- return;
-
/* Pick out the sets of INSN and for other forms of instructions record
what's been modified. */
unsigned int *hash_val;
struct expr *expr;
- flat_table = xcalloc (table->n_elems, sizeof (struct expr *));
- hash_val = xmalloc (table->n_elems * sizeof (unsigned int));
+ flat_table = XCNEWVEC (struct expr *, table->n_elems);
+ hash_val = XNEWVEC (unsigned int, table->n_elems);
for (i = 0; i < (int) table->size; i++)
for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
/* re-Cache any INSN_LIST nodes we have allocated. */
clear_modify_mem_tables ();
/* Some working arrays used to track first and last set in each block. */
- reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
+ reg_avail_info = GNEWVEC (struct reg_avail_info, max_gcse_regno);
for (i = 0; i < max_gcse_regno; ++i)
reg_avail_info[i].last_bb = NULL;
{
rtx insn;
unsigned int regno;
- int in_libcall_block;
/* First pass over the instructions records information used to
determine when registers and memory are first and last set.
BB_HEAD (current_bb), table);
/* The next pass builds the hash table. */
- in_libcall_block = 0;
FOR_BB_INSNS (current_bb, insn)
if (INSN_P (insn))
- {
- if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
- in_libcall_block = 1;
- else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
- in_libcall_block = 0;
- hash_scan_insn (insn, table, in_libcall_block);
- if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
- in_libcall_block = 0;
- }
+ hash_scan_insn (insn, table);
}
free (reg_avail_info);
??? Later take some measurements. */
table->size |= 1;
n = table->size * sizeof (struct expr *);
- table->table = gmalloc (n);
+ table->table = GNEWVAR (struct expr *, n);
table->set_p = set_p;
}
static void
mark_call (rtx insn)
{
- if (! CONST_OR_PURE_CALL_P (insn))
+ if (! RTL_CONST_OR_PURE_CALL_P (insn))
record_last_mem_set_info (insn);
}
static int
cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
{
- rtx new, set_src, note_src;
+ rtx new_rtx, set_src, note_src;
rtx set = pc_set (jump);
rtx note = find_reg_equal_equiv_note (jump);
else
setcc = NULL_RTX;
- new = simplify_replace_rtx (set_src, from, src);
+ new_rtx = simplify_replace_rtx (set_src, from, src);
/* If no simplification can be made, then try the next register. */
- if (rtx_equal_p (new, SET_SRC (set)))
+ if (rtx_equal_p (new_rtx, SET_SRC (set)))
return 0;
/* If this is now a no-op delete it, otherwise this must be a valid insn. */
- if (new == pc_rtx)
+ if (new_rtx == pc_rtx)
delete_insn (jump);
else
{
/* Ensure the value computed inside the jump insn to be equivalent
to one computed by setcc. */
- if (setcc && modified_in_p (new, setcc))
+ if (setcc && modified_in_p (new_rtx, setcc))
return 0;
- if (! validate_change (jump, &SET_SRC (set), new, 0))
+ if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
{
/* When (some) constants are not valid in a comparison, and there
are two registers to be replaced by constants before the entire
we need to attach a note to the branch itself to make this
optimization work. */
- if (!rtx_equal_p (new, note_src))
- set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new));
+ if (!rtx_equal_p (new_rtx, note_src))
+ set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
return 0;
}
/* If a conditional jump has been changed into unconditional jump, remove
the jump and make the edge fallthru - this is always called in
cfglayout mode. */
- if (new != pc_rtx && simplejump_p (jump))
+ if (new_rtx != pc_rtx && simplejump_p (jump))
{
edge e;
edge_iterator ei;
find_used_regs (xptr, data);
}
-/* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
- their REG_EQUAL notes need updating. */
+/* Try to perform local const/copy propagation on X in INSN.
+ If ALTER_JUMPS is false, changing jump insns is not allowed. */
static bool
-do_local_cprop (rtx x, rtx insn, bool alter_jumps, rtx *libcall_sp)
+do_local_cprop (rtx x, rtx insn, bool alter_jumps)
{
rtx newreg = NULL, newcnst = NULL;
rtx this_rtx = l->loc;
rtx note;
- /* Don't CSE non-constant values out of libcall blocks. */
- if (l->in_libcall && ! CONSTANT_P (this_rtx))
- continue;
-
if (gcse_constant_p (this_rtx))
newcnst = this_rtx;
if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
}
if (newcnst && constprop_register (insn, x, newcnst, alter_jumps))
{
- /* If we find a case where we can't fix the retval REG_EQUAL notes
- match the new register, we either have to abandon this replacement
- or fix delete_trivially_dead_insns to preserve the setting insn,
- or make it delete the REG_EQUAL note, and fix up all passes that
- require the REG_EQUAL note there. */
- bool adjusted;
-
- adjusted = adjust_libcall_notes (x, newcnst, insn, libcall_sp);
- gcc_assert (adjusted);
-
if (dump_file != NULL)
{
fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
}
else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
{
- adjust_libcall_notes (x, newreg, insn, libcall_sp);
if (dump_file != NULL)
{
fprintf (dump_file,
return false;
}
-/* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
- their REG_EQUAL notes need updating to reflect that OLDREG has been
- replaced with NEWVAL in INSN. Return true if all substitutions could
- be made. */
-static bool
-adjust_libcall_notes (rtx oldreg, rtx newval, rtx insn, rtx *libcall_sp)
-{
- rtx end;
-
- while ((end = *libcall_sp++))
- {
- rtx note = find_reg_equal_equiv_note (end);
-
- if (! note)
- continue;
-
- if (REG_P (newval))
- {
- if (reg_set_between_p (newval, PREV_INSN (insn), end))
- {
- do
- {
- note = find_reg_equal_equiv_note (end);
- if (! note)
- continue;
- if (reg_mentioned_p (newval, XEXP (note, 0)))
- return false;
- }
- while ((end = *libcall_sp++));
- return true;
- }
- }
- XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), oldreg, newval);
- df_notes_rescan (end);
- insn = end;
- }
- return true;
-}
-
-#define MAX_NESTED_LIBCALLS 9
-
/* Do local const/copy propagation (i.e. within each basic block).
If ALTER_JUMPS is true, allow propagating into jump insns, which
could modify the CFG. */
basic_block bb;
rtx insn;
struct reg_use *reg_used;
- rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
bool changed = false;
cselib_init (false);
- libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
- *libcall_sp = 0;
FOR_EACH_BB (bb)
{
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
{
- rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
-
- if (note)
- {
- gcc_assert (libcall_sp != libcall_stack);
- *--libcall_sp = XEXP (note, 0);
- }
- note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
- if (note)
- libcall_sp++;
- note = find_reg_equal_equiv_note (insn);
+ rtx note = find_reg_equal_equiv_note (insn);
do
{
reg_use_count = 0;
for (reg_used = &reg_use_table[0]; reg_use_count > 0;
reg_used++, reg_use_count--)
{
- if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
- libcall_sp))
+ if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps))
{
changed = true;
break;
cselib_process_insn (insn);
}
- /* Forget everything at the end of a basic block. Make sure we are
- not inside a libcall, they should never cross basic blocks. */
+ /* Forget everything at the end of a basic block. */
cselib_clear_table ();
- gcc_assert (libcall_sp == &libcall_stack[MAX_NESTED_LIBCALLS]);
}
cselib_finish ();
{
basic_block bb, dest;
unsigned int count;
- rtx cond, new;
+ rtx cond, new_rtx;
count = 0;
FOR_EACH_BB (bb)
if (dest && single_pred_p (dest)
&& dest != EXIT_BLOCK_PTR)
{
- new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
+ new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
XEXP (cond, 1));
- implicit_sets[dest->index] = new;
+ implicit_sets[dest->index] = new_rtx;
if (dump_file)
{
fprintf(dump_file, "Implicit set of reg %d in ",
unsigned int regno = REGNO (reg_used->reg_rtx);
basic_block dest, old_dest;
struct expr *set;
- rtx src, new;
+ rtx src, new_rtx;
if (regno >= max_gcse_regno)
continue;
SET_DEST (PATTERN (setcc)),
SET_SRC (PATTERN (setcc)));
- new = simplify_replace_rtx (src, reg_used->reg_rtx,
+ new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
SET_SRC (set->expr));
/* Jump bypassing may have already placed instructions on
has instructions associated with it, as these insns won't
get executed if the incoming edge is redirected. */
- if (new == pc_rtx)
+ if (new_rtx == pc_rtx)
{
edest = FALLTHRU_EDGE (bb);
dest = edest->insns.r ? NULL : edest->dest;
}
- else if (GET_CODE (new) == LABEL_REF)
+ else if (GET_CODE (new_rtx) == LABEL_REF)
{
- dest = BLOCK_FOR_INSN (XEXP (new, 0));
+ dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
/* Don't bypass edges containing instructions. */
edest = find_edge (bb, dest);
if (edest && edest->insns.r)
static rtx
gcse_emit_move_after (rtx src, rtx dest, rtx insn)
{
- rtx new;
+ rtx new_rtx;
rtx set = single_set (insn), set2;
rtx note;
rtx eqv;
/* This should never fail since we're creating a reg->reg copy
we've verified to be valid. */
- new = emit_insn_after (gen_move_insn (dest, src), insn);
+ new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
/* Note the equivalence for local CSE pass. */
- set2 = single_set (new);
+ set2 = single_set (new_rtx);
if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
- return new;
+ return new_rtx;
if ((note = find_reg_equal_equiv_note (insn)))
eqv = XEXP (note, 0);
else
eqv = SET_SRC (set);
- set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
+ set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
- return new;
+ return new_rtx;
}
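/* Worked example (hypothetical register numbers): if PRE made
   "(set (reg 100) (plus (reg 101) (const_int 4)))" redundant, the
   caller passes SRC = expr->reaching_reg and DEST = (reg 100); the
   helper emits "(set (reg 100) (reg <reaching>))" after INSN and
   attaches the old REG_EQUAL (or SET_SRC) value as a REG_EQUAL note
   so later passes still see the PLUS.  */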
/* Delete redundant computations.
expressions into. Get the mode for the new pseudo from
the mode of the original destination pseudo. */
if (expr->reaching_reg == NULL)
- expr->reaching_reg
- = gen_reg_rtx (GET_MODE (SET_DEST (set)));
+ expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
delete_insn (insn);
such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
notes. */
gcc_assert (!JUMP_P (insn));
- REG_NOTES (insn)
- = gen_rtx_INSN_LIST (REG_LABEL_OPERAND, XEXP (x, 0),
- REG_NOTES (insn));
+ add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
+
if (LABEL_P (XEXP (x, 0)))
LABEL_NUSES (XEXP (x, 0))++;
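/* Aside (illustrative): add_reg_note replaces the open-coded note
   chaining deleted above; semantically it prepends a note of the
   given kind, choosing the correct list node type for that kind,
   i.e. the old
     REG_NOTES (insn)
       = gen_rtx_INSN_LIST (REG_LABEL_OPERAND, XEXP (x, 0),
                            REG_NOTES (insn));
   collapses to the single call.  */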
FOR_EACH_BB (bb)
{
- /* Note that flow inserted a nop a the end of basic blocks that
+ /* Note that flow inserted a nop at the end of basic blocks that
end in call instructions for reasons other than abnormal
control flow. */
if (! CALL_P (BB_END (bb)))
from the mode of the original destination pseudo. */
if (expr->reaching_reg == NULL)
expr->reaching_reg
- = gen_reg_rtx (GET_MODE (SET_DEST (set)));
+ = gen_reg_rtx_and_attrs (SET_DEST (set));
gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
delete_insn (insn);
pre_ldst_expr_hash (const void *p)
{
int do_not_record_p = 0;
- const struct ls_expr *x = p;
+ const struct ls_expr *const x = (const struct ls_expr *) p;
return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}
static int
pre_ldst_expr_eq (const void *p1, const void *p2)
{
- const struct ls_expr *ptr1 = p1, *ptr2 = p2;
+ const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
+ *const ptr2 = (const struct ls_expr *) p2;
return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}
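/* Aside (illustrative): these are the libiberty hash-table callbacks
   for pre_ldst_table, created along the lines of
     htab_create (n, pre_ldst_expr_hash, pre_ldst_expr_eq, NULL);
   The usual invariant applies: patterns that compare equal under
   pre_ldst_expr_eq must hash identically under pre_ldst_expr_hash.  */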
slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
if (!slot || ((struct ls_expr *)*slot)->invalid)
return NULL;
- return *slot;
+ return (struct ls_expr *) *slot;
}
/* Assign each element of the list of mems a monotonically increasing value. */
rtx pat = PATTERN (insn);
rtx src = SET_SRC (pat);
rtx reg = expr->reaching_reg;
- rtx copy, new;
+ rtx copy, new_rtx;
/* If we've already copied it, continue. */
if (expr->reaching_reg == src)
}
copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat)));
- new = emit_insn_before (copy, insn);
- record_one_set (REGNO (reg), new);
+ new_rtx = emit_insn_before (copy, insn);
+ record_one_set (REGNO (reg), new_rtx);
SET_SRC (pat) = reg;
df_insn_rescan (insn);
reg_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
void *data)
{
- sbitmap bb_reg = data;
+ sbitmap bb_reg = (sbitmap) data;
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
reg_clear_last_set (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
void *data)
{
- int *dead_vec = data;
+ int *dead_vec = (int *) data;
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
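/* Aside (assumption: the standard note_stores callback protocol):
   callers hand these functions to note_stores together with a user
   pointer, e.g.
     note_stores (PATTERN (insn), reg_set_info, (void *) bb_regs);
   where bb_regs is a hypothetical sbitmap; note_stores invokes the
   callback once per SET/CLOBBER destination, threading DATA through
   unchanged, hence the explicit casts added above.  */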
{
/* A normal or pure call might read from pattern,
but a const call will not. */
- if (! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn))
+ if (!RTL_CONST_CALL_P (insn))
return true;
/* But even a const call reads its parameters. Check whether the
are any side effects. */
if (TEST_BIT (ae_gen[bb->index], ptr->index))
{
- rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
+ rtx r = gen_reg_rtx_and_attrs (ptr->pattern);
if (dump_file)
fprintf (dump_file, "Removing redundant store:\n");
replace_store_insn (r, XEXP (st, 0), bb, ptr);
static void
replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr)
{
- rtx insn, mem, note, set, ptr, pair;
+ rtx insn, mem, note, set, ptr;
mem = smexpr->pattern;
insn = gen_move_insn (reg, SET_SRC (single_set (del)));
break;
}
- /* Move the notes from the deleted insn to its replacement, and patch
- up the LIBCALL notes. */
+ /* Move the notes from the deleted insn to its replacement. */
REG_NOTES (insn) = REG_NOTES (del);
- note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
- if (note)
- {
- pair = XEXP (note, 0);
- note = find_reg_note (pair, REG_LIBCALL, NULL_RTX);
- XEXP (note, 0) = insn;
- }
- note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
- if (note)
- {
- pair = XEXP (note, 0);
- note = find_reg_note (pair, REG_RETVAL, NULL_RTX);
- XEXP (note, 0) = insn;
- }
-
/* Emit the insn AFTER all the notes are transferred.
This is cheaper since we avoid df rescanning for the note change. */
insn = emit_insn_after (insn, del);
rtx reg, i, del;
if (expr->reaching_reg == NULL_RTX)
- expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
+ expr->reaching_reg = gen_reg_rtx_and_attrs (expr->pattern);
reg = expr->reaching_reg;
/* We do not construct an accurate cfg in functions which call
setjmp, so just punt to be safe. */
- if (current_function_calls_setjmp)
+ if (cfun->calls_setjmp)
return 0;
/* Identify the basic block information for this function, including
static bool
gate_handle_jump_bypass (void)
{
- return optimize > 0 && flag_gcse;
+ return optimize > 0 && flag_gcse
+ && dbg_cnt (jump_bypass);
}
/* Perform jump bypassing and control flow optimizations. */
return 0;
}
-struct tree_opt_pass pass_jump_bypass =
+struct rtl_opt_pass pass_jump_bypass =
{
+ {
+ RTL_PASS,
"bypass", /* name */
gate_handle_jump_bypass, /* gate */
rest_of_handle_jump_bypass, /* execute */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_dump_func |
- TODO_ggc_collect | TODO_verify_flow, /* todo_flags_finish */
- 'G' /* letter */
+ TODO_ggc_collect | TODO_verify_flow /* todo_flags_finish */
+ }
};
static bool
gate_handle_gcse (void)
{
- return optimize > 0 && flag_gcse;
+ return optimize > 0 && flag_gcse
+ && dbg_cnt (gcse);
}
return 0;
}
-struct tree_opt_pass pass_gcse =
+struct rtl_opt_pass pass_gcse =
{
+ {
+ RTL_PASS,
"gcse1", /* name */
gate_handle_gcse, /* gate */
rest_of_handle_gcse, /* execute */
0, /* todo_flags_start */
TODO_df_finish | TODO_verify_rtl_sharing |
TODO_dump_func |
- TODO_verify_flow | TODO_ggc_collect, /* todo_flags_finish */
- 'G' /* letter */
+ TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
+ }
};
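/* Aside (illustrative): rtl_opt_pass embeds the generic pass as its
   first member, roughly
     struct rtl_opt_pass { struct opt_pass pass; };
   so the extra level of braces initializes that base and the leading
   RTL_PASS fills its type field.  The pass manager dispatches on the
   type, which is why the old single-letter 'letter' field ('G') is
   dropped from the initializers above.  */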