/* Reload pseudo regs into hard regs for insns that require hard regs.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GCC.
#include "regs.h"
#include "addresses.h"
#include "basic-block.h"
+#include "df.h"
#include "reload.h"
#include "recog.h"
#include "output.h"
-#include "real.h"
#include "toplev.h"
#include "except.h"
#include "tree.h"
-#include "df.h"
+#include "ira.h"
#include "target.h"
-#include "dse.h"
+#include "emit-rtl.h"
/* This file contains the reload pass of the compiler, which is
run after register allocation has been done. It checks that
/* Record which pseudos needed to be spilled. */
static regset_head spilled_pseudos;
+/* Record which pseudos changed their allocation in finish_spills. */
+static regset_head changed_allocation_pseudos;
+
/* Used for communication between order_regs_for_reload and count_pseudo.
Used to avoid counting one pseudo twice. */
static regset_head pseudos_counted;
int to; /* Register number used as replacement. */
HOST_WIDE_INT initial_offset; /* Initial difference between values. */
int can_eliminate; /* Nonzero if this elimination can be done. */
- int can_eliminate_previous; /* Value of CAN_ELIMINATE in previous scan over
- insns made by reload. */
+ int can_eliminate_previous; /* Value returned by TARGET_CAN_ELIMINATE
+ target hook in previous scan over insns
+ made by reload. */
HOST_WIDE_INT offset; /* Current offset between the two regs. */
HOST_WIDE_INT previous_offset;/* Offset at end of previous insn. */
int ref_outside_mem; /* "to" has been referenced outside a MEM. */
static char *offsets_known_at;
static HOST_WIDE_INT (*offsets_at)[NUM_ELIMINABLE_REGS];
+/* Stack of addresses where an rtx has been changed. We can undo the
+ changes by popping items off the stack and restoring the original
+ value at each location.
+
+ We use this simplistic undo capability rather than copy_rtx as copy_rtx
+ will not make a deep copy of a normally sharable rtx, such as
+ (const (plus (symbol_ref) (const_int))). If such an expression appears
+ as R1 in gen_reload_chain_without_interm_reg_p, then a shared
+ rtx expression would be changed. See PR 42431. */
+
+typedef rtx *rtx_p;
+DEF_VEC_P(rtx_p);
+DEF_VEC_ALLOC_P(rtx_p,heap);
+static VEC(rtx_p,heap) *substitute_stack;
+
/* Number of labels in the current function. */
static int num_labels;
static void spill_failure (rtx, enum reg_class);
static void count_spilled_pseudo (int, int, int);
static void delete_dead_insn (rtx);
-static void alter_reg (int, int);
+static void alter_reg (int, int, bool);
static void set_label_offsets (rtx, rtx, int);
static void check_eliminable_occurrences (rtx);
static void elimination_effects (rtx, enum machine_mode);
#ifdef AUTO_INC_DEC
static void add_auto_inc_notes (rtx, rtx);
#endif
-static void copy_eh_notes (rtx, rtx);
+static void substitute (rtx *, const_rtx, rtx);
+static bool gen_reload_chain_without_interm_reg_p (int, int);
static int reloads_conflict (int, int);
static rtx gen_reload (rtx, rtx, int, enum reload_type);
static rtx emit_insn_if_valid_for_reload (rtx);
/* Initialize obstack for our rtl allocation. */
gcc_obstack_init (&reload_obstack);
- reload_startobj = obstack_alloc (&reload_obstack, 0);
+ reload_startobj = XOBNEWVAR (&reload_obstack, char, 0);
INIT_REG_SET (&spilled_pseudos);
+ INIT_REG_SET (&changed_allocation_pseudos);
INIT_REG_SET (&pseudos_counted);
}
if (unused_insn_chains == 0)
{
- c = obstack_alloc (&reload_obstack, sizeof (struct insn_chain));
+ c = XOBNEW (&reload_obstack, struct insn_chain);
INIT_REG_SET (&c->live_throughout);
INIT_REG_SET (&c->dead_or_set);
}
if (r < 0)
{
- /* reload_combine uses the information from
- DF_LIVE_IN (BASIC_BLOCK), which might still
- contain registers that have not actually been allocated
- since they have an equivalence. */
- gcc_assert (reload_completed);
+ /* reload_combine uses the information from DF_LIVE_IN,
+ which might still contain registers that have not
+ actually been allocated since they have an
+ equivalence. */
+ gcc_assert (ira_conflicts_p || reload_completed);
}
else
add_to_hard_reg_set (to, PSEUDO_REGNO_MODE (regno), r);
/* If we're not optimizing, then just err on the safe side. */
if (!optimize)
return true;
-
+
/* First determine which blocks can reach exit via normal paths. */
- tos = worklist = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
FOR_EACH_BB (bb)
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
EXIT_BLOCK_PTR->flags |= BB_REACHABLE;
*tos++ = EXIT_BLOCK_PTR;
-
+
/* Iterate: find everything reachable from what we've already seen. */
while (tos != worklist)
{
static int something_needs_elimination;
/* Set during calculate_needs if an insn needs an operand changed. */
static int something_needs_operands_changed;
+/* Set by alter_regs if we spilled a register to the stack. */
+static bool something_was_spilled;
/* Nonzero means we couldn't get enough spill regs. */
static int failure;
+/* Temporary array of pseudo-register number. */
+static int *temp_pseudo_reg_arr;
+
/* Main entry point for the reload pass.
FIRST is the first insn of the function being compiled.
int
reload (rtx first, int global)
{
- int i;
+ int i, n;
rtx insn;
struct elim_table *ep;
basic_block bb;
failure = 0;
- reload_firstobj = obstack_alloc (&reload_obstack, 0);
+ reload_firstobj = XOBNEWVAR (&reload_obstack, char, 0);
/* Make sure that the last insn in the chain
is not something that needs reloading. */
&& GET_MODE (insn) != VOIDmode)
PUT_MODE (insn, VOIDmode);
- if (INSN_P (insn))
+ if (NONDEBUG_INSN_P (insn))
scan_paradoxical_subregs (PATTERN (insn));
if (set != 0 && REG_P (SET_DEST (set)))
offsets_known_at = XNEWVEC (char, num_labels);
offsets_at = (HOST_WIDE_INT (*)[NUM_ELIMINABLE_REGS]) xmalloc (num_labels * NUM_ELIMINABLE_REGS * sizeof (HOST_WIDE_INT));
- /* Alter each pseudo-reg rtx to contain its hard reg number.
- Assign stack slots to the pseudos that lack hard regs or equivalents.
+ /* Alter each pseudo-reg rtx to contain its hard reg number. Assign
+ stack slots to the pseudos that lack hard regs or equivalents.
Do not touch virtual registers. */
- for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
- alter_reg (i, -1);
+ temp_pseudo_reg_arr = XNEWVEC (int, max_regno - LAST_VIRTUAL_REGISTER - 1);
+ for (n = 0, i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
+ temp_pseudo_reg_arr[n++] = i;
+
+ if (ira_conflicts_p)
+ /* Ask IRA to order pseudo-registers for better stack slot
+ sharing. */
+ ira_sort_regnos_for_alter_reg (temp_pseudo_reg_arr, n, reg_max_ref_width);
+
+ for (i = 0; i < n; i++)
+ alter_reg (temp_pseudo_reg_arr[i], -1, false);
/* If we have some registers we think can be eliminated, scan all insns to
see if there is an insn that sets one of these registers to something
HOST_WIDE_INT starting_frame_size;
starting_frame_size = get_frame_size ();
+ something_was_spilled = false;
set_initial_elim_offsets ();
set_initial_label_offsets ();
for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
if (reg_renumber[i] < 0 && reg_equiv_memory_loc[i])
{
- rtx x = eliminate_regs (reg_equiv_memory_loc[i], 0, NULL_RTX);
+ rtx x = eliminate_regs (reg_equiv_memory_loc[i], VOIDmode,
+ NULL_RTX);
- if (strict_memory_address_p (GET_MODE (regno_reg_rtx[i]),
- XEXP (x, 0)))
+ if (strict_memory_address_addr_space_p
+ (GET_MODE (regno_reg_rtx[i]), XEXP (x, 0),
+ MEM_ADDR_SPACE (x)))
reg_equiv_mem[i] = x, reg_equiv_address[i] = 0;
else if (CONSTANT_P (XEXP (x, 0))
|| (REG_P (XEXP (x, 0))
the loop. */
reg_equiv_memory_loc[i] = 0;
reg_equiv_init[i] = 0;
- alter_reg (i, -1);
+ alter_reg (i, -1, true);
}
}
setup_save_areas ();
/* If we allocated another stack slot, redo elimination bookkeeping. */
- if (starting_frame_size != get_frame_size ())
+ if (something_was_spilled || starting_frame_size != get_frame_size ())
continue;
if (starting_frame_size && crtl->stack_alignment_needed)
{
{
save_call_clobbered_regs ();
/* That might have allocated new insn_chain structures. */
- reload_firstobj = obstack_alloc (&reload_obstack, 0);
+ reload_firstobj = XOBNEWVAR (&reload_obstack, char, 0);
}
calculate_needs_all_insns (global);
- CLEAR_REG_SET (&spilled_pseudos);
+ if (! ira_conflicts_p)
+ /* Don't do it for IRA. We need this info because we don't
+ change live_throughout and dead_or_set for chains when IRA
+ is used. */
+ CLEAR_REG_SET (&spilled_pseudos);
+
did_spill = 0;
something_changed = 0;
/* If we allocated any new memory locations, make another pass
since it might have changed elimination offsets. */
- if (starting_frame_size != get_frame_size ())
+ if (something_was_spilled || starting_frame_size != get_frame_size ())
something_changed = 1;
/* Even if the frame size remained the same, we might still have
- changed elimination offsets, e.g. if find_reloads called
+ changed elimination offsets, e.g. if find_reloads called
force_const_mem requiring the back end to allocate a constant
pool base register that needs to be saved on the stack. */
else if (!verify_initial_elim_offsets ())
if (! frame_pointer_needed)
FOR_EACH_BB (bb)
bitmap_clear_bit (df_get_live_in (bb), HARD_FRAME_POINTER_REGNUM);
-
+
/* Come here (with failure set nonzero) if we can't get enough spill
regs. */
failed:
+ CLEAR_REG_SET (&changed_allocation_pseudos);
CLEAR_REG_SET (&spilled_pseudos);
reload_in_progress = 0;
else if (reg_equiv_mem[i])
XEXP (reg_equiv_mem[i], 0) = addr;
}
+
+ /* We don't want complex addressing modes in debug insns
+ if simpler ones will do, so delegitimize equivalences
+ in debug insns. */
+ if (MAY_HAVE_DEBUG_INSNS && reg_renumber[i] < 0)
+ {
+ rtx reg = regno_reg_rtx[i];
+ rtx equiv = 0;
+ df_ref use, next;
+
+ if (reg_equiv_constant[i])
+ equiv = reg_equiv_constant[i];
+ else if (reg_equiv_invariant[i])
+ equiv = reg_equiv_invariant[i];
+ else if (reg && MEM_P (reg))
+ equiv = targetm.delegitimize_address (reg);
+ else if (reg && REG_P (reg) && (int)REGNO (reg) != i)
+ equiv = reg;
+
+ if (equiv == reg)
+ continue;
+
+ for (use = DF_REG_USE_CHAIN (i); use; use = next)
+ {
+ insn = DF_REF_INSN (use);
+
+ /* Make sure the next ref is for a different instruction,
+ so that we're not affected by the rescan. */
+ next = DF_REF_NEXT_REG (use);
+ while (next && DF_REF_INSN (next) == insn)
+ next = DF_REF_NEXT_REG (next);
+
+ if (DEBUG_INSN_P (insn))
+ {
+ if (!equiv)
+ {
+ INSN_VAR_LOCATION_LOC (insn) = gen_rtx_UNKNOWN_VAR_LOC ();
+ df_insn_rescan_debug_internal (insn);
+ }
+ else
+ INSN_VAR_LOCATION_LOC (insn)
+ = simplify_replace_rtx (INSN_VAR_LOCATION_LOC (insn),
+ reg, equiv);
+ }
+ }
+ }
}
/* We must set reload_completed now since the cleanup_subreg_operands call
notes. Delete all CLOBBER insns, except those that refer to the return
value and the special mem:BLK CLOBBERs added to prevent the scheduler
from misarranging variable-array code, and simplify (subreg (reg))
- operands. Also remove all REG_RETVAL and REG_LIBCALL notes since they
- are no longer useful or accurate. Strip and regenerate REG_INC notes
- that may have been moved around. */
+ operands. Strip and regenerate REG_INC notes that may have been moved
+ around. */
for (insn = first; insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
if (REG_NOTE_KIND (*pnote) == REG_DEAD
|| REG_NOTE_KIND (*pnote) == REG_UNUSED
- || REG_NOTE_KIND (*pnote) == REG_INC
- || REG_NOTE_KIND (*pnote) == REG_RETVAL
- || REG_NOTE_KIND (*pnote) == REG_LIBCALL)
+ || REG_NOTE_KIND (*pnote) == REG_INC)
*pnote = XEXP (*pnote, 1);
else
pnote = &XEXP (*pnote, 1);
}
}
- /* If we are doing stack checking, give a warning if this function's
- frame size is larger than we expect. */
- if (flag_stack_check && ! STACK_CHECK_BUILTIN)
+ /* If we are doing generic stack checking, give a warning if this
+ function's frame size is larger than we expect. */
+ if (flag_stack_check == GENERIC_STACK_CHECK)
{
HOST_WIDE_INT size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
static int verbose_warned = 0;
VEC_free (rtx, gc, reg_equiv_memory_loc_vec);
reg_equiv_memory_loc = 0;
+ free (temp_pseudo_reg_arr);
+
if (offsets_known_at)
free (offsets_known_at);
if (offsets_at)
REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = BITS_PER_UNIT;
#endif
+ VEC_free (rtx_p, heap, substitute_stack);
+
return failure;
}
copy_reloads (struct insn_chain *chain)
{
chain->n_reloads = n_reloads;
- chain->rld = obstack_alloc (&reload_obstack,
- n_reloads * sizeof (struct reload));
+ chain->rld = XOBNEWVEC (&reload_obstack, struct reload, n_reloads);
memcpy (chain->rld, rld, n_reloads * sizeof (struct reload));
- reload_insn_firstobj = obstack_alloc (&reload_obstack, 0);
+ reload_insn_firstobj = XOBNEWVAR (&reload_obstack, char, 0);
}
/* Walk the chain of insns, and determine for each whether it needs reloads
something_needs_elimination = 0;
- reload_insn_firstobj = obstack_alloc (&reload_obstack, 0);
+ reload_insn_firstobj = XOBNEWVAR (&reload_obstack, char, 0);
for (chain = reload_insn_chain; chain != 0; chain = next)
{
rtx insn = chain->insn;
{
rtx set = single_set (insn);
if (set
- && SET_SRC (set) == SET_DEST (set)
- && REG_P (SET_SRC (set))
- && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER)
+ &&
+ ((SET_SRC (set) == SET_DEST (set)
+ && REG_P (SET_SRC (set))
+ && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER)
+ || (REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))
+ && reg_renumber[REGNO (SET_SRC (set))] < 0
+ && reg_renumber[REGNO (SET_DEST (set))] < 0
+ && reg_equiv_memory_loc[REGNO (SET_SRC (set))] != NULL
+ && reg_equiv_memory_loc[REGNO (SET_DEST (set))] != NULL
+ && rtx_equal_p (reg_equiv_memory_loc
+ [REGNO (SET_SRC (set))],
+ reg_equiv_memory_loc
+ [REGNO (SET_DEST (set))]))))
{
+ if (ira_conflicts_p)
+ /* Inform IRA about the insn deletion. */
+ ira_mark_memory_move_deletion (REGNO (SET_DEST (set)),
+ REGNO (SET_SRC (set)));
delete_insn (insn);
/* Delete it from the reload chain. */
if (chain->prev)
return t;
/* Count all solitary classes before non-solitary ones. */
- t = ((reg_class_size[(int) rld[r2].class] == 1)
- - (reg_class_size[(int) rld[r1].class] == 1));
+ t = ((reg_class_size[(int) rld[r2].rclass] == 1)
+ - (reg_class_size[(int) rld[r1].rclass] == 1));
if (t != 0)
return t;
return t;
/* Consider reloads in order of increasing reg-class number. */
- t = (int) rld[r1].class - (int) rld[r2].class;
+ t = (int) rld[r1].rclass - (int) rld[r2].rclass;
if (t != 0)
return t;
only the first hard reg for a multi-reg pseudo. */
static int spill_add_cost[FIRST_PSEUDO_REGISTER];
+/* Map of hard regno to pseudo regno currently occupying the hard
+ reg. */
+static int hard_regno_to_pseudo_regno[FIRST_PSEUDO_REGISTER];
+
/* Update the spill cost arrays, considering that pseudo REG is live. */
static void
int nregs;
if (REGNO_REG_SET_P (&pseudos_counted, reg)
- || REGNO_REG_SET_P (&spilled_pseudos, reg))
+ || REGNO_REG_SET_P (&spilled_pseudos, reg)
+ /* Ignore spilled pseudo-registers which can be here only if IRA
+ is used. */
+ || (ira_conflicts_p && r < 0))
return;
SET_REGNO_REG_SET (&pseudos_counted, reg);
gcc_assert (r >= 0);
spill_add_cost[r] += freq;
-
nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (reg)];
while (nregs-- > 0)
- spill_cost[r + nregs] += freq;
+ {
+ hard_regno_to_pseudo_regno[r + nregs] = reg;
+ spill_cost[r + nregs] += freq;
+ }
}
/* Calculate the SPILL_COST and SPILL_ADD_COST arrays and determine the
memset (spill_cost, 0, sizeof spill_cost);
memset (spill_add_cost, 0, sizeof spill_add_cost);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ hard_regno_to_pseudo_regno[i] = -1;
/* Count number of uses of each hard reg by pseudo regs allocated to it
and then order them by decreasing use. First exclude hard registers
static void
count_spilled_pseudo (int spilled, int spilled_nregs, int reg)
{
+ int freq = REG_FREQ (reg);
int r = reg_renumber[reg];
int nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (reg)];
- if (REGNO_REG_SET_P (&spilled_pseudos, reg)
+ /* Ignore spilled pseudo-registers which can be here only if IRA is
+ used. */
+ if ((ira_conflicts_p && r < 0)
+ || REGNO_REG_SET_P (&spilled_pseudos, reg)
|| spilled + spilled_nregs <= r || r + nregs <= spilled)
return;
SET_REGNO_REG_SET (&spilled_pseudos, reg);
- spill_add_cost[r] -= REG_FREQ (reg);
+ spill_add_cost[r] -= freq;
while (nregs-- > 0)
- spill_cost[r + nregs] -= REG_FREQ (reg);
+ {
+ hard_regno_to_pseudo_regno[r + nregs] = -1;
+ spill_cost[r + nregs] -= freq;
+ }
}
/* Find reload register to use for reload number ORDER. */
struct reload *rl = rld + rnum;
int best_cost = INT_MAX;
int best_reg = -1;
- unsigned int i, j;
+ unsigned int i, j, n;
int k;
HARD_REG_SET not_usable;
HARD_REG_SET used_by_other_reload;
reg_set_iterator rsi;
+ static int regno_pseudo_regs[FIRST_PSEUDO_REGISTER];
+ static int best_regno_pseudo_regs[FIRST_PSEUDO_REGISTER];
COPY_HARD_REG_SET (not_usable, bad_spill_regs);
IOR_HARD_REG_SET (not_usable, bad_spill_regs_global);
- IOR_COMPL_HARD_REG_SET (not_usable, reg_class_contents[rl->class]);
+ IOR_COMPL_HARD_REG_SET (not_usable, reg_class_contents[rl->rclass]);
CLEAR_HARD_REG_SET (used_by_other_reload);
for (k = 0; k < order; k++)
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
+#ifdef REG_ALLOC_ORDER
+ unsigned int regno = reg_alloc_order[i];
+#else
unsigned int regno = i;
+#endif
if (! TEST_HARD_REG_BIT (not_usable, regno)
&& ! TEST_HARD_REG_BIT (used_by_other_reload, regno)
}
if (! ok)
continue;
+
+ if (ira_conflicts_p)
+ {
+ /* Ask IRA to find a better pseudo-register for
+ spilling. */
+ for (n = j = 0; j < this_nregs; j++)
+ {
+ int r = hard_regno_to_pseudo_regno[regno + j];
+
+ if (r < 0)
+ continue;
+ if (n == 0 || regno_pseudo_regs[n - 1] != r)
+ regno_pseudo_regs[n++] = r;
+ }
+ regno_pseudo_regs[n++] = -1;
+ if (best_reg < 0
+ || ira_better_spill_reload_regno_p (regno_pseudo_regs,
+ best_regno_pseudo_regs,
+ rl->in, rl->out,
+ chain->insn))
+ {
+ best_reg = regno;
+ for (j = 0;; j++)
+ {
+ best_regno_pseudo_regs[j] = regno_pseudo_regs[j];
+ if (regno_pseudo_regs[j] < 0)
+ break;
+ }
+ }
+ continue;
+ }
+
if (rl->in && REG_P (rl->in) && REGNO (rl->in) == regno)
this_cost--;
if (rl->out && REG_P (rl->out) && REGNO (rl->out) == regno)
{
gcc_assert (spill_cost[best_reg + i] == 0);
gcc_assert (spill_add_cost[best_reg + i] == 0);
+ gcc_assert (hard_regno_to_pseudo_regno[best_reg + i] == -1);
SET_HARD_REG_BIT (used_spill_regs_local, best_reg + i);
}
return 1;
{
if (dump_file)
fprintf (dump_file, "reload failure for reload %d\n", r);
- spill_failure (chain->insn, rld[r].class);
+ spill_failure (chain->insn, rld[r].rclass);
failure = 1;
return;
}
INSN should be one of the insns which needed this particular spill reg. */
static void
-spill_failure (rtx insn, enum reg_class class)
+spill_failure (rtx insn, enum reg_class rclass)
{
if (asm_noperands (PATTERN (insn)) >= 0)
error_for_asm (insn, "can't find a register in class %qs while "
"reloading %<asm%>",
- reg_class_names[class]);
+ reg_class_names[rclass]);
else
{
error ("unable to find a register to spill in class %qs",
- reg_class_names[class]);
+ reg_class_names[rclass]);
if (dump_file)
{
can share one stack slot. */
static void
-alter_reg (int i, int from_reg)
+alter_reg (int i, int from_reg, bool dont_share_p)
{
/* When outputting an inline function, this can happen
for a reg that isn't actually used. */
&& (reg_equiv_invariant[i] == 0 || reg_equiv_init[i] == 0)
&& reg_equiv_memory_loc[i] == 0)
{
- rtx x;
+ rtx x = NULL_RTX;
enum machine_mode mode = GET_MODE (regno_reg_rtx[i]);
unsigned int inherent_size = PSEUDO_REGNO_BYTES (i);
unsigned int inherent_align = GET_MODE_ALIGNMENT (mode);
unsigned int min_align = reg_max_ref_width[i] * BITS_PER_UNIT;
int adjust = 0;
+ something_was_spilled = true;
+
+ if (ira_conflicts_p)
+ {
+ /* Mark the spill for IRA. */
+ SET_REGNO_REG_SET (&spilled_pseudos, i);
+ if (!dont_share_p)
+ x = ira_reuse_stack_slot (i, inherent_size, total_size);
+ }
+
+ if (x)
+ ;
+
/* Each pseudo reg has an inherent size which comes from its own mode,
and a total size which provides room for paradoxical subregs
which refer to the pseudo reg in wider modes.
enough inherent space and enough total space.
Otherwise, we allocate a new slot, making sure that it has no less
inherent space, and no less total space, then the previous slot. */
- if (from_reg == -1)
+ else if (from_reg == -1 || (!dont_share_p && ira_conflicts_p))
{
- alias_set_type alias_set = new_alias_set ();
+ rtx stack_slot;
/* No known place to spill from => no slot to reuse. */
x = assign_stack_local (mode, total_size,
min_align > inherent_align
|| total_size > inherent_size ? -1 : 0);
+
+ stack_slot = x;
+
+ /* Cancel the big-endian correction done in assign_stack_local.
+ Get the address of the beginning of the slot. This is so we
+ can do a big-endian correction unconditionally below. */
if (BYTES_BIG_ENDIAN)
- /* Cancel the big-endian correction done in assign_stack_local.
- Get the address of the beginning of the slot.
- This is so we can do a big-endian correction unconditionally
- below. */
- adjust = inherent_size - total_size;
-
- /* Nothing can alias this slot except this pseudo. */
- set_mem_alias_set (x, alias_set);
- dse_record_singleton_alias_set (alias_set, mode);
+ {
+ adjust = inherent_size - total_size;
+ if (adjust)
+ stack_slot
+ = adjust_address_nv (x, mode_for_size (total_size
+ * BITS_PER_UNIT,
+ MODE_INT, 1),
+ adjust);
+ }
+
+ if (! dont_share_p && ira_conflicts_p)
+ /* Inform IRA about allocation a new stack slot. */
+ ira_mark_new_stack_slot (stack_slot, i, total_size);
}
/* Reuse a stack slot if possible. */
>= inherent_size)
&& MEM_ALIGN (spill_stack_slot[from_reg]) >= min_align)
x = spill_stack_slot[from_reg];
+
/* Allocate a bigger slot. */
else
{
|| total_size > inherent_size ? -1 : 0);
stack_slot = x;
- /* All pseudos mapped to this slot can alias each other. */
- if (spill_stack_slot[from_reg])
- {
- alias_set_type alias_set
- = MEM_ALIAS_SET (spill_stack_slot[from_reg]);
- set_mem_alias_set (x, alias_set);
- dse_invalidate_singleton_alias_set (alias_set);
- }
- else
- {
- alias_set_type alias_set = new_alias_set ();
- set_mem_alias_set (x, alias_set);
- dse_record_singleton_alias_set (alias_set, mode);
- }
-
+ /* Cancel the big-endian correction done in assign_stack_local.
+ Get the address of the beginning of the slot. This is so we
+ can do a big-endian correction unconditionally below. */
if (BYTES_BIG_ENDIAN)
{
- /* Cancel the big-endian correction done in assign_stack_local.
- Get the address of the beginning of the slot.
- This is so we can do a big-endian correction unconditionally
- below. */
adjust = GET_MODE_SIZE (mode) - total_size;
if (adjust)
stack_slot
wrong mode, make a new stack slot. */
x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust);
- /* If we have a decl for the original register, set it for the
- memory. If this is a shared MEM, make a copy. */
- if (REG_EXPR (regno_reg_rtx[i])
- && DECL_P (REG_EXPR (regno_reg_rtx[i])))
- {
- rtx decl = DECL_RTL_IF_SET (REG_EXPR (regno_reg_rtx[i]));
-
- /* We can do this only for the DECLs home pseudo, not for
- any copies of it, since otherwise when the stack slot
- is reused, nonoverlapping_memrefs_p might think they
- cannot overlap. */
- if (decl && REG_P (decl) && REGNO (decl) == (unsigned) i)
- {
- if (from_reg != -1 && spill_stack_slot[from_reg] == x)
- x = copy_rtx (x);
-
- set_mem_attrs_from_reg (x, regno_reg_rtx[i]);
- }
- }
+ /* Set all of the memory attributes as appropriate for a spill. */
+ set_mem_attrs_for_spill (x);
/* Save the stack slot for later. */
reg_equiv_memory_loc[i] = x;
enum rtx_code code = GET_CODE (x);
struct elim_table *ep;
int regno;
- rtx new;
+ rtx new_rtx;
int i, j;
const char *fmt;
int copied = 0;
else if (reg_renumber && reg_renumber[regno] < 0
&& reg_equiv_invariant && reg_equiv_invariant[regno])
{
- if (may_use_invariant)
+ if (may_use_invariant || (insn && DEBUG_INSN_P (insn)))
return eliminate_regs_1 (copy_rtx (reg_equiv_invariant[regno]),
mem_mode, insn, true);
/* There exists at least one use of REGNO that cannot be
eliminated. Prevent the defining insn from being deleted. */
reg_equiv_init[regno] = NULL_RTX;
- alter_reg (regno, -1);
+ alter_reg (regno, -1, true);
}
return x;
We special-case the commonest situation in
eliminate_regs_in_insn, so just replace a PLUS with a
PLUS here, unless inside a MEM. */
- if (mem_mode != 0 && GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (mem_mode != 0 && CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) == - ep->previous_offset)
return ep->to_rtx;
else
&& reg_equiv_constant[REGNO (new0)] != 0)
new0 = reg_equiv_constant[REGNO (new0)];
- new = form_sum (new0, new1);
+ new_rtx = form_sum (GET_MODE (x), new0, new1);
/* As above, if we are not inside a MEM we do not want to
turn a PLUS into something else. We might try to do so here
for an addition of 0 if we aren't optimizing. */
- if (! mem_mode && GET_CODE (new) != PLUS)
- return gen_rtx_PLUS (GET_MODE (x), new, const0_rtx);
+ if (! mem_mode && GET_CODE (new_rtx) != PLUS)
+ return gen_rtx_PLUS (GET_MODE (x), new_rtx, const0_rtx);
else
- return new;
+ return new_rtx;
}
}
return x;
We ignore the possibility of overflow here. */
if (REG_P (XEXP (x, 0))
&& REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (x, 1)))
    for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
ep++)
if (ep->from_rtx == XEXP (x, 0) && ep->can_eliminate)
{
if (! mem_mode
- /* Refs inside notes don't count for this purpose. */
+ /* Refs inside notes or in DEBUG_INSNs don't count for
+ this purpose. */
&& ! (insn != 0 && (GET_CODE (insn) == EXPR_LIST
- || GET_CODE (insn) == INSN_LIST)))
+ || GET_CODE (insn) == INSN_LIST
+ || DEBUG_INSN_P (insn))))
ep->ref_outside_mem = 1;
return
/* If we have something in XEXP (x, 0), the usual case, eliminate it. */
if (XEXP (x, 0))
{
- new = eliminate_regs_1 (XEXP (x, 0), mem_mode, insn, true);
- if (new != XEXP (x, 0))
+ new_rtx = eliminate_regs_1 (XEXP (x, 0), mem_mode, insn, true);
+ if (new_rtx != XEXP (x, 0))
{
/* If this is a REG_DEAD note, it is not valid anymore.
Using the eliminated version could result in creating a
REG_DEAD note for the stack or frame pointer. */
- if (GET_MODE (x) == REG_DEAD)
+ if (REG_NOTE_KIND (x) == REG_DEAD)
return (XEXP (x, 1)
? eliminate_regs_1 (XEXP (x, 1), mem_mode, insn, true)
: NULL_RTX);
- x = gen_rtx_EXPR_LIST (REG_NOTE_KIND (x), new, XEXP (x, 1));
+ x = alloc_reg_note (REG_NOTE_KIND (x), new_rtx, XEXP (x, 1));
}
}
strictly needed, but it simplifies the code. */
if (XEXP (x, 1))
{
- new = eliminate_regs_1 (XEXP (x, 1), mem_mode, insn, true);
- if (new != XEXP (x, 1))
+ new_rtx = eliminate_regs_1 (XEXP (x, 1), mem_mode, insn, true);
+ if (new_rtx != XEXP (x, 1))
return
- gen_rtx_fmt_ee (GET_CODE (x), GET_MODE (x), XEXP (x, 0), new);
+ gen_rtx_fmt_ee (GET_CODE (x), GET_MODE (x), XEXP (x, 0), new_rtx);
}
return x;
if (GET_CODE (XEXP (x, 1)) == PLUS
&& XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
{
- rtx new = eliminate_regs_1 (XEXP (XEXP (x, 1), 1), mem_mode,
+ rtx new_rtx = eliminate_regs_1 (XEXP (XEXP (x, 1), 1), mem_mode,
insn, true);
- if (new != XEXP (XEXP (x, 1), 1))
+ if (new_rtx != XEXP (XEXP (x, 1), 1))
return gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (x, 0),
gen_rtx_PLUS (GET_MODE (x),
- XEXP (x, 0), new));
+ XEXP (x, 0), new_rtx));
}
return x;
case POPCOUNT:
case PARITY:
case BSWAP:
- new = eliminate_regs_1 (XEXP (x, 0), mem_mode, insn, false);
- if (new != XEXP (x, 0))
- return gen_rtx_fmt_e (code, GET_MODE (x), new);
+ new_rtx = eliminate_regs_1 (XEXP (x, 0), mem_mode, insn, false);
+ if (new_rtx != XEXP (x, 0))
+ return gen_rtx_fmt_e (code, GET_MODE (x), new_rtx);
return x;
case SUBREG:
&& reg_equiv_memory_loc != 0
&& reg_equiv_memory_loc[REGNO (SUBREG_REG (x))] != 0)
{
- new = SUBREG_REG (x);
+ new_rtx = SUBREG_REG (x);
}
else
- new = eliminate_regs_1 (SUBREG_REG (x), mem_mode, insn, false);
+ new_rtx = eliminate_regs_1 (SUBREG_REG (x), mem_mode, insn, false);
- if (new != SUBREG_REG (x))
+ if (new_rtx != SUBREG_REG (x))
{
int x_size = GET_MODE_SIZE (GET_MODE (x));
- int new_size = GET_MODE_SIZE (GET_MODE (new));
+ int new_size = GET_MODE_SIZE (GET_MODE (new_rtx));
- if (MEM_P (new)
+ if (MEM_P (new_rtx)
&& ((x_size < new_size
#ifdef WORD_REGISTER_OPERATIONS
/* On these machines, combine can create rtl of the form
)
|| x_size == new_size)
)
- return adjust_address_nv (new, GET_MODE (x), SUBREG_BYTE (x));
+ return adjust_address_nv (new_rtx, GET_MODE (x), SUBREG_BYTE (x));
else
- return gen_rtx_SUBREG (GET_MODE (x), new, SUBREG_BYTE (x));
+ return gen_rtx_SUBREG (GET_MODE (x), new_rtx, SUBREG_BYTE (x));
}
return x;
case USE:
/* Handle insn_list USE that a call to a pure function may generate. */
- new = eliminate_regs_1 (XEXP (x, 0), 0, insn, false);
- if (new != XEXP (x, 0))
- return gen_rtx_USE (GET_MODE (x), new);
+ new_rtx = eliminate_regs_1 (XEXP (x, 0), VOIDmode, insn, false);
+ if (new_rtx != XEXP (x, 0))
+ return gen_rtx_USE (GET_MODE (x), new_rtx);
return x;
case CLOBBER:
+ gcc_assert (insn && DEBUG_INSN_P (insn));
+ break;
+
case ASM_OPERANDS:
case SET:
gcc_unreachable ();
{
if (*fmt == 'e')
{
- new = eliminate_regs_1 (XEXP (x, i), mem_mode, insn, false);
- if (new != XEXP (x, i) && ! copied)
+ new_rtx = eliminate_regs_1 (XEXP (x, i), mem_mode, insn, false);
+ if (new_rtx != XEXP (x, i) && ! copied)
{
x = shallow_copy_rtx (x);
copied = 1;
}
- XEXP (x, i) = new;
+ XEXP (x, i) = new_rtx;
}
else if (*fmt == 'E')
{
int copied_vec = 0;
for (j = 0; j < XVECLEN (x, i); j++)
{
- new = eliminate_regs_1 (XVECEXP (x, i, j), mem_mode, insn, false);
- if (new != XVECEXP (x, i, j) && ! copied_vec)
+ new_rtx = eliminate_regs_1 (XVECEXP (x, i, j), mem_mode, insn, false);
+ if (new_rtx != XVECEXP (x, i, j) && ! copied_vec)
{
rtvec new_v = gen_rtvec_v (XVECLEN (x, i),
XVEC (x, i)->elem);
XVEC (x, i) = new_v;
copied_vec = 1;
}
- XVECEXP (x, i, j) = new;
+ XVECEXP (x, i, j) = new_rtx;
}
}
}
if (GET_CODE (src) == PLUS
&& XEXP (src, 0) == SET_DEST (x)
- && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (src, 1)))
ep->offset -= INTVAL (XEXP (src, 1));
else
ep->can_eliminate = 0;
}
}
- elimination_effects (SET_DEST (x), 0);
- elimination_effects (SET_SRC (x), 0);
+ elimination_effects (SET_DEST (x), VOIDmode);
+ elimination_effects (SET_SRC (x), VOIDmode);
return;
case MEM:
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| GET_CODE (PATTERN (insn)) == ADDR_VEC
|| GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
- || GET_CODE (PATTERN (insn)) == ASM_INPUT);
+ || GET_CODE (PATTERN (insn)) == ASM_INPUT
+ || DEBUG_INSN_P (insn));
+ if (DEBUG_INSN_P (insn))
+ INSN_VAR_LOCATION_LOC (insn)
+ = eliminate_regs (INSN_VAR_LOCATION_LOC (insn), VOIDmode, insn);
return 0;
}
rtx prev_insn, prev_set;
if (GET_CODE (base) == PLUS
- && GET_CODE (XEXP (base, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (base, 1)))
{
offset += INTVAL (XEXP (base, 1));
base = XEXP (base, 0);
plus_src = SET_SRC (old_set);
/* First see if the source is of the form (plus (...) CST). */
if (plus_src
- && GET_CODE (XEXP (plus_src, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (plus_src, 1)))
plus_cst_src = plus_src;
else if (REG_P (SET_SRC (old_set))
|| plus_src)
if ((REG_NOTE_KIND (links) == REG_EQUAL
|| REG_NOTE_KIND (links) == REG_EQUIV)
&& GET_CODE (XEXP (links, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (links, 0), 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (XEXP (links, 0), 1)))
{
plus_cst_src = XEXP (links, 0);
break;
{
rtx to_rtx = ep->to_rtx;
offset += ep->offset;
- offset = trunc_int_for_mode (offset, GET_MODE (reg));
+ offset = trunc_int_for_mode (offset, GET_MODE (plus_cst_src));
if (GET_CODE (XEXP (plus_cst_src, 0)) == SUBREG)
to_rtx = gen_lowpart (GET_MODE (XEXP (plus_cst_src, 0)),
/* First see if this insn remains valid when we make the
change. If not, try to replace the whole pattern with
a simple set (this may help if the original insn was a
- PARALLEL that was only recognized as single_set due to
+ PARALLEL that was only recognized as single_set due to
REG_UNUSED notes). If this isn't valid either, keep
the INSN_CODE the same and let reload fix it up. */
if (!validate_change (insn, &SET_SRC (old_set), new_src, 0))
}
/* Determine the effects of this insn on elimination offsets. */
- elimination_effects (old_body, 0);
+ elimination_effects (old_body, VOIDmode);
/* Eliminate all eliminable registers occurring in operands that
can be handled by reload. */
in_plus = true;
substed_operand[i]
- = eliminate_regs_1 (recog_data.operand[i], 0,
+ = eliminate_regs_1 (recog_data.operand[i], VOIDmode,
replace ? insn : NULL_RTX,
is_set_src || in_plus);
if (substed_operand[i] != orig_operand[i])
this point. */
*recog_data.operand_loc[i] = 0;
- /* If an output operand changed from a REG to a MEM and INSN is an
- insn, write a CLOBBER insn. */
+ /* If an output operand changed from a REG to a MEM and INSN is an
+ insn, write a CLOBBER insn. */
if (recog_data.operand_type[i] != OP_IN
&& REG_P (orig_operand[i])
&& MEM_P (substed_operand[i])
&& replace)
- emit_insn_after (gen_rtx_CLOBBER (VOIDmode, orig_operand[i]),
- insn);
+ emit_insn_after (gen_clobber (orig_operand[i]), insn);
}
}
{
/* Restore the old body. */
for (i = 0; i < recog_data.n_operands; i++)
- *recog_data.operand_loc[i] = orig_operand[i];
+ /* Restoring a top-level match_parallel would clobber the new_body
+ we installed in the insn. */
+ if (recog_data.operand_loc[i] != &PATTERN (insn))
+ *recog_data.operand_loc[i] = orig_operand[i];
for (i = 0; i < recog_data.n_dups; i++)
*recog_data.dup_loc[i] = orig_operand[(int) recog_data.dup_num[i]];
}
the pre-passes. */
if (val && REG_NOTES (insn) != 0)
REG_NOTES (insn)
- = eliminate_regs_1 (REG_NOTES (insn), 0, REG_NOTES (insn), true);
+ = eliminate_regs_1 (REG_NOTES (insn), VOIDmode, REG_NOTES (insn), true);
return val;
}
&& (GET_CODE (x) != SET
|| GET_CODE (SET_SRC (x)) != PLUS
|| XEXP (SET_SRC (x), 0) != dest
- || GET_CODE (XEXP (SET_SRC (x), 1)) != CONST_INT))
+ || !CONST_INT_P (XEXP (SET_SRC (x), 1))))
{
reg_eliminate[i].can_eliminate_previous
= reg_eliminate[i].can_eliminate = 0;
struct elim_table *ep;
for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++)
- if ((ep->from == HARD_FRAME_POINTER_REGNUM && FRAME_POINTER_REQUIRED)
+ if ((ep->from == HARD_FRAME_POINTER_REGNUM
+ && targetm.frame_pointer_required ())
#ifdef ELIMINABLE_REGS
- || ! CAN_ELIMINATE (ep->from, ep->to)
+ || ! targetm.can_eliminate (ep->from, ep->to)
#endif
)
ep->can_eliminate = 0;
frame_pointer_needed = 1;
for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++)
{
- if (ep->can_eliminate && ep->from == FRAME_POINTER_REGNUM
- && ep->to != HARD_FRAME_POINTER_REGNUM)
+ if (ep->can_eliminate
+ && ep->from == FRAME_POINTER_REGNUM
+ && ep->to != HARD_FRAME_POINTER_REGNUM
+ && (! SUPPORTS_STACK_ALIGNMENT
+ || ! crtl->stack_realign_needed))
frame_pointer_needed = 0;
if (! ep->can_eliminate && ep->can_eliminate_previous)
return false;
}
-/* Initialize the table of registers to eliminate. */
+/* Initialize the table of registers to eliminate.
+ Pre-condition: global flag frame_pointer_needed has been set before
+ calling this function. */
static void
init_elim_table (void)
#endif
if (!reg_eliminate)
- reg_eliminate = xcalloc (sizeof (struct elim_table), NUM_ELIMINABLE_REGS);
-
- /* Does this function require a frame pointer? */
-
- frame_pointer_needed = (! flag_omit_frame_pointer
- /* ?? If EXIT_IGNORE_STACK is set, we will not save
- and restore sp for alloca. So we can't eliminate
- the frame pointer in that case. At some point,
- we should improve this by emitting the
- sp-adjusting insns for this case. */
- || (cfun->calls_alloca
- && EXIT_IGNORE_STACK)
- || crtl->accesses_prior_frames
- || FRAME_POINTER_REQUIRED);
+ reg_eliminate = XCNEWVEC (struct elim_table, NUM_ELIMINABLE_REGS);
num_eliminable = 0;
ep->from = ep1->from;
ep->to = ep1->to;
ep->can_eliminate = ep->can_eliminate_previous
- = (CAN_ELIMINATE (ep->from, ep->to)
- && ! (ep->to == STACK_POINTER_REGNUM && frame_pointer_needed));
+ = (targetm.can_eliminate (ep->from, ep->to)
+ && ! (ep->to == STACK_POINTER_REGNUM
+ && frame_pointer_needed
+ && (! SUPPORTS_STACK_ALIGNMENT
+ || ! stack_realign_fp)));
}
#else
reg_eliminate[0].from = reg_eliminate_1[0].from;
spill_reg_order[i] = -1;
EXECUTE_IF_SET_IN_REG_SET (&spilled_pseudos, FIRST_PSEUDO_REGISTER, i, rsi)
- {
- /* Record the current hard register the pseudo is allocated to in
- pseudo_previous_regs so we avoid reallocating it to the same
- hard reg in a later pass. */
- gcc_assert (reg_renumber[i] >= 0);
-
- SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]);
- /* Mark it as no longer having a hard register home. */
- reg_renumber[i] = -1;
- /* We will need to scan everything again. */
- something_changed = 1;
- }
+ if (! ira_conflicts_p || reg_renumber[i] >= 0)
+ {
+ /* Record the current hard register the pseudo is allocated to
+ in pseudo_previous_regs so we avoid reallocating it to the
+ same hard reg in a later pass. */
+ gcc_assert (reg_renumber[i] >= 0);
+
+ SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]);
+ /* Mark it as no longer having a hard register home. */
+ reg_renumber[i] = -1;
+ if (ira_conflicts_p)
+ /* Inform IRA about the change. */
+ ira_mark_allocation_change (i);
+ /* We will need to scan everything again. */
+ something_changed = 1;
+ }
/* Retry global register allocation if possible. */
- if (global)
+ if (global && ira_conflicts_p)
{
+ unsigned int n;
+
memset (pseudo_forbidden_regs, 0, max_regno * sizeof (HARD_REG_SET));
/* For every insn that needs reloads, set the registers used as spill
regs in pseudo_forbidden_regs for every pseudo live across the
}
}
- /* Retry allocating the spilled pseudos. For each reg, merge the
- various reg sets that indicate which hard regs can't be used,
- and call retry_global_alloc.
- We change spill_pseudos here to only contain pseudos that did not
- get a new hard register. */
- for (i = FIRST_PSEUDO_REGISTER; i < (unsigned)max_regno; i++)
+ /* Retry allocating the pseudos spilled in IRA and the
+ reload. For each reg, merge the various reg sets that
+ indicate which hard regs can't be used, and call
+ ira_reassign_pseudos. */
+ for (n = 0, i = FIRST_PSEUDO_REGISTER; i < (unsigned) max_regno; i++)
if (reg_old_renumber[i] != reg_renumber[i])
{
- HARD_REG_SET forbidden;
- COPY_HARD_REG_SET (forbidden, bad_spill_regs_global);
- IOR_HARD_REG_SET (forbidden, pseudo_forbidden_regs[i]);
- IOR_HARD_REG_SET (forbidden, pseudo_previous_regs[i]);
- retry_global_alloc (i, forbidden);
- if (reg_renumber[i] >= 0)
+ if (reg_renumber[i] < 0)
+ temp_pseudo_reg_arr[n++] = i;
+ else
CLEAR_REGNO_REG_SET (&spilled_pseudos, i);
}
+ if (ira_reassign_pseudos (temp_pseudo_reg_arr, n,
+ bad_spill_regs_global,
+ pseudo_forbidden_regs, pseudo_previous_regs,
+ &spilled_pseudos))
+ something_changed = 1;
}
-
/* Fix up the register information in the insn chain.
This involves deleting those of the spilled pseudos which did not get
a new hard register home from the live_{before,after} sets. */
HARD_REG_SET used_by_pseudos;
HARD_REG_SET used_by_pseudos2;
- AND_COMPL_REG_SET (&chain->live_throughout, &spilled_pseudos);
- AND_COMPL_REG_SET (&chain->dead_or_set, &spilled_pseudos);
-
+ if (! ira_conflicts_p)
+ {
+	      /* Don't do this for IRA because IRA and reload can still
+		 assign hard registers to the spilled pseudos on
+		 subsequent reload iterations.  */
+ AND_COMPL_REG_SET (&chain->live_throughout, &spilled_pseudos);
+ AND_COMPL_REG_SET (&chain->dead_or_set, &spilled_pseudos);
+ }
/* Mark any unallocated hard regs as available for spills. That
makes inheritance work somewhat better. */
if (chain->need_reload)
REG_SET_TO_HARD_REG_SET (used_by_pseudos2, &chain->dead_or_set);
IOR_HARD_REG_SET (used_by_pseudos, used_by_pseudos2);
- /* Save the old value for the sanity test below. */
- COPY_HARD_REG_SET (used_by_pseudos2, chain->used_spill_regs);
-
compute_use_by_pseudos (&used_by_pseudos, &chain->live_throughout);
compute_use_by_pseudos (&used_by_pseudos, &chain->dead_or_set);
+	  /* The value of chain->used_spill_regs from the previous
+	     iteration may not be included in the value calculated here
+	     because caller-save insns may have been removed (see
+	     function delete_caller_save_insns).  */
COMPL_HARD_REG_SET (chain->used_spill_regs, used_by_pseudos);
AND_HARD_REG_SET (chain->used_spill_regs, used_spill_regs);
-
- /* Make sure we only enlarge the set. */
- gcc_assert (hard_reg_set_subset_p (used_by_pseudos2,
- chain->used_spill_regs));
}
}
+ CLEAR_REG_SET (&changed_allocation_pseudos);
/* Let alter_reg modify the reg rtx's for the modified pseudos. */
for (i = FIRST_PSEUDO_REGISTER; i < (unsigned)max_regno; i++)
{
if (reg_old_renumber[i] == regno)
continue;
- alter_reg (i, reg_old_renumber[i]);
+ SET_REGNO_REG_SET (&changed_allocation_pseudos, i);
+
+ alter_reg (i, reg_old_renumber[i], false);
reg_old_renumber[i] = regno;
if (dump_file)
{
fixup_eh_region_note (rtx insn, rtx prev, rtx next)
{
rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
- unsigned int trap_count;
- rtx i;
-
if (note == NULL)
return;
-
- if (may_trap_p (PATTERN (insn)))
- trap_count = 1;
- else
- {
- remove_note (insn, note);
- trap_count = 0;
- }
-
- for (i = NEXT_INSN (prev); i != next; i = NEXT_INSN (i))
- if (INSN_P (i) && i != insn && may_trap_p (PATTERN (i)))
- {
- trap_count++;
- REG_NOTES (i)
- = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0), REG_NOTES (i));
- }
+ if (!insn_could_throw_p (insn))
+ remove_note (insn, note);
+ copy_reg_eh_region_note_forward (note, NEXT_INSN (prev), next);
}
/* Reload pseudo-registers into hard regs around each insn as needed.
rtx prev = 0;
rtx insn = chain->insn;
rtx old_next = NEXT_INSN (insn);
+#ifdef AUTO_INC_DEC
+ rtx old_prev = PREV_INSN (insn);
+#endif
/* If we pass a label, copy the offsets from the label information
into the current offsets of each elimination. */
/* Merge any reloads that we didn't combine for fear of
increasing the number of spill registers needed but now
discover can be safely merged. */
- if (SMALL_REGISTER_CLASSES)
+ if (targetm.small_register_classes_for_mode_p (VOIDmode))
merge_assigned_reloads (insn);
/* Generate the insns to reload operands into or out of
continue;
if (n == 1)
{
- n = validate_replace_rtx (reload_reg,
- gen_rtx_fmt_e (code,
- mode,
- reload_reg),
- p);
+ rtx replace_reg
+ = gen_rtx_fmt_e (code, mode, reload_reg);
+
+ validate_replace_rtx_group (reload_reg,
+ replace_reg, p);
+ n = verify_changes (0);
/* We must also verify that the constraints
- are met after the replacement. */
- extract_insn (p);
+ are met after the replacement. Make sure
+ extract_insn is only called for an insn
+ where the replacements were found to be
+ valid so far. */
if (n)
- n = constrain_operands (1);
- else
- break;
-
- /* If the constraints were not met, then
- undo the replacement. */
- if (!n)
{
- validate_replace_rtx (gen_rtx_fmt_e (code,
- mode,
- reload_reg),
- reload_reg, p);
- break;
+ extract_insn (p);
+ n = constrain_operands (1);
}
+ /* If the constraints were not met, then
+ undo the replacement, else confirm it. */
+ if (!n)
+ cancel_changes (0);
+ else
+ confirm_change_group ();
}
break;
}
if (n == 1)
{
- REG_NOTES (p)
- = gen_rtx_EXPR_LIST (REG_INC, reload_reg,
- REG_NOTES (p));
+ add_reg_note (p, REG_INC, reload_reg);
/* Mark this as having an output reload so that the
REG_INC processing code below won't invalidate
the reload for inheritance. */
SET_REGNO_REG_SET (®_has_output_reload,
REGNO (XEXP (in_reg, 0)));
}
+ else if (code == PRE_INC || code == PRE_DEC
+ || code == POST_INC || code == POST_DEC)
+ {
+ int in_regno = REGNO (XEXP (in_reg, 0));
+
+ if (reg_last_reload_reg[in_regno] != NULL_RTX)
+ {
+ int in_hard_regno;
+ bool forget_p = true;
+
+ in_hard_regno = REGNO (reg_last_reload_reg[in_regno]);
+ if (TEST_HARD_REG_BIT (reg_reloaded_valid,
+ in_hard_regno))
+ {
+ for (x = old_prev ? NEXT_INSN (old_prev) : insn;
+ x != old_next;
+ x = NEXT_INSN (x))
+ if (x == reg_reloaded_insn[in_hard_regno])
+ {
+ forget_p = false;
+ break;
+ }
+ }
+			  /* If for some reason we didn't set up
+			     reg_last_reload_reg in this insn,
+			     invalidate inheritance from previous
+			     insns for the incremented/decremented
+			     register.  Such registers will not be in
+			     reg_has_output_reload.  Also invalidate
+			     it if the corresponding element in
+			     reg_reloaded_insn has been
+			     invalidated.  */
+ if (forget_p)
+ forget_old_reloads_1 (XEXP (in_reg, 0),
+ NULL_RTX, NULL);
+ }
+ }
}
}
/* If a pseudo that got a hard register is auto-incremented,
be partially clobbered by the call. */
else if (CALL_P (insn))
{
- AND_COMPL_HARD_REG_SET (reg_reloaded_valid, call_used_reg_set);
- AND_COMPL_HARD_REG_SET (reg_reloaded_valid, reg_reloaded_call_part_clobbered);
+ AND_COMPL_HARD_REG_SET (reg_reloaded_valid, call_used_reg_set);
+ AND_COMPL_HARD_REG_SET (reg_reloaded_valid, reg_reloaded_call_part_clobbered);
}
}
unless X is an output reload reg of the current insn.
X may be a hard reg (the reload reg)
- or it may be a pseudo reg that was reloaded from.
+ or it may be a pseudo reg that was reloaded from.
When DATA is non-NULL just mark the registers in regset
to be forgotten later. */
return true;
}
+/* Recursively replace every occurrence of WHAT in *WHERE with REPL,
+   pushing the address of each changed rtx onto SUBSTITUTE_STACK so
+   that the caller can later undo the replacements.  Occurrences are
+   matched either by pointer identity or by rtx_equal_p.  */
+static void
+substitute (rtx *where, const_rtx what, rtx repl)
+{
+ const char *fmt;
+ int i;
+ enum rtx_code code;
+
+ if (*where == 0)
+ return;
+
+ if (*where == what || rtx_equal_p (*where, what))
+ {
+ /* Record the location of the changed rtx.  */
+ VEC_safe_push (rtx_p, heap, substitute_stack, where);
+ *where = repl;
+ return;
+ }
+
+ /* Walk the operands of *WHERE, recursing into every rtx ('e')
+    operand and every element of every rtx vector ('E') operand.  */
+ code = GET_CODE (*where);
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
+ substitute (&XVECEXP (*where, i, j), what, repl);
+ }
+ else if (fmt[i] == 'e')
+ substitute (&XEXP (*where, i), what, repl);
+ }
+}
+
+/* Return TRUE if the chain of reloads R1 and R2 (in either order)
+   can be evaluated without using an intermediate register for the
+   reload that contains the other reload.  It is important to read
+   gen_reload to understand what this function is trying to do.  As
+   an example, consider the reload chain
+
+   r2: const
+   r1: <something> + const
+
+   where reload R2 got reload reg HR.  The function returns true if
+   there is a correct insn HR = HR + <something>.  Otherwise,
+   gen_reload will use an intermediate register (namely the reload
+   reg for R1) to reload <something>.
+
+   We need this function to find a conflict for chain reloads.  In
+   our example, if HR = HR + <something> is an incorrect insn, then
+   we cannot use HR as a reload register for R2.  If we did use it
+   we would get the wrong code:
+
+   HR = const
+   HR = <something>
+   HR = HR + HR
+
+*/
+static bool
+gen_reload_chain_without_interm_reg_p (int r1, int r2)
+{
+ /* Assume other cases in gen_reload are not possible for
+    chain reloads or do need intermediate hard registers.  */
+ bool result = true;
+ int regno, n, code;
+ rtx out, in, tem, insn;
+ rtx last = get_last_insn ();
+
+ /* Make r2 a component of r1.  */
+ if (reg_mentioned_p (rld[r1].in, rld[r2].in))
+ {
+ n = r1;
+ r1 = r2;
+ r2 = n;
+ }
+ gcc_assert (reg_mentioned_p (rld[r2].in, rld[r1].in));
+ regno = rld[r1].regno >= 0 ? rld[r1].regno : rld[r2].regno;
+ gcc_assert (regno >= 0);
+ out = gen_rtx_REG (rld[r1].mode, regno);
+ in = rld[r1].in;
+ /* Stand in R2's reload reg for R2's value inside R1's input, so we
+    can test the combined insn that gen_reload would emit.  */
+ substitute (&in, rld[r2].in, gen_rtx_REG (rld[r2].mode, regno));
+
+ /* If IN is a paradoxical SUBREG, remove it and try to put the
+    opposite SUBREG on OUT.  Likewise for a paradoxical SUBREG on OUT.  */
+ if (GET_CODE (in) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (in))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))))
+ && (tem = gen_lowpart_common (GET_MODE (SUBREG_REG (in)), out)) != 0)
+ in = SUBREG_REG (in), out = tem;
+
+ if (GET_CODE (in) == PLUS
+ && (REG_P (XEXP (in, 0))
+ || GET_CODE (XEXP (in, 0)) == SUBREG
+ || MEM_P (XEXP (in, 0)))
+ && (REG_P (XEXP (in, 1))
+ || GET_CODE (XEXP (in, 1)) == SUBREG
+ || CONSTANT_P (XEXP (in, 1))
+ || MEM_P (XEXP (in, 1))))
+ {
+ /* Emit a trial insn OUT = IN and see whether the target
+    recognizes it; the trial is deleted again below.  */
+ insn = emit_insn (gen_rtx_SET (VOIDmode, out, in));
+ code = recog_memoized (insn);
+ result = false;
+
+ if (code >= 0)
+ {
+ extract_insn (insn);
+ /* We want constrain operands to treat this insn strictly in
+    its validity determination, i.e., the way it would after
+    reload has completed.  */
+ result = constrain_operands (1);
+ }
+
+ delete_insns_since (last);
+ }
+
+ /* Restore the original value at each changed address within R1.  */
+ while (!VEC_empty (rtx_p, substitute_stack))
+ {
+ rtx *where = VEC_pop (rtx_p, substitute_stack);
+ *where = rld[r2].in;
+ }
+
+ return result;
+}
+
/* Return 1 if the reloads denoted by R1 and R2 cannot share a register.
Return 0 otherwise.
case RELOAD_FOR_OPERAND_ADDRESS:
return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_INSN
|| (r2_type == RELOAD_FOR_OPERAND_ADDRESS
- && !reloads_unique_chain_p (r1, r2)));
+ && (!reloads_unique_chain_p (r1, r2)
+ || !gen_reload_chain_without_interm_reg_p (r1, r2))));
case RELOAD_FOR_OPADDR_ADDR:
return (r2_type == RELOAD_FOR_INPUT
for (count = 0; count < n_spills; count++)
{
- int class = (int) rld[r].class;
+ int rclass = (int) rld[r].rclass;
int regnum;
i++;
&& free_for_value_p (regnum, rld[r].mode, rld[r].opnum,
rld[r].when_needed, rld[r].in,
rld[r].out, r, 1)))
- && TEST_HARD_REG_BIT (reg_class_contents[class], regnum)
+ && TEST_HARD_REG_BIT (reg_class_contents[rclass], regnum)
&& HARD_REGNO_MODE_OK (regnum, rld[r].mode)
/* Look first for regs to share, then for unshared. But
don't share regs used for inherited reloads; they are
while (nr > 1)
{
int regno = regnum + nr - 1;
- if (!(TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ if (!(TEST_HARD_REG_BIT (reg_class_contents[rclass], regno)
&& spill_reg_order[regno] >= 0
&& reload_reg_free_p (regno, rld[r].opnum,
rld[r].when_needed)))
{
max_group_size = MAX (rld[j].nregs, max_group_size);
group_class
- = reg_class_superunion[(int) rld[j].class][(int) group_class];
+ = reg_class_superunion[(int) rld[j].rclass][(int) group_class];
}
save_reload_reg_rtx[j] = rld[j].reg_rtx;
#endif
)
{
- enum reg_class class = rld[r].class, last_class;
+ enum reg_class rclass = rld[r].rclass, last_class;
rtx last_reg = reg_last_reload_reg[regno];
enum machine_mode need_mode;
need_mode = mode;
else
need_mode
- = smallest_mode_for_size (GET_MODE_BITSIZE (mode)
- + byte * BITS_PER_UNIT,
- GET_MODE_CLASS (mode));
+ = smallest_mode_for_size
+ (GET_MODE_BITSIZE (mode) + byte * BITS_PER_UNIT,
+ GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
+ ? MODE_INT : GET_MODE_CLASS (mode));
if ((GET_MODE_SIZE (GET_MODE (last_reg))
>= GET_MODE_SIZE (need_mode))
&& reg_reloaded_contents[i] == regno
&& TEST_HARD_REG_BIT (reg_reloaded_valid, i)
&& HARD_REGNO_MODE_OK (i, rld[r].mode)
- && (TEST_HARD_REG_BIT (reg_class_contents[(int) class], i)
+ && (TEST_HARD_REG_BIT (reg_class_contents[(int) rclass], i)
/* Even if we can't use this register as a reload
register, we might use it for reload_override_in,
if copying it to the desired class is cheap
enough. */
- || ((REGISTER_MOVE_COST (mode, last_class, class)
- < MEMORY_MOVE_COST (mode, class, 1))
- && (secondary_reload_class (1, class, mode,
+ || ((REGISTER_MOVE_COST (mode, last_class, rclass)
+ < MEMORY_MOVE_COST (mode, rclass, 1))
+ && (secondary_reload_class (1, rclass, mode,
last_reg)
== NO_REGS)
#ifdef SECONDARY_MEMORY_NEEDED
- && ! SECONDARY_MEMORY_NEEDED (last_class, class,
+ && ! SECONDARY_MEMORY_NEEDED (last_class, rclass,
mode)
#endif
))
bad_for_class = 0;
for (k = 0; k < nr; k++)
- bad_for_class |= ! TEST_HARD_REG_BIT (reg_class_contents[(int) rld[r].class],
+ bad_for_class |= ! TEST_HARD_REG_BIT (reg_class_contents[(int) rld[r].rclass],
i+k);
/* We found a register that contains the
|| REG_P (rld[r].in)
|| MEM_P (rld[r].in))
&& (rld[r].nregs == max_group_size
- || ! reg_classes_intersect_p (rld[r].class, group_class)))
+ || ! reg_classes_intersect_p (rld[r].rclass, group_class)))
search_equiv = rld[r].in;
/* If this is an output reload from a simple move insn, look
if an equivalence for the input is available. */
if (search_equiv)
{
rtx equiv
- = find_equiv_reg (search_equiv, insn, rld[r].class,
+ = find_equiv_reg (search_equiv, insn, rld[r].rclass,
-1, NULL, 0, rld[r].mode);
int regno = 0;
{
regs_used |= TEST_HARD_REG_BIT (reload_reg_used_at_all,
i);
- bad_for_class |= ! TEST_HARD_REG_BIT (reg_class_contents[(int) rld[r].class],
+ bad_for_class |= ! TEST_HARD_REG_BIT (reg_class_contents[(int) rld[r].rclass],
i);
}
int nr = hard_regno_nregs[regno][rld[r].mode];
int k;
rld[r].reg_rtx = equiv;
+ reload_spill_index[r] = regno;
reload_inherited[r] = 1;
/* If reg_reloaded_valid is not set for this register,
|| rld[s].optional)
continue;
- if ((rld[s].class != rld[r].class
- && reg_classes_intersect_p (rld[r].class,
- rld[s].class))
+ if ((rld[s].rclass != rld[r].rclass
+ && reg_classes_intersect_p (rld[r].rclass,
+ rld[s].rclass))
|| rld[s].nregs < rld[r].nregs)
break;
}
reload_spill_index[r] = -1;
}
\f
-/* If SMALL_REGISTER_CLASSES is nonzero, we may not have merged two
- reloads of the same item for fear that we might not have enough reload
- registers. However, normally they will get the same reload register
- and hence actually need not be loaded twice.
+/* If the small_register_classes_for_mode_p target hook returns true for
+ some machine modes, we may not have merged two reloads of the same item
+ for fear that we might not have enough reload registers. However,
+ normally they will get the same reload register and hence actually need
+ not be loaded twice.
Here we check for the most common case of this phenomenon: when we have
a number of reloads for the same object, each of which were allocated
rl->when_needed, old, rl->out, j, 0))
{
rtx temp = PREV_INSN (insn);
- while (temp && NOTE_P (temp))
+ while (temp && (NOTE_P (temp) || DEBUG_INSN_P (temp)))
temp = PREV_INSN (temp);
if (temp
&& NONJUMP_INSN_P (temp)
&& REG_N_SETS (REGNO (old)) == 1)
{
reg_renumber[REGNO (old)] = REGNO (reloadreg);
- alter_reg (REGNO (old), -1);
+ if (ira_conflicts_p)
+ /* Inform IRA about the change. */
+ ira_mark_allocation_change (REGNO (old));
+ alter_reg (REGNO (old), -1, false);
}
special = 1;
+
+ /* Adjust any debug insns between temp and insn. */
+ while ((temp = NEXT_INSN (temp)) != insn)
+ if (DEBUG_INSN_P (temp))
+ replace_rtx (PATTERN (temp), old, reloadreg);
+ else
+ gcc_assert (NOTE_P (temp));
}
else
{
sri.icode = CODE_FOR_nothing;
sri.prev_sri = NULL;
- new_class = targetm.secondary_reload (1, real_oldequiv, rl->class,
+ new_class = targetm.secondary_reload (1, real_oldequiv, rl->rclass,
mode, &sri);
if (new_class == NO_REGS && sri.icode == CODE_FOR_nothing)
else if (new_class == NO_REGS)
{
if (reload_adjust_reg_for_icode (&second_reload_reg,
- third_reload_reg, sri.icode))
- icode = sri.icode, third_reload_reg = 0;
+ third_reload_reg,
+ (enum insn_code) sri.icode))
+ {
+ icode = (enum insn_code) sri.icode;
+ third_reload_reg = 0;
+ }
else
- oldequiv = old, real_oldequiv = real_old;
+ {
+ oldequiv = old;
+ real_oldequiv = real_old;
+ }
}
else if (sri.icode != CODE_FOR_nothing)
/* We currently lack a way to express this in reloads. */
if (reload_adjust_reg_for_temp (&second_reload_reg,
third_reload_reg,
new_class, mode))
- third_reload_reg = 0, tertiary_icode = sri2.icode;
+ {
+ third_reload_reg = 0;
+ tertiary_icode = (enum insn_code) sri2.icode;
+ }
else
- oldequiv = old, real_oldequiv = real_old;
+ {
+ oldequiv = old;
+ real_oldequiv = real_old;
+ }
}
else if (new_t_class == NO_REGS && sri2.icode != CODE_FOR_nothing)
{
if (reload_adjust_reg_for_temp (&intermediate, NULL,
new_class, mode)
&& reload_adjust_reg_for_icode (&third_reload_reg, NULL,
- sri2.icode))
+ ((enum insn_code)
+ sri2.icode)))
{
second_reload_reg = intermediate;
- tertiary_icode = sri2.icode;
+ tertiary_icode = (enum insn_code) sri2.icode;
}
else
- oldequiv = old, real_oldequiv = real_old;
+ {
+ oldequiv = old;
+ real_oldequiv = real_old;
+ }
}
else if (new_t_class != NO_REGS && sri2.icode == CODE_FOR_nothing)
{
new_t_class, mode))
{
second_reload_reg = intermediate;
- tertiary_icode = sri2.icode;
+ tertiary_icode = (enum insn_code) sri2.icode;
}
else
- oldequiv = old, real_oldequiv = real_old;
+ {
+ oldequiv = old;
+ real_oldequiv = real_old;
+ }
}
else
- /* This could be handled more intelligently too. */
- oldequiv = old, real_oldequiv = real_old;
+ {
+ /* This could be handled more intelligently too. */
+ oldequiv = old;
+ real_oldequiv = real_old;
+ }
}
}
}
if (flag_non_call_exceptions)
- copy_eh_notes (insn, get_insns ());
+ copy_reg_eh_region_note_forward (insn, get_insns (), NULL);
/* End this sequence. */
*where = get_insns ();
&& reg_equiv_mem[REGNO (old)] != 0)
real_old = reg_equiv_mem[REGNO (old)];
- if (secondary_reload_class (0, rl->class, mode, real_old) != NO_REGS)
+ if (secondary_reload_class (0, rl->rclass, mode, real_old) != NO_REGS)
{
rtx second_reloadreg = reloadreg;
reloadreg = rld[secondary_reload].reg_rtx;
output_reload_insns[rl->opnum] = get_insns ();
if (flag_non_call_exceptions)
- copy_eh_notes (insn, get_insns ());
+ copy_reg_eh_region_note_forward (insn, get_insns (), NULL);
end_sequence ();
}
SET_HARD_REG_BIT (reg_reloaded_valid, src_regno + k);
if (HARD_REGNO_CALL_PART_CLOBBERED (src_regno + k,
mode))
- SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
+ SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
src_regno + k);
else
CLEAR_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
CLEAR_HARD_REG_BIT (reg_reloaded_died, src_regno);
}
reg_last_reload_reg[out_regno] = src_reg;
- /* We have to set reg_has_output_reload here, or else
+ /* We have to set reg_has_output_reload here, or else
forget_old_reloads_1 will clear reg_last_reload_reg
right away. */
SET_REGNO_REG_SET (®_has_output_reload,
#ifdef SECONDARY_MEMORY_NEEDED
/* If we need a memory location to do the move, do it that way. */
- else if ((REG_P (in) || GET_CODE (in) == SUBREG)
+ else if ((REG_P (in)
+ || (GET_CODE (in) == SUBREG && REG_P (SUBREG_REG (in))))
&& reg_or_subregno (in) < FIRST_PSEUDO_REGISTER
- && (REG_P (out) || GET_CODE (out) == SUBREG)
+ && (REG_P (out)
+ || (GET_CODE (out) == SUBREG && REG_P (SUBREG_REG (out))))
&& reg_or_subregno (out) < FIRST_PSEUDO_REGISTER
&& SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (reg_or_subregno (in)),
REGNO_REG_CLASS (reg_or_subregno (out)),
reg, 0);
if (substed)
n_occurrences += count_occurrences (PATTERN (insn),
- eliminate_regs (substed, 0,
+ eliminate_regs (substed, VOIDmode,
NULL_RTX), 0);
- for (i1 = reg_equiv_alt_mem_list [REGNO (reg)]; i1; i1 = XEXP (i1, 1))
+ for (i1 = reg_equiv_alt_mem_list[REGNO (reg)]; i1; i1 = XEXP (i1, 1))
{
gcc_assert (!rtx_equal_p (XEXP (i1, 0), substed));
n_occurrences += count_occurrences (PATTERN (insn), XEXP (i1, 0), 0);
/* For the debugging info, say the pseudo lives in this reload reg. */
reg_renumber[REGNO (reg)] = REGNO (new_reload_reg);
- alter_reg (REGNO (reg), -1);
+ if (ira_conflicts_p)
+ /* Inform IRA about the change. */
+ ira_mark_allocation_change (REGNO (reg));
+ alter_reg (REGNO (reg), -1, false);
}
else
{
set2 = single_set (prev);
if (! set || ! set2
|| GET_CODE (SET_SRC (set)) != PLUS || GET_CODE (SET_SRC (set2)) != PLUS
- || GET_CODE (XEXP (SET_SRC (set), 1)) != CONST_INT
- || GET_CODE (XEXP (SET_SRC (set2), 1)) != CONST_INT)
+ || !CONST_INT_P (XEXP (SET_SRC (set), 1))
+ || !CONST_INT_P (XEXP (SET_SRC (set2), 1)))
return;
dst = SET_DEST (set);
if (! rtx_equal_p (dst, SET_DEST (set2))
emit_insn (gen_add2_insn (reloadreg, inc));
store = emit_insn (gen_move_insn (incloc, reloadreg));
- if (GET_CODE (inc) == CONST_INT)
+ if (CONST_INT_P (inc))
emit_insn (gen_add2_insn (reloadreg, GEN_INT (-INTVAL (inc))));
else
emit_insn (gen_sub2_insn (reloadreg, inc));
if (code == MEM && auto_inc_p (XEXP (x, 0)))
{
- REG_NOTES (insn)
- = gen_rtx_EXPR_LIST (REG_INC, XEXP (XEXP (x, 0), 0), REG_NOTES (insn));
+ add_reg_note (insn, REG_INC, XEXP (XEXP (x, 0), 0));
return;
}
}
#endif
-/* Copy EH notes from an insn to its reloads. */
-static void
-copy_eh_notes (rtx insn, rtx x)
-{
- rtx eh_note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
- if (eh_note)
- {
- for (; x != 0; x = NEXT_INSN (x))
- {
- if (may_trap_p (PATTERN (x)))
- REG_NOTES (x)
- = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (eh_note, 0),
- REG_NOTES (x));
- }
- }
-}
-
/* This is used by reload pass, that does emit some instructions after
abnormal calls moving basic block end, but in fact it wants to emit
them on the edge. Looks for abnormal call edges, find backward the
}
/* It may be that we don't find any such trapping insn. In this
- case we discovered quite late that the insn that had been
+ case we discovered quite late that the insn that had been
marked as can_throw_internal in fact couldn't trap at all.
So we should in fact delete the EH edges out of the block. */
else