/* Reload pseudo regs into hard regs for insns that require hard regs.
- Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+ Copyright (C) 1987, 88, 89, 92, 93, 94, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
#include <stdio.h>
#include "recog.h"
#include "basic-block.h"
#include "output.h"
+#include "real.h"
/* This file contains the reload pass of the compiler, which is
run after register allocation has been done. It checks that
elements that are actually valid; new ones are added at the end. */
static short spill_regs[FIRST_PSEUDO_REGISTER];
+/* Index of last register assigned as a spill register. We allocate in
+ a round-robin fashion. */
+
+static int last_spill_reg;
+
/* Describes order of preference for putting regs into spill_regs.
Contains the numbers of all the hard regs, in order most preferred first.
This order is different for each function.
\f
static int possible_group_p PROTO((int, int *));
static void count_possible_groups PROTO((int *, enum machine_mode *,
- int *));
+ int *, int));
static int modes_equiv_for_class_p PROTO((enum machine_mode,
enum machine_mode,
enum reg_class));
static int hard_reg_use_compare PROTO((struct hard_reg_n_uses *,
struct hard_reg_n_uses *));
static void order_regs_for_reload PROTO((void));
+static int compare_spill_regs PROTO((short *, short *));
static void reload_as_needed PROTO((rtx, int));
static void forget_old_reloads_1 PROTO((rtx, rtx));
static int reload_reg_class_lower PROTO((short *, short *));
static int reload_reg_free_p PROTO((int, int, enum reload_type));
static int reload_reg_free_before_p PROTO((int, int, enum reload_type));
static int reload_reg_reaches_end_p PROTO((int, int, enum reload_type));
+static int reloads_conflict PROTO((int, int));
static int allocate_reload_reg PROTO((int, rtx, int, int));
static void choose_reload_regs PROTO((rtx, rtx));
static void merge_assigned_reloads PROTO((rtx));
FILE *dumpfile;
{
register int class;
- register int i, j;
+ register int i, j, k;
register rtx insn;
register struct elim_table *ep;
bcopy (regs_ever_live, regs_explicitly_used, sizeof regs_ever_live);
/* We don't have a stack slot for any spill reg yet. */
- bzero (spill_stack_slot, sizeof spill_stack_slot);
- bzero (spill_stack_slot_width, sizeof spill_stack_slot_width);
+ bzero ((char *) spill_stack_slot, sizeof spill_stack_slot);
+ bzero ((char *) spill_stack_slot_width, sizeof spill_stack_slot_width);
/* Initialize the save area information for caller-save, in case some
are needed. */
be substituted eventually by altering the REG-rtx's. */
reg_equiv_constant = (rtx *) alloca (max_regno * sizeof (rtx));
- bzero (reg_equiv_constant, max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_constant, max_regno * sizeof (rtx));
reg_equiv_memory_loc = (rtx *) alloca (max_regno * sizeof (rtx));
- bzero (reg_equiv_memory_loc, max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_memory_loc, max_regno * sizeof (rtx));
reg_equiv_mem = (rtx *) alloca (max_regno * sizeof (rtx));
- bzero (reg_equiv_mem, max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_mem, max_regno * sizeof (rtx));
reg_equiv_init = (rtx *) alloca (max_regno * sizeof (rtx));
- bzero (reg_equiv_init, max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_init, max_regno * sizeof (rtx));
reg_equiv_address = (rtx *) alloca (max_regno * sizeof (rtx));
- bzero (reg_equiv_address, max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_address, max_regno * sizeof (rtx));
reg_max_ref_width = (int *) alloca (max_regno * sizeof (int));
- bzero (reg_max_ref_width, max_regno * sizeof (int));
+ bzero ((char *) reg_max_ref_width, max_regno * sizeof (int));
cannot_omit_stores = (char *) alloca (max_regno);
bzero (cannot_omit_stores, max_regno);
+#ifdef SMALL_REGISTER_CLASSES
+ CLEAR_HARD_REG_SET (forbidden_regs);
+#endif
+
/* Look for REG_EQUIV notes; record what each pseudo is equivalent to.
- Also find all paradoxical subregs
- and find largest such for each pseudo. */
+ Also find all paradoxical subregs and find largest such for each pseudo.
+ On machines with small register classes, record hard registers that
+ are used for user variables. These can never be used for spills. */
for (insn = first; insn; insn = NEXT_INSN (insn))
{
{
ep->can_eliminate = ep->can_eliminate_previous
= (CAN_ELIMINATE (ep->from, ep->to)
- && (ep->from != HARD_FRAME_POINTER_REGNUM
- || ! frame_pointer_needed));
+ && ! (ep->to == STACK_POINTER_REGNUM && frame_pointer_needed));
}
#else
reg_eliminate[0].can_eliminate = reg_eliminate[0].can_eliminate_previous
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
spill_reg_order[i] = -1;
+ /* Initialize to -1, which means take the first spill register. */
+ last_spill_reg = -1;
+
/* On most machines, we can't use any register explicitly used in the
rtl as a spill register. But on some, we have to. Those will have
taken care to keep the life of hard regs as short as possible. */
-#ifdef SMALL_REGISTER_CLASSES
- CLEAR_HARD_REG_SET (forbidden_regs);
-#else
+#ifndef SMALL_REGISTER_CLASSES
COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
#endif
/* Spill any hard regs that we know we can't eliminate. */
  for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
if (! ep->can_eliminate)
- {
- spill_hard_reg (ep->from, global, dumpfile, 1);
- regs_ever_live[ep->from] = 1;
- }
+ spill_hard_reg (ep->from, global, dumpfile, 1);
+
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ if (frame_pointer_needed)
+ spill_hard_reg (HARD_FRAME_POINTER_REGNUM, global, dumpfile, 1);
+#endif
if (global)
for (i = 0; i < N_REG_CLASSES; i++)
{
- basic_block_needs[i] = (char *)alloca (n_basic_blocks);
+ basic_block_needs[i] = (char *) alloca (n_basic_blocks);
bzero (basic_block_needs[i], n_basic_blocks);
}
rtx max_nongroups_insn[N_REG_CLASSES];
rtx x;
int starting_frame_size = get_frame_size ();
+ int previous_frame_pointer_needed = frame_pointer_needed;
static char *reg_class_names[] = REG_CLASS_NAMES;
something_changed = 0;
- bzero (max_needs, sizeof max_needs);
- bzero (max_groups, sizeof max_groups);
- bzero (max_nongroups, sizeof max_nongroups);
- bzero (max_needs_insn, sizeof max_needs_insn);
- bzero (max_groups_insn, sizeof max_groups_insn);
- bzero (max_nongroups_insn, sizeof max_nongroups_insn);
- bzero (group_size, sizeof group_size);
+ bzero ((char *) max_needs, sizeof max_needs);
+ bzero ((char *) max_groups, sizeof max_groups);
+ bzero ((char *) max_nongroups, sizeof max_nongroups);
+ bzero ((char *) max_needs_insn, sizeof max_needs_insn);
+ bzero ((char *) max_groups_insn, sizeof max_groups_insn);
+ bzero ((char *) max_nongroups_insn, sizeof max_nongroups_insn);
+ bzero ((char *) group_size, sizeof group_size);
for (i = 0; i < N_REG_CLASSES; i++)
group_mode[i] = VOIDmode;
num_not_at_initial_offset = 0;
- bzero (&offsets_known_at[get_first_label_num ()], num_labels);
+ bzero ((char *) &offsets_known_at[get_first_label_num ()], num_labels);
/* Set a known offset for each forced label to be at the initial offset
of each elimination. We do this because we assume that all
int old_code = INSN_CODE (insn);
rtx old_notes = REG_NOTES (insn);
int did_elimination = 0;
- int max_total_input_groups = 0, max_total_output_groups = 0;
/* To compute the number of reload registers of each class
- needed for an insn, we must similate what choose_reload_regs
+ needed for an insn, we must simulate what choose_reload_regs
can do. We do this by splitting an insn into an "input" and
an "output" part. RELOAD_OTHER reloads are used in both.
The input part uses those reloads, RELOAD_FOR_INPUT reloads,
The total number of registers needed is the maximum of the
inputs and outputs. */
- /* These just count RELOAD_OTHER. */
- int insn_needs[N_REG_CLASSES];
- int insn_groups[N_REG_CLASSES];
- int insn_total_groups = 0;
-
- /* Count RELOAD_FOR_INPUT reloads. */
- int insn_needs_for_inputs[N_REG_CLASSES];
- int insn_groups_for_inputs[N_REG_CLASSES];
- int insn_total_groups_for_inputs = 0;
-
- /* Count RELOAD_FOR_OUTPUT reloads. */
- int insn_needs_for_outputs[N_REG_CLASSES];
- int insn_groups_for_outputs[N_REG_CLASSES];
- int insn_total_groups_for_outputs = 0;
-
- /* Count RELOAD_FOR_INSN reloads. */
- int insn_needs_for_insn[N_REG_CLASSES];
- int insn_groups_for_insn[N_REG_CLASSES];
- int insn_total_groups_for_insn = 0;
-
- /* Count RELOAD_FOR_OTHER_ADDRESS reloads. */
- int insn_needs_for_other_addr[N_REG_CLASSES];
- int insn_groups_for_other_addr[N_REG_CLASSES];
- int insn_total_groups_for_other_addr = 0;
-
- /* Count RELOAD_FOR_INPUT_ADDRESS reloads. */
- int insn_needs_for_in_addr[MAX_RECOG_OPERANDS][N_REG_CLASSES];
- int insn_groups_for_in_addr[MAX_RECOG_OPERANDS][N_REG_CLASSES];
- int insn_total_groups_for_in_addr[MAX_RECOG_OPERANDS];
-
- /* Count RELOAD_FOR_OUTPUT_ADDRESS reloads. */
- int insn_needs_for_out_addr[MAX_RECOG_OPERANDS][N_REG_CLASSES];
- int insn_groups_for_out_addr[MAX_RECOG_OPERANDS][N_REG_CLASSES];
- int insn_total_groups_for_out_addr[MAX_RECOG_OPERANDS];
-
- /* Count RELOAD_FOR_OPERAND_ADDRESS reloads. */
- int insn_needs_for_op_addr[N_REG_CLASSES];
- int insn_groups_for_op_addr[N_REG_CLASSES];
- int insn_total_groups_for_op_addr = 0;
-
-#if 0 /* This wouldn't work nowadays, since optimize_bit_field
- looks for non-strict memory addresses. */
- /* Optimization: a bit-field instruction whose field
- happens to be a byte or halfword in memory
- can be changed to a move instruction. */
-
- if (GET_CODE (PATTERN (insn)) == SET)
+ struct needs
{
- rtx dest = SET_DEST (PATTERN (insn));
- rtx src = SET_SRC (PATTERN (insn));
-
- if (GET_CODE (dest) == ZERO_EXTRACT
- || GET_CODE (dest) == SIGN_EXTRACT)
- optimize_bit_field (PATTERN (insn), insn, reg_equiv_mem);
- if (GET_CODE (src) == ZERO_EXTRACT
- || GET_CODE (src) == SIGN_EXTRACT)
- optimize_bit_field (PATTERN (insn), insn, reg_equiv_mem);
- }
-#endif
+ /* [0] is normal, [1] is nongroup. */
+ int regs[2][N_REG_CLASSES];
+ int groups[N_REG_CLASSES];
+ };
+
+ /* Each `struct needs' corresponds to one RELOAD_... type. */
+ struct {
+ struct needs other;
+ struct needs input;
+ struct needs output;
+ struct needs insn;
+ struct needs other_addr;
+ struct needs op_addr;
+ struct needs op_addr_reload;
+ struct needs in_addr[MAX_RECOG_OPERANDS];
+ struct needs out_addr[MAX_RECOG_OPERANDS];
+ } insn_needs;
/* If needed, eliminate any eliminable registers. */
if (num_eliminable)
&& !(GET_CODE (PATTERN (insn)) == SET
&& SET_DEST (PATTERN (insn)) == stack_pointer_rtx))
{
- if (reg_mentioned_p (after_call, PATTERN (insn)))
+ if (reg_referenced_p (after_call, PATTERN (insn)))
avoid_return_reg = after_call;
after_call = 0;
}
continue;
something_needs_reloads = 1;
+ bzero ((char *) &insn_needs, sizeof insn_needs);
- for (i = 0; i < N_REG_CLASSES; i++)
- {
- insn_needs[i] = 0, insn_groups[i] = 0;
- insn_needs_for_inputs[i] = 0, insn_groups_for_inputs[i] = 0;
- insn_needs_for_outputs[i] = 0, insn_groups_for_outputs[i] = 0;
- insn_needs_for_insn[i] = 0, insn_groups_for_insn[i] = 0;
- insn_needs_for_op_addr[i] = 0, insn_groups_for_op_addr[i] = 0;
- insn_needs_for_other_addr[i] = 0;
- insn_groups_for_other_addr[i] = 0;
- }
-
- for (i = 0; i < reload_n_operands; i++)
- {
- insn_total_groups_for_in_addr[i] = 0;
- insn_total_groups_for_out_addr[i] = 0;
-
- for (j = 0; j < N_REG_CLASSES; j++)
- {
- insn_needs_for_in_addr[i][j] = 0;
- insn_needs_for_out_addr[i][j] = 0;
- insn_groups_for_in_addr[i][j] = 0;
- insn_groups_for_out_addr[i][j] = 0;
- }
- }
-
/* Count each reload once in every class
containing the reload's own class. */
enum reg_class class = reload_reg_class[i];
int size;
enum machine_mode mode;
- int *this_groups;
- int *this_needs;
- int *this_total_groups;
+ int nongroup_need;
+ struct needs *this_needs;
/* Don't count the dummy reloads, for which one of the
regs mentioned in the insn can be used for reloading.
new_basic_block_needs = 1;
}
+
+ mode = reload_inmode[i];
+ if (GET_MODE_SIZE (reload_outmode[i]) > GET_MODE_SIZE (mode))
+ mode = reload_outmode[i];
+ size = CLASS_MAX_NREGS (class, mode);
+
+ /* If this class doesn't want a group, determine if we have
+ a nongroup need or a regular need. We have a nongroup
+ need if this reload conflicts with a group reload whose
+ class intersects with this reload's class. */
+
+ nongroup_need = 0;
+ if (size == 1)
+ for (j = 0; j < n_reloads; j++)
+ if ((CLASS_MAX_NREGS (reload_reg_class[j],
+ (GET_MODE_SIZE (reload_outmode[j])
+ > GET_MODE_SIZE (reload_inmode[j]))
+ ? reload_outmode[j]
+ : reload_inmode[j])
+ > 1)
+ && (!reload_optional[j])
+ && (reload_in[j] != 0 || reload_out[j] != 0
+ || reload_secondary_p[j])
+ && reloads_conflict (i, j)
+ && reg_classes_intersect_p (class,
+ reload_reg_class[j]))
+ {
+ nongroup_need = 1;
+ break;
+ }
+
/* Decide which time-of-use to count this reload for. */
switch (reload_when_needed[i])
{
case RELOAD_OTHER:
- this_needs = insn_needs;
- this_groups = insn_groups;
- this_total_groups = &insn_total_groups;
+ this_needs = &insn_needs.other;
break;
-
case RELOAD_FOR_INPUT:
- this_needs = insn_needs_for_inputs;
- this_groups = insn_groups_for_inputs;
- this_total_groups = &insn_total_groups_for_inputs;
+ this_needs = &insn_needs.input;
break;
-
case RELOAD_FOR_OUTPUT:
- this_needs = insn_needs_for_outputs;
- this_groups = insn_groups_for_outputs;
- this_total_groups = &insn_total_groups_for_outputs;
+ this_needs = &insn_needs.output;
break;
-
case RELOAD_FOR_INSN:
- this_needs = insn_needs_for_insn;
- this_groups = insn_groups_for_insn;
- this_total_groups = &insn_total_groups_for_insn;
+ this_needs = &insn_needs.insn;
break;
-
case RELOAD_FOR_OTHER_ADDRESS:
- this_needs = insn_needs_for_other_addr;
- this_groups = insn_groups_for_other_addr;
- this_total_groups = &insn_total_groups_for_other_addr;
+ this_needs = &insn_needs.other_addr;
break;
-
case RELOAD_FOR_INPUT_ADDRESS:
- this_needs = insn_needs_for_in_addr[reload_opnum[i]];
- this_groups = insn_groups_for_in_addr[reload_opnum[i]];
- this_total_groups
- = &insn_total_groups_for_in_addr[reload_opnum[i]];
+ this_needs = &insn_needs.in_addr[reload_opnum[i]];
break;
-
case RELOAD_FOR_OUTPUT_ADDRESS:
- this_needs = insn_needs_for_out_addr[reload_opnum[i]];
- this_groups = insn_groups_for_out_addr[reload_opnum[i]];
- this_total_groups
- = &insn_total_groups_for_out_addr[reload_opnum[i]];
+ this_needs = &insn_needs.out_addr[reload_opnum[i]];
break;
-
case RELOAD_FOR_OPERAND_ADDRESS:
- this_needs = insn_needs_for_op_addr;
- this_groups = insn_groups_for_op_addr;
- this_total_groups = &insn_total_groups_for_op_addr;
+ this_needs = &insn_needs.op_addr;
+ break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ this_needs = &insn_needs.op_addr_reload;
break;
}
- mode = reload_inmode[i];
- if (GET_MODE_SIZE (reload_outmode[i]) > GET_MODE_SIZE (mode))
- mode = reload_outmode[i];
- size = CLASS_MAX_NREGS (class, mode);
if (size > 1)
{
enum machine_mode other_mode, allocate_mode;
/* Count number of groups needed separately from
number of individual regs needed. */
- this_groups[(int) class]++;
+ this_needs->groups[(int) class]++;
p = reg_class_superclasses[(int) class];
while (*p != LIM_REG_CLASSES)
- this_groups[(int) *p++]++;
- (*this_total_groups)++;
+ this_needs->groups[(int) *p++]++;
/* Record size and mode of a group of this class. */
/* If more than one size group is needed,
/* Crash if two dissimilar machine modes both need
groups of consecutive regs of the same class. */
- if (other_mode != VOIDmode
- && other_mode != allocate_mode
+ if (other_mode != VOIDmode && other_mode != allocate_mode
&& ! modes_equiv_for_class_p (allocate_mode,
- other_mode,
- class))
- abort ();
+ other_mode, class))
+ fatal_insn ("Two dissimilar machine modes both need groups of consecutive regs of the same class",
+ insn);
}
else if (size == 1)
{
- this_needs[(int) class] += 1;
+ this_needs->regs[nongroup_need][(int) class] += 1;
p = reg_class_superclasses[(int) class];
while (*p != LIM_REG_CLASSES)
- this_needs[(int) *p++] += 1;
+ this_needs->regs[nongroup_need][(int) *p++] += 1;
}
else
abort ();
{
int in_max, out_max;
- for (in_max = 0, out_max = 0, j = 0;
- j < reload_n_operands; j++)
+ /* Compute normal and nongroup needs. */
+ for (j = 0; j <= 1; j++)
{
- in_max = MAX (in_max, insn_needs_for_in_addr[j][i]);
- out_max = MAX (out_max, insn_needs_for_out_addr[j][i]);
- }
+ for (in_max = 0, out_max = 0, k = 0;
+ k < reload_n_operands; k++)
+ {
+ in_max
+ = MAX (in_max, insn_needs.in_addr[k].regs[j][i]);
+ out_max
+ = MAX (out_max, insn_needs.out_addr[k].regs[j][i]);
+ }
+
+ /* RELOAD_FOR_INSN reloads conflict with inputs, outputs,
+ and operand addresses but not things used to reload
+ them. Similarly, RELOAD_FOR_OPERAND_ADDRESS reloads
+ don't conflict with things needed to reload inputs or
+ outputs. */
- /* RELOAD_FOR_INSN reloads conflict with inputs, outputs,
- and operand addresses but not things used to reload them.
- Similarly, RELOAD_FOR_OPERAND_ADDRESS reloads don't
- conflict with things needed to reload inputs or
- outputs. */
+ in_max = MAX (MAX (insn_needs.op_addr.regs[j][i],
+ insn_needs.op_addr_reload.regs[j][i]),
+ in_max);
- in_max = MAX (in_max, insn_needs_for_op_addr[i]);
- out_max = MAX (out_max, insn_needs_for_insn[i]);
+ out_max = MAX (out_max, insn_needs.insn.regs[j][i]);
- insn_needs_for_inputs[i]
- = MAX (insn_needs_for_inputs[i]
- + insn_needs_for_op_addr[i]
- + insn_needs_for_insn[i],
- in_max + insn_needs_for_inputs[i]);
+ insn_needs.input.regs[j][i]
+ = MAX (insn_needs.input.regs[j][i]
+ + insn_needs.op_addr.regs[j][i]
+ + insn_needs.insn.regs[j][i],
+ in_max + insn_needs.input.regs[j][i]);
- insn_needs_for_outputs[i] += out_max;
- insn_needs[i] += MAX (MAX (insn_needs_for_inputs[i],
- insn_needs_for_outputs[i]),
- insn_needs_for_other_addr[i]);
+ insn_needs.output.regs[j][i] += out_max;
+ insn_needs.other.regs[j][i]
+ += MAX (MAX (insn_needs.input.regs[j][i],
+ insn_needs.output.regs[j][i]),
+ insn_needs.other_addr.regs[j][i]);
+
+ }
+ /* Now compute group needs. */
for (in_max = 0, out_max = 0, j = 0;
j < reload_n_operands; j++)
{
- in_max = MAX (in_max, insn_groups_for_in_addr[j][i]);
- out_max = MAX (out_max, insn_groups_for_out_addr[j][i]);
+ in_max = MAX (in_max, insn_needs.in_addr[j].groups[i]);
+ out_max
+ = MAX (out_max, insn_needs.out_addr[j].groups[i]);
}
- in_max = MAX (in_max, insn_groups_for_op_addr[i]);
- out_max = MAX (out_max, insn_groups_for_insn[i]);
-
- insn_groups_for_inputs[i]
- = MAX (insn_groups_for_inputs[i]
- + insn_groups_for_op_addr[i]
- + insn_groups_for_insn[i],
- in_max + insn_groups_for_inputs[i]);
-
- insn_groups_for_outputs[i] += out_max;
- insn_groups[i] += MAX (MAX (insn_groups_for_inputs[i],
- insn_groups_for_outputs[i]),
- insn_groups_for_other_addr[i]);
- }
-
- for (i = 0; i < reload_n_operands; i++)
- {
- max_total_input_groups
- = MAX (max_total_input_groups,
- insn_total_groups_for_in_addr[i]);
- max_total_output_groups
- = MAX (max_total_output_groups,
- insn_total_groups_for_out_addr[i]);
+ in_max = MAX (MAX (insn_needs.op_addr.groups[i],
+ insn_needs.op_addr_reload.groups[i]),
+ in_max);
+ out_max = MAX (out_max, insn_needs.insn.groups[i]);
+
+ insn_needs.input.groups[i]
+ = MAX (insn_needs.input.groups[i]
+ + insn_needs.op_addr.groups[i]
+ + insn_needs.insn.groups[i],
+ in_max + insn_needs.input.groups[i]);
+
+ insn_needs.output.groups[i] += out_max;
+ insn_needs.other.groups[i]
+ += MAX (MAX (insn_needs.input.groups[i],
+ insn_needs.output.groups[i]),
+ insn_needs.other_addr.groups[i]);
}
- max_total_input_groups = MAX (max_total_input_groups,
- insn_total_groups_for_op_addr);
- max_total_output_groups = MAX (max_total_output_groups,
- insn_total_groups_for_insn);
-
- insn_total_groups_for_inputs
- = MAX (max_total_input_groups + insn_total_groups_for_op_addr
- + insn_total_groups_for_insn,
- max_total_input_groups + insn_total_groups_for_inputs);
-
- insn_total_groups_for_outputs += max_total_output_groups;
-
- insn_total_groups += MAX (MAX (insn_total_groups_for_outputs,
- insn_total_groups_for_inputs),
- insn_total_groups_for_other_addr);
-
/* If this is a CALL_INSN and caller-saves will need
a spill register, act as if the spill register is
needed for this insn. However, the spill register
if (GET_CODE (insn) == CALL_INSN
&& caller_save_spill_class != NO_REGS)
{
- int *caller_save_needs
- = (caller_save_group_size > 1 ? insn_groups : insn_needs);
+ /* See if this register would conflict with any reload
+ that needs a group. */
+ int nongroup_need = 0;
+ int *caller_save_needs;
+
+ for (j = 0; j < n_reloads; j++)
+ if ((CLASS_MAX_NREGS (reload_reg_class[j],
+ (GET_MODE_SIZE (reload_outmode[j])
+ > GET_MODE_SIZE (reload_inmode[j]))
+ ? reload_outmode[j]
+ : reload_inmode[j])
+ > 1)
+ && reg_classes_intersect_p (caller_save_spill_class,
+ reload_reg_class[j]))
+ {
+ nongroup_need = 1;
+ break;
+ }
+
+ caller_save_needs
+ = (caller_save_group_size > 1
+ ? insn_needs.other.groups
+ : insn_needs.other.regs[nongroup_need]);
if (caller_save_needs[(int) caller_save_spill_class] == 0)
{
caller_save_needs[(int) *p++] += 1;
}
- if (caller_save_group_size > 1)
- insn_total_groups = MAX (insn_total_groups, 1);
-
-
- /* Show that this basic block will need a register of
+ /* Show that this basic block will need a register of
this class. */
- if (global
- && ! (basic_block_needs[(int) caller_save_spill_class]
- [this_block]))
- {
- basic_block_needs[(int) caller_save_spill_class]
- [this_block] = 1;
- new_basic_block_needs = 1;
- }
+ if (global
+ && ! (basic_block_needs[(int) caller_save_spill_class]
+ [this_block]))
+ {
+ basic_block_needs[(int) caller_save_spill_class]
+ [this_block] = 1;
+ new_basic_block_needs = 1;
+ }
}
#ifdef SMALL_REGISTER_CLASSES
     then add an extra need in that class.
This makes sure we have a register available that does
not overlap the return value. */
+
if (avoid_return_reg)
{
int regno = REGNO (avoid_return_reg);
need only in the smallest class in which it
is required. */
- bcopy (insn_needs, basic_needs, sizeof basic_needs);
- bcopy (insn_groups, basic_groups, sizeof basic_groups);
+ bcopy ((char *) insn_needs.other.regs[0],
+ (char *) basic_needs, sizeof basic_needs);
+ bcopy ((char *) insn_needs.other.groups,
+ (char *) basic_groups, sizeof basic_groups);
for (i = 0; i < N_REG_CLASSES; i++)
{
}
/* Now count extra regs if there might be a conflict with
- the return value register.
+ the return value register. */
- ??? This is not quite correct because we don't properly
- handle the case of groups, but if we end up doing
- something wrong, it either will end up not mattering or
- we will abort elsewhere. */
-
for (r = regno; r < regno + nregs; r++)
if (spill_reg_order[r] >= 0)
for (i = 0; i < N_REG_CLASSES; i++)
if (TEST_HARD_REG_BIT (reg_class_contents[i], r))
{
- if (basic_needs[i] > 0 || basic_groups[i] > 0)
+ if (basic_needs[i] > 0)
+ {
+ enum reg_class *p;
+
+ insn_needs.other.regs[0][i]++;
+ p = reg_class_superclasses[i];
+ while (*p != LIM_REG_CLASSES)
+ insn_needs.other.regs[0][(int) *p++]++;
+ }
+ if (basic_groups[i] > 0)
{
enum reg_class *p;
- insn_needs[i]++;
+ insn_needs.other.groups[i]++;
p = reg_class_superclasses[i];
while (*p != LIM_REG_CLASSES)
- insn_needs[(int) *p++]++;
+ insn_needs.other.groups[(int) *p++]++;
}
}
}
for (i = 0; i < N_REG_CLASSES; i++)
{
- if (max_needs[i] < insn_needs[i])
+ if (max_needs[i] < insn_needs.other.regs[0][i])
{
- max_needs[i] = insn_needs[i];
+ max_needs[i] = insn_needs.other.regs[0][i];
max_needs_insn[i] = insn;
}
- if (max_groups[i] < insn_groups[i])
+ if (max_groups[i] < insn_needs.other.groups[i])
{
- max_groups[i] = insn_groups[i];
+ max_groups[i] = insn_needs.other.groups[i];
max_groups_insn[i] = insn;
}
- if (insn_total_groups > 0)
- if (max_nongroups[i] < insn_needs[i])
- {
- max_nongroups[i] = insn_needs[i];
- max_nongroups_insn[i] = insn;
- }
+ if (max_nongroups[i] < insn_needs.other.regs[1][i])
+ {
+ max_nongroups[i] = insn_needs.other.regs[1][i];
+ max_nongroups_insn[i] = insn;
+ }
}
}
/* Note that there is a continue statement above. */
{
ep->can_eliminate_previous = 0;
spill_hard_reg (ep->from, global, dumpfile, 1);
- regs_ever_live[ep->from] = 1;
something_changed = 1;
num_eliminable--;
}
}
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ /* If we didn't need a frame pointer last time, but we do now, spill
+ the hard frame pointer. */
+ if (frame_pointer_needed && ! previous_frame_pointer_needed)
+ {
+ spill_hard_reg (HARD_FRAME_POINTER_REGNUM, global, dumpfile, 1);
+ something_changed = 1;
+ }
+#endif
+
/* If all needs are met, we win. */
for (i = 0; i < N_REG_CLASSES; i++)
/* Put all registers spilled so far back in potential_reload_regs, but
put them at the front, since we've already spilled most of the
- psuedos in them (we might have left some pseudos unspilled if they
+ pseudos in them (we might have left some pseudos unspilled if they
were in a block that didn't need any spill registers of a conflicting
class. We used to try to mark off the need for those registers,
but doing so properly is very complex and reallocating them is the
/* If any single spilled regs happen to form groups,
count them now. Maybe we don't really need
to spill another group. */
- count_possible_groups (group_size, group_mode, max_groups);
+ count_possible_groups (group_size, group_mode, max_groups,
+ class);
if (max_groups[class] <= 0)
break;
max_groups[class]--;
p = reg_class_superclasses[class];
while (*p != LIM_REG_CLASSES)
- max_groups[(int) *p++]--;
+ {
+ if (group_size [(int) *p] <= group_size [class])
+ max_groups[(int) *p]--;
+ p++;
+ }
/* Indicate both these regs are part of a group. */
SET_HARD_REG_BIT (counted_for_groups, j);
max_groups[class]--;
p = reg_class_superclasses[class];
while (*p != LIM_REG_CLASSES)
- max_groups[(int) *p++]--;
-
+ {
+ if (group_size [(int) *p]
+ <= group_size [class])
+ max_groups[(int) *p]--;
+ p++;
+ }
break;
}
}
return 0;
}
\f
-/* Count any groups that can be formed from the registers recently spilled.
- This is done class by class, in order of ascending class number. */
+/* Count any groups of CLASS that can be formed from the registers recently
+ spilled. */
static void
-count_possible_groups (group_size, group_mode, max_groups)
+count_possible_groups (group_size, group_mode, max_groups, class)
int *group_size;
enum machine_mode *group_mode;
int *max_groups;
+ int class;
{
- int i;
+ HARD_REG_SET new;
+ int i, j;
+
/* Now find all consecutive groups of spilled registers
and mark each group off against the need for such groups.
But don't count them against ordinary need, yet. */
- for (i = 0; i < N_REG_CLASSES; i++)
- if (group_size[i] > 1)
+ if (group_size[class] == 0)
+ return;
+
+ CLEAR_HARD_REG_SET (new);
+
+ /* Make a mask of all the regs that are spill regs in class I. */
+ for (i = 0; i < n_spills; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[class], spill_regs[i])
+ && ! TEST_HARD_REG_BIT (counted_for_groups, spill_regs[i])
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups, spill_regs[i]))
+ SET_HARD_REG_BIT (new, spill_regs[i]);
+
+ /* Find each consecutive group of them. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER && max_groups[class] > 0; i++)
+ if (TEST_HARD_REG_BIT (new, i)
+ && i + group_size[class] <= FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_MODE_OK (i, group_mode[class]))
{
- HARD_REG_SET new;
- int j;
-
- CLEAR_HARD_REG_SET (new);
-
- /* Make a mask of all the regs that are spill regs in class I. */
- for (j = 0; j < n_spills; j++)
- if (TEST_HARD_REG_BIT (reg_class_contents[i], spill_regs[j])
- && ! TEST_HARD_REG_BIT (counted_for_groups, spill_regs[j])
- && ! TEST_HARD_REG_BIT (counted_for_nongroups,
- spill_regs[j]))
- SET_HARD_REG_BIT (new, spill_regs[j]);
-
- /* Find each consecutive group of them. */
- for (j = 0; j < FIRST_PSEUDO_REGISTER && max_groups[i] > 0; j++)
- if (TEST_HARD_REG_BIT (new, j)
- && j + group_size[i] <= FIRST_PSEUDO_REGISTER
- /* Next line in case group-mode for this class
- demands an even-odd pair. */
- && HARD_REGNO_MODE_OK (j, group_mode[i]))
- {
- int k;
- for (k = 1; k < group_size[i]; k++)
- if (! TEST_HARD_REG_BIT (new, j + k))
- break;
- if (k == group_size[i])
- {
- /* We found a group. Mark it off against this class's
- need for groups, and against each superclass too. */
- register enum reg_class *p;
- max_groups[i]--;
- p = reg_class_superclasses[i];
- while (*p != LIM_REG_CLASSES)
- max_groups[(int) *p++]--;
- /* Don't count these registers again. */
- for (k = 0; k < group_size[i]; k++)
- SET_HARD_REG_BIT (counted_for_groups, j + k);
- }
- /* Skip to the last reg in this group. When j is incremented
- above, it will then point to the first reg of the next
- possible group. */
- j += k - 1;
- }
- }
+ for (j = 1; j < group_size[class]; j++)
+ if (! TEST_HARD_REG_BIT (new, i + j))
+ break;
+
+ if (j == group_size[class])
+ {
+ /* We found a group. Mark it off against this class's need for
+ groups, and against each superclass too. */
+ register enum reg_class *p;
+ max_groups[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ {
+ if (group_size [(int) *p] <= group_size [class])
+ max_groups[(int) *p]--;
+ p++;
+ }
+
+ /* Don't count these registers again. */
+ for (j = 0; j < group_size[class]; j++)
+ SET_HARD_REG_BIT (counted_for_groups, i + j);
+ }
+
+ /* Skip to the last reg in this group. When i is incremented above,
+ it will then point to the first reg of the next possible group. */
+ i += j - 1;
+ }
}
\f
/* ALLOCATE_MODE is a register mode that needs to be reloaded. OTHER_MODE is
if (asm_noperands (PATTERN (insn)) >= 0)
error_for_asm (insn, "`asm' needs too many reloads");
else
- abort ();
+ fatal_insn ("Unable to find a register to spill.", insn);
}
/* Add a new register to the tables of available spill-registers
if (fixed_regs[regno] || TEST_HARD_REG_BIT (forbidden_regs, regno))
fatal ("fixed or forbidden register was spilled.\n\
-This may be due to a compiler bug or to impossible asm statements.");
+This may be due to a compiler bug or to impossible asm\n\
+statements or clauses.");
/* Make reg REGNO an additional reload reg. */
{
/* No known place to spill from => no slot to reuse. */
x = assign_stack_local (GET_MODE (regno_reg_rtx[i]), total_size, -1);
-#if BYTES_BIG_ENDIAN
- /* Cancel the big-endian correction done in assign_stack_local.
- Get the address of the beginning of the slot.
- This is so we can do a big-endian correction unconditionally
- below. */
- adjust = inherent_size - total_size;
-#endif
+ if (BYTES_BIG_ENDIAN)
+ /* Cancel the big-endian correction done in assign_stack_local.
+ Get the address of the beginning of the slot.
+ This is so we can do a big-endian correction unconditionally
+ below. */
+ adjust = inherent_size - total_size;
+
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (regno_reg_rtx[i]);
}
/* Reuse a stack slot if possible. */
else if (spill_stack_slot[from_reg] != 0
/* Compute maximum size needed, both for inherent size
and for total size. */
enum machine_mode mode = GET_MODE (regno_reg_rtx[i]);
+ rtx stack_slot;
if (spill_stack_slot[from_reg])
{
if (GET_MODE_SIZE (GET_MODE (spill_stack_slot[from_reg]))
}
/* Make a slot with that size. */
x = assign_stack_local (mode, total_size, -1);
-#if BYTES_BIG_ENDIAN
- /* Cancel the big-endian correction done in assign_stack_local.
- Get the address of the beginning of the slot.
- This is so we can do a big-endian correction unconditionally
- below. */
- adjust = GET_MODE_SIZE (mode) - total_size;
-#endif
- spill_stack_slot[from_reg] = x;
+ stack_slot = x;
+ if (BYTES_BIG_ENDIAN)
+ {
+ /* Cancel the big-endian correction done in assign_stack_local.
+ Get the address of the beginning of the slot.
+ This is so we can do a big-endian correction unconditionally
+ below. */
+ adjust = GET_MODE_SIZE (mode) - total_size;
+ if (adjust)
+ stack_slot = gen_rtx (MEM, mode_for_size (total_size
+ * BITS_PER_UNIT,
+ MODE_INT, 1),
+ plus_constant (XEXP (x, 0), adjust));
+ }
+ spill_stack_slot[from_reg] = stack_slot;
spill_stack_slot_width[from_reg] = total_size;
}
-#if BYTES_BIG_ENDIAN
/* On a big endian machine, the "address" of the slot
is the address of the low part that fits its inherent mode. */
- if (inherent_size < total_size)
+ if (BYTES_BIG_ENDIAN && inherent_size < total_size)
adjust += (total_size - inherent_size);
-#endif /* BYTES_BIG_ENDIAN */
/* If we have any adjustment to make, or if the stack slot is the
wrong mode, make a new stack slot. */
/* If this is the product of an eliminable register and a
constant, apply the distribute law and move the constant out
so that we have (plus (mult ..) ..). This is needed in order
- to keep load-address insns valid. This case is pathalogical.
+ to keep load-address insns valid. This case is pathological.
We ignore the possibility of overflow here. */
if (GET_CODE (XEXP (x, 0)) == REG
&& REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
case DIV: case UDIV:
case MOD: case UMOD:
case AND: case IOR: case XOR:
- case LSHIFT: case ASHIFT: case ROTATE:
- case ASHIFTRT: case LSHIFTRT: case ROTATERT:
+ case ROTATERT: case ROTATE:
+ case ASHIFTRT: case LSHIFTRT: case ASHIFT:
case NE: case EQ:
case GE: case GT: case GEU: case GTU:
case LE: case LT: case LEU: case LTU:
/* If we didn't change anything, we must retain the pseudo. */
if (new == reg_equiv_memory_loc[REGNO (SUBREG_REG (x))])
- new = XEXP (x, 0);
+ new = SUBREG_REG (x);
else
- /* Otherwise, ensure NEW isn't shared in case we have to reload
- it. */
- new = copy_rtx (new);
+ {
+ /* Otherwise, ensure NEW isn't shared in case we have to reload
+ it. */
+ new = copy_rtx (new);
+
+ /* In this case, we must show that the pseudo is used in this
+ insn so that delete_output_reload will do the right thing. */
+ if (insn != 0 && GET_CODE (insn) != EXPR_LIST
+ && GET_CODE (insn) != INSN_LIST)
+ emit_insn_before (gen_rtx (USE, VOIDmode, SUBREG_REG (x)),
+ insn);
+ }
}
else
new = eliminate_regs (SUBREG_REG (x), mem_mode, insn);
smaller. So leave the SUBREG then. */
&& ! (GET_CODE (SUBREG_REG (x)) == REG
&& GET_MODE_SIZE (GET_MODE (x)) <= UNITS_PER_WORD
- && GET_MODE_SIZE (GET_MODE (new)) <= UNITS_PER_WORD)
+ && GET_MODE_SIZE (GET_MODE (new)) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (new)))
+ && INTEGRAL_MODE_P (GET_MODE (new))
+ && LOAD_EXTEND_OP (GET_MODE (new)) != NIL)
#endif
)
{
int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
enum machine_mode mode = GET_MODE (x);
-#if BYTES_BIG_ENDIAN
- offset += (MIN (UNITS_PER_WORD,
- GET_MODE_SIZE (GET_MODE (new)))
- - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
-#endif
+ if (BYTES_BIG_ENDIAN)
+ offset += (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (new)))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
PUT_MODE (new, mode);
XEXP (new, 0) = plus_constant (XEXP (new, 0), offset);
if (new != XEXP (x, i) && ! copied)
{
rtx new_x = rtx_alloc (code);
- bcopy (x, new_x, (sizeof (*new_x) - sizeof (new_x->fld)
- + (sizeof (new_x->fld[0])
- * GET_RTX_LENGTH (code))));
+ bcopy ((char *) x, (char *) new_x,
+ (sizeof (*new_x) - sizeof (new_x->fld)
+ + sizeof (new_x->fld[0]) * GET_RTX_LENGTH (code)));
x = new_x;
copied = 1;
}
if (! copied)
{
rtx new_x = rtx_alloc (code);
- bcopy (x, new_x, (sizeof (*new_x) - sizeof (new_x->fld)
- + (sizeof (new_x->fld[0])
- * GET_RTX_LENGTH (code))));
+ bcopy ((char *) x, (char *) new_x,
+ (sizeof (*new_x) - sizeof (new_x->fld)
+ + (sizeof (new_x->fld[0])
+ * GET_RTX_LENGTH (code))));
x = new_x;
copied = 1;
}
int replace;
{
rtx old_body = PATTERN (insn);
+ rtx old_set = single_set (insn);
rtx new_body;
int val = 0;
struct elim_table *ep;
if (! replace)
push_obstacks (&reload_obstack, &reload_obstack);
- if (GET_CODE (old_body) == SET && GET_CODE (SET_DEST (old_body)) == REG
- && REGNO (SET_DEST (old_body)) < FIRST_PSEUDO_REGISTER)
+ if (old_set != 0 && GET_CODE (SET_DEST (old_set)) == REG
+ && REGNO (SET_DEST (old_set)) < FIRST_PSEUDO_REGISTER)
{
/* Check for setting an eliminable register. */
for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++)
- if (ep->from_rtx == SET_DEST (old_body) && ep->can_eliminate)
+ if (ep->from_rtx == SET_DEST (old_set) && ep->can_eliminate)
{
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ /* If this is setting the frame pointer register to the
+ hardware frame pointer register and this is an elimination
+ that will be done (tested above), this insn is really
+ adjusting the frame pointer downward to compensate for
+ the adjustment done before a nonlocal goto. */
+ if (ep->from == FRAME_POINTER_REGNUM
+ && ep->to == HARD_FRAME_POINTER_REGNUM)
+ {
+ rtx src = SET_SRC (old_set);
+ int offset, ok = 0;
+
+ if (src == ep->to_rtx)
+ offset = 0, ok = 1;
+ else if (GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 0)) == CONST_INT)
+ offset = INTVAL (XEXP (src, 0)), ok = 1;
+
+ if (ok)
+ {
+ if (replace)
+ {
+ rtx src
+ = plus_constant (ep->to_rtx, offset - ep->offset);
+
+ /* First see if this insn remains valid when we
+ make the change. If not, keep the INSN_CODE
+ the same and let reload fix it up. */
+ validate_change (insn, &SET_SRC (old_set), src, 1);
+ validate_change (insn, &SET_DEST (old_set),
+ ep->to_rtx, 1);
+ if (! apply_change_group ())
+ {
+ SET_SRC (old_set) = src;
+ SET_DEST (old_set) = ep->to_rtx;
+ }
+ }
+
+ val = 1;
+ goto done;
+ }
+ }
+#endif
+
/* In this case this insn isn't serving a useful purpose. We
will delete it in reload_as_needed once we know that this
elimination is, in fact, being done.
- If REPLACE isn't set, we can't delete this insn, but neededn't
+ If REPLACE isn't set, we can't delete this insn, but needn't
process it since it won't be used unless something changes. */
if (replace)
delete_dead_insn (insn);
We have to do this here, rather than in eliminate_regs, do that we can
change the insn code. */
- if (GET_CODE (SET_SRC (old_body)) == PLUS
- && GET_CODE (XEXP (SET_SRC (old_body), 0)) == REG
- && GET_CODE (XEXP (SET_SRC (old_body), 1)) == CONST_INT)
+ if (GET_CODE (SET_SRC (old_set)) == PLUS
+ && GET_CODE (XEXP (SET_SRC (old_set), 0)) == REG
+ && GET_CODE (XEXP (SET_SRC (old_set), 1)) == CONST_INT)
for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS];
ep++)
- if (ep->from_rtx == XEXP (SET_SRC (old_body), 0)
+ if (ep->from_rtx == XEXP (SET_SRC (old_set), 0)
&& ep->can_eliminate)
{
/* We must stop at the first elimination that will be used.
If this one would replace the PLUS with a REG, do it
now. Otherwise, quit the loop and let eliminate_regs
do its normal replacement. */
- if (ep->offset == - INTVAL (XEXP (SET_SRC (old_body), 1)))
+ if (ep->offset == - INTVAL (XEXP (SET_SRC (old_set), 1)))
{
+ /* We assume here that we don't need a PARALLEL of
+ any CLOBBERs for this assignment. There's not
+ much we can do if we do need it. */
PATTERN (insn) = gen_rtx (SET, VOIDmode,
- SET_DEST (old_body), ep->to_rtx);
+ SET_DEST (old_set), ep->to_rtx);
INSN_CODE (insn) = -1;
val = 1;
goto done;
if (! replace && asm_noperands (old_body) < 0)
new_body = copy_rtx (new_body);
- /* If we had a move insn but now we don't, rerecognize it. */
- if ((GET_CODE (old_body) == SET && GET_CODE (SET_SRC (old_body)) == REG
- && (GET_CODE (new_body) != SET
- || GET_CODE (SET_SRC (new_body)) != REG))
- /* If this was a load from or store to memory, compare
- the MEM in recog_operand to the one in the insn. If they
- are not equal, then rerecognize the insn. */
- || (GET_CODE (old_body) == SET
- && ((GET_CODE (SET_SRC (old_body)) == MEM
- && SET_SRC (old_body) != recog_operand[1])
- || (GET_CODE (SET_DEST (old_body)) == MEM
- && SET_DEST (old_body) != recog_operand[0])))
- /* If this was an add insn before, rerecognize. */
- ||
- (GET_CODE (old_body) == SET
- && GET_CODE (SET_SRC (old_body)) == PLUS))
+ /* If we had a move insn but now we don't, rerecognize it. This will
+ cause spurious re-recognition if the old move had a PARALLEL since
+ the new one still will, but we can't call single_set without
+ having put NEW_BODY into the insn and the re-recognition won't
+ hurt in this rare case. */
+ if (old_set != 0
+ && ((GET_CODE (SET_SRC (old_set)) == REG
+ && (GET_CODE (new_body) != SET
+ || GET_CODE (SET_SRC (new_body)) != REG))
+ /* If this was a load from or store to memory, compare
+ the MEM in recog_operand to the one in the insn. If they
+ are not equal, then rerecognize the insn. */
+ || (old_set != 0
+ && ((GET_CODE (SET_SRC (old_set)) == MEM
+ && SET_SRC (old_set) != recog_operand[1])
+ || (GET_CODE (SET_DEST (old_set)) == MEM
+ && SET_DEST (old_set) != recog_operand[0])))
+ /* If this was an add insn before, rerecognize. */
+ || GET_CODE (SET_SRC (old_set)) == PLUS))
{
if (! validate_change (insn, &PATTERN (insn), new_body, 0))
/* If recognition fails, store the new body anyway.
}
done:
- /* If we changed something, perform elmination in REG_NOTES. This is
+ /* If we changed something, perform elimination in REG_NOTES. This is
needed even when REPLACE is zero because a REG_DEAD note might refer
to a register that we eliminate and could cause a different number
of spill registers to be needed in the final reload pass than in
SET_HARD_REG_BIT (forbidden_regs, regno);
+ if (cant_eliminate)
+ regs_ever_live[regno] = 1;
+
/* Spill every pseudo reg that was allocated to this reg
or to something that overlaps this reg. */
return something_changed;
}
\f
-/* Find all paradoxical subregs within X and update reg_max_ref_width. */
+/* Find all paradoxical subregs within X and update reg_max_ref_width.
+ Also mark any hard registers used to store user variables as
+ forbidden from being used for spill registers. */
static void
scan_paradoxical_subregs (x)
switch (code)
{
+ case REG:
+#ifdef SMALL_REGISTER_CLASSES
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER && REG_USERVAR_P (x))
+ SET_HARD_REG_BIT (forbidden_regs, REGNO (x));
+#endif
+ return;
+
case CONST_INT:
case CONST:
case SYMBOL_REF:
case CONST_DOUBLE:
case CC0:
case PC:
- case REG:
case USE:
case CLOBBER:
return;
potential_reload_regs[o++] = hard_reg_n_uses[i].regno;
}
\f
+/* Used in reload_as_needed to sort the spilled regs. */
+
+static int
+compare_spill_regs (r1, r2)
+ short *r1, *r2;
+{
+ return *r1 - *r2;
+}
+
/* Reload pseudo-registers into hard regs around each insn as needed.
Additional register load insns are output before the insn that needs it
and perhaps store insns after insns that modify the reloaded pseudo reg.
rtx x;
rtx after_call = 0;
- bzero (spill_reg_rtx, sizeof spill_reg_rtx);
+ bzero ((char *) spill_reg_rtx, sizeof spill_reg_rtx);
+ bzero ((char *) spill_reg_store, sizeof spill_reg_store);
reg_last_reload_reg = (rtx *) alloca (max_regno * sizeof (rtx));
- bzero (reg_last_reload_reg, max_regno * sizeof (rtx));
+ bzero ((char *) reg_last_reload_reg, max_regno * sizeof (rtx));
reg_has_output_reload = (char *) alloca (max_regno);
for (i = 0; i < n_spills; i++)
{
num_not_at_initial_offset = 0;
+ /* Order the spilled regs, so that allocate_reload_regs can guarantee to
+ pack registers with group needs. */
+ if (n_spills > 1)
+ {
+ qsort (spill_regs, n_spills, sizeof (short), compare_spill_regs);
+ for (i = 0; i < n_spills; i++)
+ spill_reg_order[spill_regs[i]] = i;
+ }
+
for (insn = first; insn;)
{
register rtx next = NEXT_INSN (insn);
else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
{
rtx avoid_return_reg = 0;
+ rtx oldpat = PATTERN (insn);
#ifdef SMALL_REGISTER_CLASSES
/* Set avoid_return_reg if this is an insn
&& !(GET_CODE (PATTERN (insn)) == SET
&& SET_DEST (PATTERN (insn)) == stack_pointer_rtx))
{
- if (reg_mentioned_p (after_call, PATTERN (insn)))
+ if (reg_referenced_p (after_call, PATTERN (insn)))
avoid_return_reg = after_call;
after_call = 0;
}
&& ! reload_optional[i]
&& (reload_in[i] != 0 || reload_out[i] != 0
|| reload_secondary_p[i] != 0))
- abort ();
+ fatal_insn ("Non-optional registers need a spill register", insn);
/* Now compute which reload regs to reload them into. Perhaps
reusing reload regs from previous insns, or else output
for this insn in order to be stored in
(obeying register constraints). That is correct; such reload
registers ARE still valid. */
- note_stores (PATTERN (insn), forget_old_reloads_1);
+ note_stores (oldpat, forget_old_reloads_1);
/* There may have been CLOBBER insns placed after INSN. So scan
between INSN and NEXT and use them to forget old reloads. */
static HARD_REG_SET reload_reg_used_in_output[MAX_RECOG_OPERANDS];
/* If reg is in use for a RELOAD_FOR_OPERAND_ADDRESS reload. */
static HARD_REG_SET reload_reg_used_in_op_addr;
+/* If reg is in use for a RELOAD_FOR_OPADDR_ADDR reload. */
+static HARD_REG_SET reload_reg_used_in_op_addr_reload;
/* If reg is in use for a RELOAD_FOR_INSN reload. */
static HARD_REG_SET reload_reg_used_in_insn;
/* If reg is in use for a RELOAD_FOR_OTHER_ADDRESS reload. */
SET_HARD_REG_BIT (reload_reg_used_in_op_addr, i);
break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ SET_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, i);
+ break;
+
case RELOAD_FOR_OTHER_ADDRESS:
SET_HARD_REG_BIT (reload_reg_used_in_other_addr, i);
break;
CLEAR_HARD_REG_BIT (reload_reg_used_in_op_addr, i);
break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, i);
+ break;
+
case RELOAD_FOR_OTHER_ADDRESS:
CLEAR_HARD_REG_BIT (reload_reg_used_in_other_addr, i);
break;
switch (type)
{
case RELOAD_OTHER:
- /* In use for anything means not available for a RELOAD_OTHER. */
- return ! TEST_HARD_REG_BIT (reload_reg_used_at_all, regno);
+ /* In use for anything except RELOAD_FOR_OTHER_ADDRESS means
+ we can't use it for RELOAD_OTHER. */
+ if (TEST_HARD_REG_BIT (reload_reg_used, regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return 1;
- /* The other kinds of use can sometimes share a register. */
case RELOAD_FOR_INPUT:
if (TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
|| TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno))
return 0;
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno))
+ return 0;
+
/* If it is used for some other input, can't use it. */
for (i = 0; i < reload_n_operands; i++)
if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
return (! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
&& ! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno));
+ case RELOAD_FOR_OPADDR_ADDR:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return (!TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno));
+
case RELOAD_FOR_OUTPUT:
/* This cannot share a register with RELOAD_FOR_INSN reloads, other
outputs, or an operand address for this or an earlier output. */
return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
case RELOAD_FOR_OPERAND_ADDRESS:
+ case RELOAD_FOR_OPADDR_ADDR:
case RELOAD_FOR_INSN:
/* These can't conflict with inputs, or each other, so all we have to
test is input addresses and the addresses of OTHER items. */
|| TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
return 0;
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno))
+ return 0;
+
return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
&& ! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno));
return 1;
+ case RELOAD_FOR_OPADDR_ADDR:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ && !TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno));
+
case RELOAD_FOR_INSN:
- /* These conflict with other outputs with with RELOAD_OTHER. So
+ /* These conflict with other outputs with RELOAD_OTHER. So
we need only check for output addresses. */
opnum = -1;
abort ();
}
\f
+/* Return 1 if the reloads denoted by R1 and R2 cannot share a register.
+ Return 0 otherwise.
+
+ This function uses the same algorithm as reload_reg_free_p above. */
+
+static int
+reloads_conflict (r1, r2)
+ int r1, r2;
+{
+ enum reload_type r1_type = reload_when_needed[r1];
+ enum reload_type r2_type = reload_when_needed[r2];
+ int r1_opnum = reload_opnum[r1];
+ int r2_opnum = reload_opnum[r2];
+
+ /* RELOAD_OTHER conflicts with everything except RELOAD_FOR_OTHER_ADDRESS. */
+
+ if (r2_type == RELOAD_OTHER && r1_type != RELOAD_FOR_OTHER_ADDRESS)
+ return 1;
+
+ /* Otherwise, check conflicts differently for each type. */
+
+ switch (r1_type)
+ {
+ case RELOAD_FOR_INPUT:
+ return (r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS
+ || r2_type == RELOAD_FOR_OPADDR_ADDR
+ || r2_type == RELOAD_FOR_INPUT
+ || (r2_type == RELOAD_FOR_INPUT_ADDRESS && r2_opnum > r1_opnum));
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ return ((r2_type == RELOAD_FOR_INPUT_ADDRESS && r1_opnum == r2_opnum)
+ || (r2_type == RELOAD_FOR_INPUT && r2_opnum < r1_opnum));
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ return ((r2_type == RELOAD_FOR_OUTPUT_ADDRESS && r2_opnum == r1_opnum)
+ || (r2_type == RELOAD_FOR_OUTPUT && r2_opnum >= r1_opnum));
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS);
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ return (r2_type == RELOAD_FOR_INPUT
+ || r2_type == RELOAD_FOR_OPADDR_ADDR);
+
+ case RELOAD_FOR_OUTPUT:
+ return (r2_type == RELOAD_FOR_INSN || r2_type == RELOAD_FOR_OUTPUT
+ || (r2_type == RELOAD_FOR_OUTPUT_ADDRESS
+ && r2_opnum >= r1_opnum));
+
+ case RELOAD_FOR_INSN:
+ return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_OUTPUT
+ || r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS);
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ return r2_type == RELOAD_FOR_OTHER_ADDRESS;
+
+ case RELOAD_OTHER:
+ return r2_type != RELOAD_FOR_OTHER_ADDRESS;
+
+ default:
+ abort ();
+ }
+}
+\f
/* Vector of reload-numbers showing the order in which the reloads should
be processed. */
short reload_order[MAX_RELOADS];
or -1 if we did not need one of the spill registers for this reload. */
int reload_spill_index[MAX_RELOADS];
-/* Index of last register assigned as a spill register. We allocate in
- a round-robin fashio. */
-
-static int last_spill_reg = 0;
-
/* Find a spill register to use as a reload register for reload R.
LAST_RELOAD is non-zero if this is the last reload for the insn being
processed.
/* I is the index in spill_regs.
We advance it round-robin between insns to use all spill regs
equally, so that inherited reloads have a chance
- of leapfrogging each other. */
-
- for (count = 0, i = last_spill_reg; count < n_spills; count++)
+ of leapfrogging each other. Don't do this, however, when we have
+ group needs and failure would be fatal; if we only have a relatively
+ small number of spill registers, and more than one of them has
+ group needs, then by starting in the middle, we may end up
+ allocating the first one in such a way that we are not left with
+ sufficient groups to handle the rest. */
+
+ if (noerror || ! force_group)
+ i = last_spill_reg;
+ else
+ i = -1;
+
+ for (count = 0; count < n_spills; count++)
{
int class = (int) reload_reg_class[r];
failure:
if (asm_noperands (PATTERN (insn)) < 0)
/* It's the compiler's fault. */
- abort ();
+ fatal_insn ("Could not find a spill register", insn);
/* It's the user's fault; the operand's mode and constraint
don't match. Disable this reload so we don't crash in final. */
HARD_REG_SET save_reload_reg_used_in_input[MAX_RECOG_OPERANDS];
HARD_REG_SET save_reload_reg_used_in_output[MAX_RECOG_OPERANDS];
HARD_REG_SET save_reload_reg_used_in_op_addr;
+ HARD_REG_SET save_reload_reg_used_in_op_addr_reload;
HARD_REG_SET save_reload_reg_used_in_insn;
HARD_REG_SET save_reload_reg_used_in_other_addr;
HARD_REG_SET save_reload_reg_used_at_all;
bzero (reload_inherited, MAX_RELOADS);
- bzero (reload_inheritance_insn, MAX_RELOADS * sizeof (rtx));
- bzero (reload_override_in, MAX_RELOADS * sizeof (rtx));
+ bzero ((char *) reload_inheritance_insn, MAX_RELOADS * sizeof (rtx));
+ bzero ((char *) reload_override_in, MAX_RELOADS * sizeof (rtx));
CLEAR_HARD_REG_SET (reload_reg_used);
CLEAR_HARD_REG_SET (reload_reg_used_at_all);
CLEAR_HARD_REG_SET (reload_reg_used_in_op_addr);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_op_addr_reload);
CLEAR_HARD_REG_SET (reload_reg_used_in_insn);
CLEAR_HARD_REG_SET (reload_reg_used_in_other_addr);
if (n_reloads > 1)
qsort (reload_order, n_reloads, sizeof (short), reload_reg_class_lower);
- bcopy (reload_reg_rtx, save_reload_reg_rtx, sizeof reload_reg_rtx);
+ bcopy ((char *) reload_reg_rtx, (char *) save_reload_reg_rtx,
+ sizeof reload_reg_rtx);
bcopy (reload_inherited, save_reload_inherited, sizeof reload_inherited);
- bcopy (reload_inheritance_insn, save_reload_inheritance_insn,
+ bcopy ((char *) reload_inheritance_insn,
+ (char *) save_reload_inheritance_insn,
sizeof reload_inheritance_insn);
- bcopy (reload_override_in, save_reload_override_in,
+ bcopy ((char *) reload_override_in, (char *) save_reload_override_in,
sizeof reload_override_in);
- bcopy (reload_spill_index, save_reload_spill_index,
+ bcopy ((char *) reload_spill_index, (char *) save_reload_spill_index,
sizeof reload_spill_index);
COPY_HARD_REG_SET (save_reload_reg_used, reload_reg_used);
COPY_HARD_REG_SET (save_reload_reg_used_at_all, reload_reg_used_at_all);
COPY_HARD_REG_SET (save_reload_reg_used_in_op_addr,
reload_reg_used_in_op_addr);
+
+ COPY_HARD_REG_SET (save_reload_reg_used_in_op_addr_reload,
+ reload_reg_used_in_op_addr_reload);
+
COPY_HARD_REG_SET (save_reload_reg_used_in_insn,
reload_reg_used_in_insn);
COPY_HARD_REG_SET (save_reload_reg_used_in_other_addr,
/* Loop around and try without any inheritance. */
/* First undo everything done by the failed attempt
to allocate with inheritance. */
- bcopy (save_reload_reg_rtx, reload_reg_rtx, sizeof reload_reg_rtx);
- bcopy (save_reload_inherited, reload_inherited, sizeof reload_inherited);
- bcopy (save_reload_inheritance_insn, reload_inheritance_insn,
+ bcopy ((char *) save_reload_reg_rtx, (char *) reload_reg_rtx,
+ sizeof reload_reg_rtx);
+ bcopy ((char *) save_reload_inherited, (char *) reload_inherited,
+ sizeof reload_inherited);
+ bcopy ((char *) save_reload_inheritance_insn,
+ (char *) reload_inheritance_insn,
sizeof reload_inheritance_insn);
- bcopy (save_reload_override_in, reload_override_in,
+ bcopy ((char *) save_reload_override_in, (char *) reload_override_in,
sizeof reload_override_in);
- bcopy (save_reload_spill_index, reload_spill_index,
+ bcopy ((char *) save_reload_spill_index, (char *) reload_spill_index,
sizeof reload_spill_index);
COPY_HARD_REG_SET (reload_reg_used, save_reload_reg_used);
COPY_HARD_REG_SET (reload_reg_used_at_all, save_reload_reg_used_at_all);
COPY_HARD_REG_SET (reload_reg_used_in_op_addr,
save_reload_reg_used_in_op_addr);
+ COPY_HARD_REG_SET (reload_reg_used_in_op_addr_reload,
+ save_reload_reg_used_in_op_addr_reload);
COPY_HARD_REG_SET (reload_reg_used_in_insn,
save_reload_reg_used_in_insn);
COPY_HARD_REG_SET (reload_reg_used_in_other_addr,
rtx output_reload_insns[MAX_RECOG_OPERANDS];
rtx output_address_reload_insns[MAX_RECOG_OPERANDS];
rtx operand_reload_insns = 0;
+ rtx other_operand_reload_insns = 0;
+ rtx other_output_reload_insns = 0;
rtx following_insn = NEXT_INSN (insn);
rtx before_insn = insn;
int special;
input_reload_insns[j] = input_address_reload_insns[j]
= output_reload_insns[j] = output_address_reload_insns[j] = 0;
- /* If this is a CALL_INSN preceded by USE insns, any reload insns
- must go in front of the first USE insn, not in front of INSN. */
-
- if (GET_CODE (insn) == CALL_INSN && GET_CODE (PREV_INSN (insn)) == INSN
- && GET_CODE (PATTERN (PREV_INSN (insn))) == USE)
- while (GET_CODE (PREV_INSN (before_insn)) == INSN
- && GET_CODE (PATTERN (PREV_INSN (before_insn))) == USE)
- before_insn = PREV_INSN (before_insn);
-
- /* If INSN is followed by any CLOBBER insns made by find_reloads,
- put our reloads after them since they may otherwise be
- misinterpreted. */
-
- while (GET_CODE (following_insn) == INSN
- && GET_MODE (following_insn) == DImode
- && GET_CODE (PATTERN (following_insn)) == CLOBBER
- && NEXT_INSN (following_insn) != 0)
- following_insn = NEXT_INSN (following_insn);
-
/* Now output the instructions to copy the data into and out of the
reload registers. Do these in the order that the reloads were reported,
since reloads of base and index registers precede reloads of operands
{
register rtx old;
rtx oldequiv_reg = 0;
- rtx store_insn = 0;
+
+ if (reload_spill_index[j] >= 0)
+ new_spill_reg_store[reload_spill_index[j]] = 0;
old = reload_in[j];
if (old != 0 && ! reload_inherited[j]
else if (GET_CODE (oldequiv) == SUBREG)
oldequiv_reg = SUBREG_REG (oldequiv);
+ /* If we are reloading from a register that was recently stored in
+ with an output-reload, see if we can prove there was
+ actually no need to store the old value in it. */
+
+ if (optimize && GET_CODE (oldequiv) == REG
+ && REGNO (oldequiv) < FIRST_PSEUDO_REGISTER
+ && spill_reg_order[REGNO (oldequiv)] >= 0
+ && spill_reg_store[spill_reg_order[REGNO (oldequiv)]] != 0
+ && find_reg_note (insn, REG_DEAD, reload_in[j])
+ /* This is unsafe if operand occurs more than once in current
+ insn. Perhaps some occurrences weren't reloaded. */
+ && count_occurrences (PATTERN (insn), reload_in[j]) == 1)
+ delete_output_reload
+ (insn, j, spill_reg_store[spill_reg_order[REGNO (oldequiv)]]);
+
/* Encapsulate both RELOADREG and OLDEQUIV into that mode,
then load RELOADREG from OLDEQUIV. Note that we cannot use
gen_lowpart_common since it can do the wrong thing when
case RELOAD_FOR_OPERAND_ADDRESS:
where = &operand_reload_insns;
break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ where = &other_operand_reload_insns;
+ break;
case RELOAD_FOR_OTHER_ADDRESS:
where = &other_input_address_reload_insns;
break;
third_reload_reg)));
}
else
- gen_input_reload (second_reload_reg, oldequiv,
- reload_opnum[j],
- reload_when_needed[j]);
+ gen_reload (second_reload_reg, oldequiv,
+ reload_opnum[j],
+ reload_when_needed[j]);
oldequiv = second_reload_reg;
}
#endif
if (! special && ! rtx_equal_p (reloadreg, oldequiv))
- gen_input_reload (reloadreg, oldequiv, reload_opnum[j],
- reload_when_needed[j]);
+ gen_reload (reloadreg, oldequiv, reload_opnum[j],
+ reload_when_needed[j]);
#if defined(SECONDARY_INPUT_RELOAD_CLASS) && defined(PRESERVE_DEATH_INFO_REGNO_P)
/* We may have to make a REG_DEAD note for the secondary reload
XEXP (note, 0) = reload_reg_rtx[j];
continue;
}
+ /* Likewise for a SUBREG of an operand that dies. */
+ else if (GET_CODE (old) == SUBREG
+ && GET_CODE (SUBREG_REG (old)) == REG
+ && 0 != (note = find_reg_note (insn, REG_UNUSED,
+ SUBREG_REG (old))))
+ {
+ XEXP (note, 0) = gen_lowpart_common (GET_MODE (old),
+ reload_reg_rtx[j]);
+ continue;
+ }
else if (GET_CODE (old) == SCRATCH)
/* If we aren't optimizing, there won't be a REG_UNUSED note,
but we don't want to make an output reload. */
if (GET_CODE (insn) == JUMP_INSN)
abort ();
- push_to_sequence (output_reload_insns[reload_opnum[j]]);
+ if (reload_when_needed[j] == RELOAD_OTHER)
+ push_to_sequence (other_output_reload_insns);
+ else
+ push_to_sequence (output_reload_insns[reload_opnum[j]]);
/* Determine the mode to reload in.
See comments above (for input reloading). */
/* VOIDmode should never happen for an output. */
if (asm_noperands (PATTERN (insn)) < 0)
/* It's the compiler's fault. */
- abort ();
+ fatal_insn ("VOIDmode on an output", insn);
error_for_asm (insn, "output operand is constant in `asm'");
/* Prevent crash--use something we know is valid. */
mode = word_mode;
#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
/* If we need two reload regs, set RELOADREG to the intermediate
- one, since it will be stored into OUT. We might need a secondary
+ one, since it will be stored into OLD. We might need a secondary
register only for an input reload, so check again here. */
if (reload_secondary_out_reload[j] >= 0)
{
/* See if we need both a scratch and intermediate reload
register. */
+
int secondary_reload = reload_secondary_out_reload[j];
enum insn_code tertiary_icode
= reload_secondary_out_icode[secondary_reload];
- rtx pat;
if (GET_MODE (reloadreg) != mode)
reloadreg = gen_rtx (REG, mode, REGNO (reloadreg));
{
rtx third_reloadreg
= reload_reg_rtx[reload_secondary_out_reload[secondary_reload]];
- pat = (GEN_FCN (tertiary_icode)
- (reloadreg, second_reloadreg, third_reloadreg));
- }
-#ifdef SECONDARY_MEMORY_NEEDED
- /* If we need a memory location to do the move, do it that way. */
- else if (GET_CODE (reloadreg) == REG
- && REGNO (reloadreg) < FIRST_PSEUDO_REGISTER
- && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (reloadreg)),
- REGNO_REG_CLASS (REGNO (second_reloadreg)),
- GET_MODE (second_reloadreg)))
- {
- /* Get the memory to use and rewrite both registers
- to its mode. */
- rtx loc
- = get_secondary_mem (reloadreg,
- GET_MODE (second_reloadreg),
- reload_opnum[j],
- reload_when_needed[j]);
- rtx tmp_reloadreg;
-
- if (GET_MODE (loc) != GET_MODE (second_reloadreg))
- second_reloadreg = gen_rtx (REG, GET_MODE (loc),
- REGNO (second_reloadreg));
-
- if (GET_MODE (loc) != GET_MODE (reloadreg))
- tmp_reloadreg = gen_rtx (REG, GET_MODE (loc),
- REGNO (reloadreg));
- else
- tmp_reloadreg = reloadreg;
-
- emit_move_insn (loc, second_reloadreg);
- pat = gen_move_insn (tmp_reloadreg, loc);
+ rtx tem;
+
+ /* Copy primary reload reg to secondary reload reg.
+ (Note that these have been swapped above.)  Then copy
+ secondary reload reg to OLD using our insn. */
+
+ /* If REAL_OLD is a paradoxical SUBREG, remove it
+ and try to put the opposite SUBREG on
+ RELOADREG. */
+ if (GET_CODE (real_old) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (real_old))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (real_old))))
+ && 0 != (tem = gen_lowpart_common
+ (GET_MODE (SUBREG_REG (real_old)),
+ reloadreg)))
+ real_old = SUBREG_REG (real_old), reloadreg = tem;
+
+ gen_reload (reloadreg, second_reloadreg,
+ reload_opnum[j], reload_when_needed[j]);
+ emit_insn ((GEN_FCN (tertiary_icode)
+ (real_old, reloadreg, third_reloadreg)));
+ special = 1;
}
-#endif
+
else
- pat = gen_move_insn (reloadreg, second_reloadreg);
+ /* Copy between the reload regs here and then to
+ OUT later. */
- emit_insn (pat);
+ gen_reload (reloadreg, second_reloadreg,
+ reload_opnum[j], reload_when_needed[j]);
}
}
}
/* Output the last reload insn. */
if (! special)
- {
-#ifdef SECONDARY_MEMORY_NEEDED
- /* If we need a memory location to do the move, do it that way. */
- if (GET_CODE (old) == REG && REGNO (old) < FIRST_PSEUDO_REGISTER
- && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (old)),
- REGNO_REG_CLASS (REGNO (reloadreg)),
- GET_MODE (reloadreg)))
- {
- /* Get the memory to use and rewrite both registers to
- its mode. */
- rtx loc = get_secondary_mem (old, GET_MODE (reloadreg),
- reload_opnum[j],
- reload_when_needed[j]);
-
- if (GET_MODE (loc) != GET_MODE (reloadreg))
- reloadreg = gen_rtx (REG, GET_MODE (loc),
- REGNO (reloadreg));
-
- if (GET_MODE (loc) != GET_MODE (old))
- old = gen_rtx (REG, GET_MODE (loc), REGNO (old));
-
- emit_insn (gen_move_insn (loc, reloadreg));
- emit_insn (gen_move_insn (old, loc));
- }
- else
-#endif
- emit_insn (gen_move_insn (old, reloadreg));
- }
+ gen_reload (old, reloadreg, reload_opnum[j],
+ reload_when_needed[j]);
#ifdef PRESERVE_DEATH_INFO_REGNO_P
/* If final will look at death notes for this reg,
reg_has_output_reload will make this do nothing. */
note_stores (PATTERN (p), forget_old_reloads_1);
- if (reg_mentioned_p (reload_reg_rtx[j], PATTERN (p)))
- store_insn = p;
+ if (reg_mentioned_p (reload_reg_rtx[j], PATTERN (p))
+ && reload_spill_index[j] >= 0)
+ new_spill_reg_store[reload_spill_index[j]] = p;
}
- output_reload_insns[reload_opnum[j]] = get_insns ();
- end_sequence ();
+ if (reload_when_needed[j] == RELOAD_OTHER)
+ other_output_reload_insns = get_insns ();
+ else
+ output_reload_insns[reload_opnum[j]] = get_insns ();
+ end_sequence ();
}
-
- if (reload_spill_index[j] >= 0)
- new_spill_reg_store[reload_spill_index[j]] = store_insn;
}
/* Now write all the insns we made for reloads in the order expected by
For each operand, any RELOAD_FOR_INPUT_ADDRESS reloads followed by
the RELOAD_FOR_INPUT reload for the operand.
+ RELOAD_FOR_OPADDR_ADDRS reloads.
+
RELOAD_FOR_OPERAND_ADDRESS reloads.
After the insn being reloaded, we write the following:
For each operand, any RELOAD_FOR_OUTPUT_ADDRESS reload followed by
- the RELOAD_FOR_OUTPUT reload for that operand. */
+ the RELOAD_FOR_OUTPUT reload for that operand.
+
+ Any RELOAD_OTHER output reloads. */
emit_insns_before (other_input_address_reload_insns, before_insn);
emit_insns_before (other_input_reload_insns, before_insn);
emit_insns_before (input_reload_insns[j], before_insn);
}
+ emit_insns_before (other_operand_reload_insns, before_insn);
emit_insns_before (operand_reload_insns, before_insn);
for (j = 0; j < reload_n_operands; j++)
emit_insns_before (output_reload_insns[j], following_insn);
}
+ emit_insns_before (other_output_reload_insns, following_insn);
+
/* Move death notes from INSN
to output-operand-address and output reload insns. */
#ifdef PRESERVE_DEATH_INFO_REGNO_P
if (nregno < FIRST_PSEUDO_REGISTER)
for (k = 1; k < nnr; k++)
reg_last_reload_reg[nregno + k]
- = (nr == nnr ? gen_rtx (REG, word_mode,
+ = (nr == nnr ? gen_rtx (REG,
+ reg_raw_mode[REGNO (reload_reg_rtx[r]) + k],
REGNO (reload_reg_rtx[r]) + k)
: 0);
if (nregno < FIRST_PSEUDO_REGISTER)
for (k = 1; k < nnr; k++)
reg_last_reload_reg[nregno + k]
- = (nr == nnr ? gen_rtx (REG, word_mode,
+ = (nr == nnr ? gen_rtx (REG,
+ reg_raw_mode[REGNO (reload_reg_rtx[r]) + k],
REGNO (reload_reg_rtx[r]) + k)
: 0);
if (i < 0 && reload_out[r] != 0 && GET_CODE (reload_out[r]) == REG)
{
register int nregno = REGNO (reload_out[r]);
- reg_last_reload_reg[nregno] = 0;
+ if (nregno >= FIRST_PSEUDO_REGISTER)
+ reg_last_reload_reg[nregno] = 0;
+ else
+ {
+ int num_regs = HARD_REGNO_NREGS (nregno,GET_MODE (reload_out[r]));
+
+ while (num_regs-- > 0)
+ reg_last_reload_reg[nregno + num_regs] = 0;
+ }
}
}
}
\f
-/* Emit code to perform an input reload of IN to RELOADREG. IN is from
- operand OPNUM with reload type TYPE.
+/* Emit code to perform a reload from IN (which may be a reload register) to
+ OUT (which may also be a reload register). IN or OUT is from operand
+ OPNUM with reload type TYPE.
Returns first insn emitted. */
rtx
-gen_input_reload (reloadreg, in, opnum, type)
- rtx reloadreg;
+gen_reload (out, in, opnum, type)
+ rtx out;
rtx in;
int opnum;
enum reload_type type;
{
rtx last = get_last_insn ();
+ rtx tem;
+
+ /* If IN is a paradoxical SUBREG, remove it and try to put the
+ opposite SUBREG on OUT. Likewise for a paradoxical SUBREG on OUT. */
+ if (GET_CODE (in) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (in))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))))
+ && (tem = gen_lowpart_common (GET_MODE (SUBREG_REG (in)), out)) != 0)
+ in = SUBREG_REG (in), out = tem;
+ else if (GET_CODE (out) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (out))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))))
+ && (tem = gen_lowpart_common (GET_MODE (SUBREG_REG (out)), in)) != 0)
+ out = SUBREG_REG (out), in = tem;
/* How to do this reload can get quite tricky. Normally, we are being
asked to reload a simple operand, such as a MEM, a constant, or a pseudo
it will be A = A + B as constrain_operands expects. */
if (GET_CODE (XEXP (in, 1)) == REG
- && REGNO (reloadreg) == REGNO (XEXP (in, 1)))
+ && REGNO (out) == REGNO (XEXP (in, 1)))
tem = op0, op0 = op1, op1 = tem;
if (op0 != XEXP (in, 0) || op1 != XEXP (in, 1))
in = gen_rtx (PLUS, GET_MODE (in), op0, op1);
- insn = emit_insn (gen_rtx (SET, VOIDmode, reloadreg, in));
+ insn = emit_insn (gen_rtx (SET, VOIDmode, out, in));
code = recog_memoized (insn);
if (code >= 0)
&& REGNO (op1) >= FIRST_PSEUDO_REGISTER))
tem = op0, op0 = op1, op1 = tem;
- emit_insn (gen_move_insn (reloadreg, op0));
+ emit_insn (gen_move_insn (out, op0));
- /* If OP0 and OP1 are the same, we can use RELOADREG for OP1.
+ /* If OP0 and OP1 are the same, we can use OUT for OP1.
This fixes a problem on the 32K where the stack pointer cannot
be used as an operand of an add insn. */
if (rtx_equal_p (op0, op1))
- op1 = reloadreg;
+ op1 = out;
- insn = emit_insn (gen_add2_insn (reloadreg, op1));
+ insn = emit_insn (gen_add2_insn (out, op1));
/* If that failed, copy the address register to the reload register.
Then add the constant to the reload register. */
delete_insns_since (last);
- emit_insn (gen_move_insn (reloadreg, op1));
- emit_insn (gen_add2_insn (reloadreg, op0));
+ emit_insn (gen_move_insn (out, op1));
+ emit_insn (gen_add2_insn (out, op0));
}
#ifdef SECONDARY_MEMORY_NEEDED
/* If we need a memory location to do the move, do it that way. */
else if (GET_CODE (in) == REG && REGNO (in) < FIRST_PSEUDO_REGISTER
+ && GET_CODE (out) == REG && REGNO (out) < FIRST_PSEUDO_REGISTER
&& SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (in)),
- REGNO_REG_CLASS (REGNO (reloadreg)),
- GET_MODE (reloadreg)))
+ REGNO_REG_CLASS (REGNO (out)),
+ GET_MODE (out)))
{
/* Get the memory to use and rewrite both registers to its mode. */
- rtx loc = get_secondary_mem (in, GET_MODE (reloadreg), opnum, type);
+ rtx loc = get_secondary_mem (in, GET_MODE (out), opnum, type);
- if (GET_MODE (loc) != GET_MODE (reloadreg))
- reloadreg = gen_rtx (REG, GET_MODE (loc), REGNO (reloadreg));
+ if (GET_MODE (loc) != GET_MODE (out))
+ out = gen_rtx (REG, GET_MODE (loc), REGNO (out));
if (GET_MODE (loc) != GET_MODE (in))
in = gen_rtx (REG, GET_MODE (loc), REGNO (in));
emit_insn (gen_move_insn (loc, in));
- emit_insn (gen_move_insn (reloadreg, loc));
+ emit_insn (gen_move_insn (out, loc));
}
#endif
/* If IN is a simple operand, use gen_move_insn. */
else if (GET_RTX_CLASS (GET_CODE (in)) == 'o' || GET_CODE (in) == SUBREG)
- emit_insn (gen_move_insn (reloadreg, in));
+ emit_insn (gen_move_insn (out, in));
#ifdef HAVE_reload_load_address
else if (HAVE_reload_load_address)
- emit_insn (gen_reload_load_address (reloadreg, in));
+ emit_insn (gen_reload_load_address (out, in));
#endif
- /* Otherwise, just write (set REGLOADREG IN) and hope for the best. */
+ /* Otherwise, just write (set OUT IN) and hope for the best. */
else
- emit_insn (gen_rtx (SET, VOIDmode, reloadreg, in));
+ emit_insn (gen_rtx (SET, VOIDmode, out, in));
/* Return the first insn emitted.
We can not just return get_last_insn, because there may have
emit_insn (gen_move_insn (reloadreg, incloc));
/* See if we can directly increment INCLOC. Use a method similar to that
- in gen_input_reload. */
+ in gen_reload. */
last = get_last_insn ();
add_insn = emit_insn (gen_rtx (SET, VOIDmode, incloc,