/* Emit RTL for the GNU C-Compiler expander.
- Copyright (C) 1987, 88, 92-97, 1998, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+ 1999, 2000, 2001 Free Software Foundation, Inc.
This file is part of GNU CC.
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
+#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "regs.h"
#include "hard-reg-set.h"
+#include "hashtab.h"
#include "insn-config.h"
#include "recog.h"
#include "real.h"
#include "obstack.h"
#include "bitmap.h"
+#include "basic-block.h"
#include "ggc.h"
/* Commonly used modes. */
All of these except perhaps the floating-point CONST_DOUBLEs
are unique; no other rtx-object will be equal to any of these. */
-/* Avoid warnings by initializing the `fld' field. Since its a union,
- bypass problems with KNR compilers by only doing so when __GNUC__. */
-#ifdef __GNUC__
-#define FLDI , {{0}}
-#else
-#define FLDI
-#endif
-
-struct _global_rtl global_rtl =
-{
- {PC, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* pc_rtx */
- {CC0, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* cc0_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* stack_pointer_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* frame_pointer_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* hard_frame_pointer_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* arg_pointer_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_incoming_args_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_stack_vars_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_stack_dynamic_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_outgoing_args_rtx */
- {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_cfa_rtx */
-};
+rtx global_rtl[GR_MAX];
/* We record floating-point CONST_DOUBLEs in each floating-point mode for
the values of 0, 1, and 2. For the integer entries and VOIDmode, we
to save space during the compilation and simplify comparisons of
integers. */
-struct rtx_def const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];
+rtx const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];
+
+/* A hash table storing CONST_INTs whose absolute value is greater
+ than MAX_SAVED_CONST_INT. */
+
+static htab_t const_int_htab;
/* start_sequence and gen_sequence can make a lot of rtx expressions which are
shortly thrown away. We use two mechanisms to prevent this waste:
- First, we keep a list of the expressions used to represent the sequence
- stack in sequence_element_free_list.
-
- Second, for sizes up to 5 elements, we keep a SEQUENCE and its associated
- rtvec for use by gen_sequence. One entry for each size is sufficient
- because most cases are calls to gen_sequence followed by immediately
- emitting the SEQUENCE. Reuse is safe since emitting a sequence is
- destructive on the insn in it anyway and hence can't be redone.
+ For sizes up to 5 elements, we keep a SEQUENCE and its associated
+ rtvec for use by gen_sequence. One entry for each size is
+ sufficient because most cases are calls to gen_sequence followed by
+ immediately emitting the SEQUENCE. Reuse is safe since emitting a
+ sequence is destructive on the insn in it anyway and hence can't be
+ redone.
We do not bother to save this cached data over nested function calls.
Instead, we just reinitialize them. */
#define SEQUENCE_RESULT_SIZE 5
-static struct sequence_stack *sequence_element_free_list;
static rtx sequence_result[SEQUENCE_RESULT_SIZE];
/* During RTL generation, we also keep a list of free INSN rtl codes. */
static rtx free_insn;
-#define first_insn (current_function->emit->x_first_insn)
-#define last_insn (current_function->emit->x_last_insn)
-#define cur_insn_uid (current_function->emit->x_cur_insn_uid)
-#define last_linenum (current_function->emit->x_last_linenum)
-#define last_filename (current_function->emit->x_last_filename)
-#define first_label_num (current_function->emit->x_first_label_num)
-
-static rtx make_jump_insn_raw PROTO((rtx));
-static rtx make_call_insn_raw PROTO((rtx));
-static rtx find_line_note PROTO((rtx));
-static void mark_sequence_stack PROTO((struct sequence_stack *));
+#define first_insn (cfun->emit->x_first_insn)
+#define last_insn (cfun->emit->x_last_insn)
+#define cur_insn_uid (cfun->emit->x_cur_insn_uid)
+#define last_linenum (cfun->emit->x_last_linenum)
+#define last_filename (cfun->emit->x_last_filename)
+#define first_label_num (cfun->emit->x_first_label_num)
+
+static rtx make_jump_insn_raw PARAMS ((rtx));
+static rtx make_call_insn_raw PARAMS ((rtx));
+static rtx find_line_note PARAMS ((rtx));
+static void mark_sequence_stack PARAMS ((struct sequence_stack *));
+static void unshare_all_rtl_1 PARAMS ((rtx));
+static void unshare_all_decls PARAMS ((tree));
+static void reset_used_decls PARAMS ((tree));
+static void mark_label_nuses PARAMS ((rtx));
+static hashval_t const_int_htab_hash PARAMS ((const void *));
+static int const_int_htab_eq PARAMS ((const void *,
+ const void *));
+static int rtx_htab_mark_1 PARAMS ((void **, void *));
+static void rtx_htab_mark PARAMS ((void *));
+
\f
+/* Returns a hash code for X (which is really a CONST_INT).  The
+   INTVAL itself serves as the hash, since CONST_INTs of equal value
+   are shared via const_int_htab.  */
+
+static hashval_t
+const_int_htab_hash (x)
+ const void *x;
+{
+ return (hashval_t) INTVAL ((const struct rtx_def *) x);
+}
+
+/* Returns non-zero if the value represented by X (which is really a
+   CONST_INT) is the same as that given by Y (which is really a
+   HOST_WIDE_INT *).  Equality callback for const_int_htab; note the
+   asymmetric argument types: X is a table entry, Y a lookup key.  */
+
+static int
+const_int_htab_eq (x, y)
+ const void *x;
+ const void *y;
+{
+ return (INTVAL ((const struct rtx_def *) x) == *((const HOST_WIDE_INT *) y));
+}
+
+/* Mark the hash-table element X (which is really a pointer to an
+   rtx) as live for garbage collection.  Always returns 1 so that
+   htab_traverse keeps walking the remaining elements.  */
+
+static int
+rtx_htab_mark_1 (x, data)
+ void **x;
+ void *data ATTRIBUTE_UNUSED;
+{
+ ggc_mark_rtx (*x);
+ return 1;
+}
+
+/* Mark all the elements of HTAB (which is really an htab_t full of
+   rtxs) for garbage collection.  NOTE(review): presumably registered
+   as a GC root callback elsewhere -- the registration site is not
+   visible here; confirm.  */
+
+static void
+rtx_htab_mark (htab)
+ void *htab;
+{
+ htab_traverse (*((htab_t *) htab), rtx_htab_mark_1, NULL);
+}
+
+/* Generate a new REG rtx. Make sure ORIGINAL_REGNO is set properly, and
+ don't attempt to share with the various global pieces of rtl (such as
+ frame_pointer_rtx). */
+
rtx
-gen_rtx_CONST_INT (mode, arg)
+gen_raw_REG (mode, regno)
 enum machine_mode mode;
+ int regno;
+{
+ rtx x = gen_rtx_raw_REG (mode, regno);
+ /* Record the register number the REG was created with.  NOTE(review):
+ presumably later passes may renumber REGNO while ORIGINAL_REGNO is
+ preserved -- confirm against rtl.h.  */
+ ORIGINAL_REGNO (x) = regno;
+ return x;
+}
+
+/* There are some RTL codes that require special attention; the generation
+ functions do the raw handling. If you add to this list, modify
+ special_rtx in gengenrtl.c as well. */
+
+/* Return a (possibly shared) CONST_INT rtx for value ARG.  MODE is
+ ignored: all CONST_INTs are created with VOIDmode (see below).  */
+rtx
+gen_rtx_CONST_INT (mode, arg)
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ HOST_WIDE_INT arg;
{
+ void **slot;
+
+ /* Small constants live in the statically allocated const_int_rtx
+ table; return the shared copy directly.  */
 if (arg >= - MAX_SAVED_CONST_INT && arg <= MAX_SAVED_CONST_INT)
- return &const_int_rtx[arg + MAX_SAVED_CONST_INT];
+ return const_int_rtx[arg + MAX_SAVED_CONST_INT];
#if STORE_FLAG_VALUE != 1 && STORE_FLAG_VALUE != -1
 if (const_true_rtx && arg == STORE_FLAG_VALUE)
 return const_true_rtx;
#endif
- return gen_rtx_raw_CONST_INT (mode, arg);
+ /* Look up the CONST_INT in the hash table. */
+ slot = htab_find_slot_with_hash (const_int_htab, &arg,
+ (hashval_t) arg, INSERT);
+ if (*slot == 0)
+ /* Not present yet: create and cache it, always with VOIDmode,
+ which is why the MODE parameter above is unused.  */
+ *slot = gen_rtx_raw_CONST_INT (VOIDmode, arg);
+
+ return (rtx) *slot;
}
-/* CONST_DOUBLEs needs special handling because its length is known
+/* CONST_DOUBLEs need special handling because their length is known
only at run-time. */
+
rtx
gen_rtx_CONST_DOUBLE (mode, arg0, arg1, arg2)
enum machine_mode mode;
return stack_pointer_rtx;
}
- return gen_rtx_raw_REG (mode, regno);
+ return gen_raw_REG (mode, regno);
}
rtx
return rt;
}
+/* Generate a SUBREG of REG in MODE with byte offset OFFSET, after
+ sanity-checking that OFFSET is congruent to MODE's size.  */
+rtx
+gen_rtx_SUBREG (mode, reg, offset)
+ enum machine_mode mode;
+ rtx reg;
+ int offset;
+{
+ /* This is the most common failure type.
+ Catch it early so we can see who does it. */
+ if ((offset % GET_MODE_SIZE (mode)) != 0)
+ abort ();
+
+ /* This check isn't usable right now because combine will
+ throw arbitrary crap like a CALL into a SUBREG in
+ gen_lowpart_for_combine so we must just eat it. */
+#if 0
+ /* Check for this too. */
+ if (offset >= GET_MODE_SIZE (GET_MODE (reg)))
+ abort ();
+#endif
+ return gen_rtx_fmt_ei (SUBREG, mode, reg, offset);
+}
+
+/* Generate a SUBREG representing the least-significant part
+ * of REG if MODE is smaller than mode of REG, otherwise
+ * paradoxical SUBREG. */
+rtx
+gen_lowpart_SUBREG (mode, reg)
+ enum machine_mode mode;
+ rtx reg;
+{
+ enum machine_mode inmode;
+ int offset;
+
+ inmode = GET_MODE (reg);
+ /* A VOIDmode REG (e.g. not yet given a mode) is treated as having
+ MODE itself, which yields a zero offset below.  */
+ if (inmode == VOIDmode)
+ inmode = mode;
+ offset = 0;
+ /* On little-endian targets the low part is at byte 0; otherwise
+ compute the byte offset of the low-order part.  */
+ if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (inmode)
+ && (WORDS_BIG_ENDIAN || BYTES_BIG_ENDIAN))
+ {
+ offset = GET_MODE_SIZE (inmode) - GET_MODE_SIZE (mode);
+ /* Mixed-endian adjustment: keep only the word-level part of the
+ offset when bytes are little-endian, or only the in-word part
+ when words are little-endian.  */
+ if (! BYTES_BIG_ENDIAN)
+ offset = (offset / UNITS_PER_WORD) * UNITS_PER_WORD;
+ else if (! WORDS_BIG_ENDIAN)
+ offset %= UNITS_PER_WORD;
+ }
+ return gen_rtx_SUBREG (mode, reg, offset);
+}
+\f
/* rtx gen_rtx (code, mode, [element1, ..., elementn])
**
** This routine generates an RTX of the size specified by
/*VARARGS2*/
rtx
-gen_rtx VPROTO((enum rtx_code code, enum machine_mode mode, ...))
+gen_rtx VPARAMS ((enum rtx_code code, enum machine_mode mode, ...))
{
#ifndef ANSI_PROTOTYPES
enum rtx_code code;
/*VARARGS1*/
rtvec
-gen_rtvec VPROTO((int n, ...))
+gen_rtvec VPARAMS ((int n, ...))
{
#ifndef ANSI_PROTOTYPES
int n;
gen_reg_rtx (mode)
enum machine_mode mode;
{
- struct function *f = current_function;
+ struct function *f = cfun;
register rtx val;
/* Don't let anything called after initial flow analysis create new
if (no_new_pseudos)
abort ();
- if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
- || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ if (generating_concat_p
+ && (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT))
{
/* For complex modes, don't make a single pseudo.
Instead, make a CONCAT of two pseudos.
return gen_rtx_CONCAT (mode, realpart, imagpart);
}
- /* Make sure regno_pointer_flag and regno_reg_rtx are large
- enough to have an element for this pseudo reg number. */
+ /* Make sure regno_pointer_align and regno_reg_rtx are large enough
+ to have an element for this pseudo reg number. */
- if (reg_rtx_no == f->emit->regno_pointer_flag_length)
+ if (reg_rtx_no == f->emit->regno_pointer_align_length)
{
- int old_size = f->emit->regno_pointer_flag_length;
+ int old_size = f->emit->regno_pointer_align_length;
rtx *new1;
char *new;
- new = xrealloc (f->emit->regno_pointer_flag, old_size * 2);
- memset (new + old_size, 0, old_size);
- f->emit->regno_pointer_flag = new;
-
new = xrealloc (f->emit->regno_pointer_align, old_size * 2);
memset (new + old_size, 0, old_size);
- f->emit->regno_pointer_align = new;
+ f->emit->regno_pointer_align = (unsigned char *) new;
new1 = (rtx *) xrealloc (f->emit->x_regno_reg_rtx,
old_size * 2 * sizeof (rtx));
memset (new1 + old_size, 0, old_size * sizeof (rtx));
regno_reg_rtx = new1;
- f->emit->regno_pointer_flag_length = old_size * 2;
+ f->emit->regno_pointer_align_length = old_size * 2;
}
- val = gen_rtx_raw_REG (mode, reg_rtx_no);
+ val = gen_raw_REG (mode, reg_rtx_no);
regno_reg_rtx[reg_rtx_no++] = val;
return val;
}
rtx reg;
int align;
{
- if (! REGNO_POINTER_FLAG (REGNO (reg)))
+ if (! REG_POINTER (reg))
{
- REGNO_POINTER_FLAG (REGNO (reg)) = 1;
+ REG_POINTER (reg) = 1;
if (align)
REGNO_POINTER_ALIGN (REGNO (reg)) = align;
return first_label_num;
}
\f
+/* Return the final regno of X, which is a SUBREG of a hard
+ register.  If CHECK_MODE is non-zero, additionally verify that the
+ inner register is valid in its own mode.  Aborts on any malformed
+ SUBREG (non-REG inner rtx, pseudo register, or an offset not
+ congruent to MODE's size).  */
+int
+subreg_hard_regno (x, check_mode)
+ register rtx x;
+ int check_mode;
+{
+ enum machine_mode mode = GET_MODE (x);
+ unsigned int byte_offset, base_regno, final_regno;
+ rtx reg = SUBREG_REG (x);
+
+ /* This is where we attempt to catch illegal subregs
+ created by the compiler. */
+ if (GET_CODE (x) != SUBREG
+ || GET_CODE (reg) != REG)
+ abort ();
+ base_regno = REGNO (reg);
+ if (base_regno >= FIRST_PSEUDO_REGISTER)
+ abort ();
+ if (check_mode && ! HARD_REGNO_MODE_OK (base_regno, GET_MODE (reg)))
+ abort ();
+
+ /* Catch non-congruent offsets too. */
+ byte_offset = SUBREG_BYTE (x);
+ if ((byte_offset % GET_MODE_SIZE (mode)) != 0)
+ abort ();
+
+ /* Delegate the actual offset-to-regno arithmetic.  */
+ final_regno = subreg_regno (x);
+
+ return final_regno;
+}
+
/* Return a value representing some low-order bits of X, where the number
of low-order bits is given by MODE. Note that no conversion is done
between floating-point and fixed-point values, rather, the bit
enum machine_mode mode;
register rtx x;
{
- int word = 0;
+ int msize = GET_MODE_SIZE (mode);
+ int xsize = GET_MODE_SIZE (GET_MODE (x));
+ int offset = 0;
if (GET_MODE (x) == mode)
return x;
/* MODE must occupy no more words than the mode of X. */
if (GET_MODE (x) != VOIDmode
- && ((GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD
- > ((GET_MODE_SIZE (GET_MODE (x)) + (UNITS_PER_WORD - 1))
- / UNITS_PER_WORD)))
+ && ((msize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD
+ > ((xsize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
return 0;
- if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
- word = ((GET_MODE_SIZE (GET_MODE (x))
- - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
- / UNITS_PER_WORD);
+ if ((WORDS_BIG_ENDIAN || BYTES_BIG_ENDIAN)
+ && xsize > msize)
+ {
+ int difference = xsize - msize;
+
+ if (WORDS_BIG_ENDIAN)
+ offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset += difference % UNITS_PER_WORD;
+ }
if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
&& (GET_MODE_CLASS (mode) == MODE_INT
else if (GET_CODE (x) == SUBREG
&& (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
|| GET_MODE_SIZE (mode) == GET_MODE_UNIT_SIZE (GET_MODE (x))))
- return (GET_MODE (SUBREG_REG (x)) == mode && SUBREG_WORD (x) == 0
- ? SUBREG_REG (x)
- : gen_rtx_SUBREG (mode, SUBREG_REG (x), SUBREG_WORD (x) + word));
+ {
+ int final_offset;
+
+ if (GET_MODE (SUBREG_REG (x)) == mode && subreg_lowpart_p (x))
+ return SUBREG_REG (x);
+
+ /* When working with SUBREGs the rule is that the byte
+ offset must be a multiple of the SUBREG's mode. */
+ final_offset = SUBREG_BYTE (x) + offset;
+ final_offset = (final_offset / GET_MODE_SIZE (mode));
+ final_offset = (final_offset * GET_MODE_SIZE (mode));
+ return gen_rtx_SUBREG (mode, SUBREG_REG (x), final_offset);
+ }
else if (GET_CODE (x) == REG)
{
- /* Let the backend decide how many registers to skip. This is needed
- in particular for Sparc64 where fp regs are smaller than a word. */
- /* ??? Note that subregs are now ambiguous, in that those against
- pseudos are sized by the Word Size, while those against hard
- regs are sized by the underlying register size. Better would be
- to always interpret the subreg offset parameter as bytes or bits. */
-
- if (WORDS_BIG_ENDIAN && REGNO (x) < FIRST_PSEUDO_REGISTER)
- word = (HARD_REGNO_NREGS (REGNO (x), GET_MODE (x))
- - HARD_REGNO_NREGS (REGNO (x), mode));
-
- /* If the register is not valid for MODE, return 0. If we don't
- do this, there is no way to fix up the resulting REG later.
- But we do do this if the current REG is not valid for its
- mode. This latter is a kludge, but is required due to the
- way that parameters are passed on some machines, most
- notably Sparc. */
- if (REGNO (x) < FIRST_PSEUDO_REGISTER
- && ! HARD_REGNO_MODE_OK (REGNO (x) + word, mode)
- && HARD_REGNO_MODE_OK (REGNO (x), GET_MODE (x)))
- return 0;
- else if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ /* Hard registers are done specially in certain cases. */
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER)
+ {
+ int final_regno = REGNO (x) +
+ subreg_regno_offset (REGNO (x), GET_MODE (x),
+ offset, mode);
+
+ /* If the final regno is not valid for MODE, punt. */
+ /* ??? We do allow it if the current REG is not valid for
+ ??? it's mode. It is a kludge to work around how float/complex
+ ??? arguments are passed on 32-bit Sparc and should be fixed. */
+ if (! HARD_REGNO_MODE_OK (final_regno, mode)
+ && HARD_REGNO_MODE_OK (REGNO (x), GET_MODE (x)))
+ return 0;
+
/* integrate.c can't handle parts of a return value register. */
- && (! REG_FUNCTION_VALUE_P (x)
+ if ((! REG_FUNCTION_VALUE_P (x)
|| ! rtx_equal_function_value_matters)
-#ifdef CLASS_CANNOT_CHANGE_SIZE
- && ! (GET_MODE_SIZE (mode) != GET_MODE_SIZE (GET_MODE (x))
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ && ! (CLASS_CANNOT_CHANGE_MODE_P (mode, GET_MODE (x))
&& GET_MODE_CLASS (GET_MODE (x)) != MODE_COMPLEX_INT
&& GET_MODE_CLASS (GET_MODE (x)) != MODE_COMPLEX_FLOAT
&& (TEST_HARD_REG_BIT
- (reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE],
+ (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
REGNO (x))))
#endif
/* We want to keep the stack, frame, and arg pointers
&& x != arg_pointer_rtx
#endif
&& x != stack_pointer_rtx)
- return gen_rtx_REG (mode, REGNO (x) + word);
- else
- return gen_rtx_SUBREG (mode, x, word);
+ return gen_rtx_REG (mode, final_regno);
+ }
+ return gen_rtx_SUBREG (mode, x, offset);
}
/* If X is a CONST_INT or a CONST_DOUBLE, extract the appropriate bits
from the low-order part of the constant. */
{
/* If MODE is twice the host word size, X is already the desired
representation. Otherwise, if MODE is wider than a word, we can't
- do this. If MODE is exactly a word, return just one CONST_INT.
- If MODE is smaller than a word, clear the bits that don't belong
- in our mode, unless they and our sign bit are all one. So we get
- either a reasonable negative value or a reasonable unsigned value
- for this mode. */
+ do this. If MODE is exactly a word, return just one CONST_INT. */
if (GET_MODE_BITSIZE (mode) >= 2 * HOST_BITS_PER_WIDE_INT)
return x;
else
{
/* MODE must be narrower than HOST_BITS_PER_WIDE_INT. */
- int width = GET_MODE_BITSIZE (mode);
HOST_WIDE_INT val = (GET_CODE (x) == CONST_INT ? INTVAL (x)
: CONST_DOUBLE_LOW (x));
/* Sign extend to HOST_WIDE_INT. */
- val = val << (HOST_BITS_PER_WIDE_INT - width) >> (HOST_BITS_PER_WIDE_INT - width);
+ val = trunc_int_for_mode (val, mode);
return (GET_CODE (x) == CONST_INT && INTVAL (x) == val ? x
: GEN_INT (val));
}
}
+#ifndef REAL_ARITHMETIC
/* If X is an integral constant but we want it in floating-point, it
must be the case that we have a union of an integer and a floating-point
value. If the machine-parameters allow it, simulate that union here
&& GET_MODE_SIZE (mode) == UNITS_PER_WORD
&& GET_CODE (x) == CONST_INT
&& sizeof (float) * HOST_BITS_PER_CHAR == HOST_BITS_PER_WIDE_INT)
-#ifdef REAL_ARITHMETIC
- {
- REAL_VALUE_TYPE r;
- HOST_WIDE_INT i;
-
- i = INTVAL (x);
- r = REAL_VALUE_FROM_TARGET_SINGLE (i);
- return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
- }
-#else
{
union {HOST_WIDE_INT i; float d; } u;
u.i = INTVAL (x);
return CONST_DOUBLE_FROM_REAL_VALUE (u.d, mode);
}
-#endif
else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
&& HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
|| flag_pretend_float)
&& GET_MODE (x) == VOIDmode
&& (sizeof (double) * HOST_BITS_PER_CHAR
== 2 * HOST_BITS_PER_WIDE_INT))
-#ifdef REAL_ARITHMETIC
- {
- REAL_VALUE_TYPE r;
- HOST_WIDE_INT i[2];
- HOST_WIDE_INT low, high;
-
- if (GET_CODE (x) == CONST_INT)
- low = INTVAL (x), high = low >> (HOST_BITS_PER_WIDE_INT -1);
- else
- low = CONST_DOUBLE_LOW (x), high = CONST_DOUBLE_HIGH (x);
-
- /* REAL_VALUE_TARGET_DOUBLE takes the addressing order of the
- target machine. */
- if (WORDS_BIG_ENDIAN)
- i[0] = high, i[1] = low;
- else
- i[0] = low, i[1] = high;
-
- r = REAL_VALUE_FROM_TARGET_DOUBLE (i);
- return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
- }
-#else
{
union {HOST_WIDE_INT i[2]; double d; } u;
HOST_WIDE_INT low, high;
return CONST_DOUBLE_FROM_REAL_VALUE (u.d, mode);
}
-#endif
-
- /* We need an extra case for machines where HOST_BITS_PER_WIDE_INT is the
- same as sizeof (double) or when sizeof (float) is larger than the
- size of a word on the target machine. */
-#ifdef REAL_ARITHMETIC
- else if (mode == SFmode && GET_CODE (x) == CONST_INT)
- {
- REAL_VALUE_TYPE r;
- HOST_WIDE_INT i;
-
- i = INTVAL (x);
- r = REAL_VALUE_FROM_TARGET_SINGLE (i);
- return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
- }
- else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
- && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
- || flag_pretend_float)
- && GET_MODE_CLASS (mode) == MODE_FLOAT
- && GET_MODE_SIZE (mode) == UNITS_PER_WORD
- && GET_CODE (x) == CONST_INT
- && (sizeof (double) * HOST_BITS_PER_CHAR
- == HOST_BITS_PER_WIDE_INT))
- {
- REAL_VALUE_TYPE r;
- HOST_WIDE_INT i;
-
- i = INTVAL (x);
- r = REAL_VALUE_FROM_TARGET_DOUBLE (&i);
- return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
- }
-#endif
/* Similarly, if this is converting a floating-point value into a
single-word integer. Only do this is the host and target parameters are
&& GET_CODE (x) == CONST_DOUBLE
&& GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) == BITS_PER_WORD)
- return operand_subword (x, word, 0, GET_MODE (x));
+ return constant_subword (x, (offset / UNITS_PER_WORD), GET_MODE (x));
/* Similarly, if this is converting a floating-point value into a
two-word integer, we can do this one word at a time and make an
&& GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) == 2 * BITS_PER_WORD)
{
- rtx lowpart
- = operand_subword (x, word + WORDS_BIG_ENDIAN, 0, GET_MODE (x));
- rtx highpart
- = operand_subword (x, word + ! WORDS_BIG_ENDIAN, 0, GET_MODE (x));
-
+ rtx lowpart, highpart;
+
+ lowpart = constant_subword (x,
+ (offset / UNITS_PER_WORD) + WORDS_BIG_ENDIAN,
+ GET_MODE (x));
+ highpart = constant_subword (x,
+ (offset / UNITS_PER_WORD) + (! WORDS_BIG_ENDIAN),
+ GET_MODE (x));
if (lowpart && GET_CODE (lowpart) == CONST_INT
&& highpart && GET_CODE (highpart) == CONST_INT)
return immed_double_const (INTVAL (lowpart), INTVAL (highpart), mode);
}
+#else /* ifndef REAL_ARITHMETIC */
+
+ /* When we have a FP emulator, we can handle all conversions between
+ FP and integer operands. This simplifies reload because it
+ doesn't have to deal with constructs like (subreg:DI
+ (const_double:SF ...)) or (subreg:DF (const_int ...)). */
+
+ else if (mode == SFmode
+ && GET_CODE (x) == CONST_INT)
+ {
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT i;
+
+ i = INTVAL (x);
+ r = REAL_VALUE_FROM_TARGET_SINGLE (i);
+ return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+ }
+ else if (mode == DFmode
+ && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
+ && GET_MODE (x) == VOIDmode)
+ {
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT i[2];
+ HOST_WIDE_INT low, high;
+
+ if (GET_CODE (x) == CONST_INT)
+ {
+ low = INTVAL (x);
+ high = low >> (HOST_BITS_PER_WIDE_INT - 1);
+ }
+ else
+ {
+ low = CONST_DOUBLE_LOW (x);
+ high = CONST_DOUBLE_HIGH (x);
+ }
+
+ /* REAL_VALUE_TARGET_DOUBLE takes the addressing order of the
+ target machine. */
+ if (WORDS_BIG_ENDIAN)
+ i[0] = high, i[1] = low;
+ else
+ i[0] = low, i[1] = high;
+
+ r = REAL_VALUE_FROM_TARGET_DOUBLE (i);
+ return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+ }
+ else if ((GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_CODE (x) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE r;
+ long i[4]; /* Only the low 32 bits of each 'long' are used. */
+ int endian = WORDS_BIG_ENDIAN ? 1 : 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ switch (GET_MODE (x))
+ {
+ case SFmode:
+ REAL_VALUE_TO_TARGET_SINGLE (r, i[endian]);
+ i[1 - endian] = 0;
+ break;
+ case DFmode:
+ REAL_VALUE_TO_TARGET_DOUBLE (r, i);
+ break;
+#if LONG_DOUBLE_TYPE_SIZE == 96
+ case XFmode:
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, i + endian);
+ i[3-3*endian] = 0;
+#else
+ case TFmode:
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, i);
+#endif
+ break;
+ default:
+ abort ();
+ }
+
+ /* Now, pack the 32-bit elements of the array into a CONST_DOUBLE
+ and return it. */
+#if HOST_BITS_PER_WIDE_INT == 32
+ return immed_double_const (i[endian], i[1 - endian], mode);
+#else
+ {
+ int c;
+
+ if (HOST_BITS_PER_WIDE_INT != 64)
+ abort ();
+
+ for (c = 0; c < 4; c++)
+ i[c] &= ~ (0L);
+
+ switch (GET_MODE (x))
+ {
+ case SFmode:
+ case DFmode:
+ return immed_double_const (((unsigned long) i[endian]) |
+ (((HOST_WIDE_INT) i[1-endian]) << 32),
+ 0, mode);
+ default:
+ return immed_double_const (((unsigned long) i[endian*3]) |
+ (((HOST_WIDE_INT) i[1+endian]) << 32),
+ ((unsigned long) i[2-endian]) |
+ (((HOST_WIDE_INT) i[3-endian*3]) << 32),
+ mode);
+ }
+ }
+#endif
+ }
+#endif /* ifndef REAL_ARITHMETIC */
/* Otherwise, we can't do this. */
return 0;
&& GET_MODE_BITSIZE (mode) < BITS_PER_WORD
&& REG_P (x)
&& REGNO (x) < FIRST_PSEUDO_REGISTER)
- fatal ("Unable to access real part of complex value in a hard register on this target");
+ internal_error
+ ("Can't access real part of complex value in hard register");
else if (WORDS_BIG_ENDIAN)
return gen_highpart (mode, x);
else
return XEXP (x, 1);
else if (WORDS_BIG_ENDIAN)
return gen_lowpart (mode, x);
- else if (!WORDS_BIG_ENDIAN
+ else if (! WORDS_BIG_ENDIAN
&& GET_MODE_BITSIZE (mode) < BITS_PER_WORD
&& REG_P (x)
&& REGNO (x) < FIRST_PSEUDO_REGISTER)
- fatal ("Unable to access imaginary part of complex value in a hard register on this target");
+ internal_error
+ ("can't access imaginary part of complex value in hard register");
else
return gen_highpart (mode, x);
}
if (GET_CODE (x) != SUBREG)
abort ();
- return SUBREG_WORD (x) * UNITS_PER_WORD < GET_MODE_UNIT_SIZE (GET_MODE (SUBREG_REG (x)));
+ return ((unsigned int) SUBREG_BYTE (x)
+ < GET_MODE_UNIT_SIZE (GET_MODE (SUBREG_REG (x))));
}
\f
/* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a value,
enum machine_mode mode;
register rtx x;
{
+ unsigned int msize = GET_MODE_SIZE (mode);
+ unsigned int xsize = GET_MODE_SIZE (GET_MODE (x));
+
/* This case loses if X is a subreg. To catch bugs early,
complain if an invalid MODE is used even in other cases. */
- if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
- && GET_MODE_SIZE (mode) != GET_MODE_UNIT_SIZE (GET_MODE (x)))
+ if (msize > UNITS_PER_WORD
+ && msize != GET_MODE_UNIT_SIZE (GET_MODE (x)))
abort ();
if (GET_CODE (x) == CONST_DOUBLE
#if !(TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT || defined (REAL_IS_NOT_DOUBLE))
else if (GET_CODE (x) == MEM)
{
register int offset = 0;
+
if (! WORDS_BIG_ENDIAN)
- offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
- - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+ offset = (MAX (xsize, UNITS_PER_WORD)
+ - MAX (msize, UNITS_PER_WORD));
if (! BYTES_BIG_ENDIAN
- && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
- offset -= (GET_MODE_SIZE (mode)
- - MIN (UNITS_PER_WORD,
- GET_MODE_SIZE (GET_MODE (x))));
+ && msize < UNITS_PER_WORD)
+ offset -= (msize - MIN (UNITS_PER_WORD, xsize));
return change_address (x, mode, plus_constant (XEXP (x, 0), offset));
}
/* The only time this should occur is when we are looking at a
multi-word item with a SUBREG whose mode is the same as that of the
item. It isn't clear what we would do if it wasn't. */
- if (SUBREG_WORD (x) != 0)
+ if (SUBREG_BYTE (x) != 0)
abort ();
return gen_highpart (mode, SUBREG_REG (x));
}
else if (GET_CODE (x) == REG)
{
- int word;
+ int offset = 0;
- /* Let the backend decide how many registers to skip. This is needed
- in particular for sparc64 where fp regs are smaller than a word. */
- /* ??? Note that subregs are now ambiguous, in that those against
- pseudos are sized by the word size, while those against hard
- regs are sized by the underlying register size. Better would be
- to always interpret the subreg offset parameter as bytes or bits. */
+ if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode))
+ abort ();
- if (WORDS_BIG_ENDIAN)
- word = 0;
- else if (REGNO (x) < FIRST_PSEUDO_REGISTER)
- word = (HARD_REGNO_NREGS (REGNO (x), GET_MODE (x))
- - HARD_REGNO_NREGS (REGNO (x), mode));
- else
- word = ((GET_MODE_SIZE (GET_MODE (x))
- - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
- / UNITS_PER_WORD);
-
- if (REGNO (x) < FIRST_PSEUDO_REGISTER
- /* integrate.c can't handle parts of a return value register. */
- && (! REG_FUNCTION_VALUE_P (x)
- || ! rtx_equal_function_value_matters)
+ if ((! WORDS_BIG_ENDIAN || ! BYTES_BIG_ENDIAN)
+ && xsize > msize)
+ {
+ int difference = xsize - msize;
+
+ if (! WORDS_BIG_ENDIAN)
+ offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
+ if (! BYTES_BIG_ENDIAN)
+ offset += difference % UNITS_PER_WORD;
+ }
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER)
+ {
+ int final_regno = REGNO (x) +
+ subreg_regno_offset (REGNO (x), GET_MODE (x), offset, mode);
+
+ /* integrate.c can't handle parts of a return value register.
+ ??? Then integrate.c should be fixed!
+ ??? What about CLASS_CANNOT_CHANGE_SIZE? */
+ if ((! REG_FUNCTION_VALUE_P (x)
+ || ! rtx_equal_function_value_matters)
/* We want to keep the stack, frame, and arg pointers special. */
- && x != frame_pointer_rtx
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
- && x != arg_pointer_rtx
+ && x != frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && x != arg_pointer_rtx
#endif
- && x != stack_pointer_rtx)
- return gen_rtx_REG (mode, REGNO (x) + word);
- else
- return gen_rtx_SUBREG (mode, x, word);
+ && x != stack_pointer_rtx)
+ return gen_rtx_REG (mode, final_regno);
+ }
+ /* Just generate a normal SUBREG. */
+ return gen_rtx_SUBREG (mode, x, offset);
}
else
abort ();
subreg_lowpart_p (x)
rtx x;
{
+ unsigned int offset = 0;
+ int difference = (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ - GET_MODE_SIZE (GET_MODE (x)));
+
if (GET_CODE (x) != SUBREG)
return 1;
else if (GET_MODE (SUBREG_REG (x)) == VOIDmode)
return 0;
- if (WORDS_BIG_ENDIAN
- && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) > UNITS_PER_WORD)
- return (SUBREG_WORD (x)
- == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
- - MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD))
- / UNITS_PER_WORD));
+ if (difference > 0)
+ {
+ if (WORDS_BIG_ENDIAN)
+ offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset += difference % UNITS_PER_WORD;
+ }
- return SUBREG_WORD (x) == 0;
+ return SUBREG_BYTE (x) == offset;
}
\f
-/* Return subword I of operand OP.
- The word number, I, is interpreted as the word number starting at the
- low-order address. Word 0 is the low-order word if not WORDS_BIG_ENDIAN,
- otherwise it is the high-order word.
- If we cannot extract the required word, we return zero. Otherwise, an
- rtx corresponding to the requested word will be returned.
-
- VALIDATE_ADDRESS is nonzero if the address should be validated. Before
- reload has completed, a valid address will always be returned. After
- reload, if a valid address cannot be returned, we return zero.
-
- If VALIDATE_ADDRESS is zero, we simply form the required address; validating
- it is the responsibility of the caller.
-
- MODE is the mode of OP in case it is a CONST_INT. */
+/* Helper routine for all the constant cases of operand_subword.
+ Some places invoke this directly. */
rtx
-operand_subword (op, i, validate_address, mode)
+constant_subword (op, offset, mode)
rtx op;
- int i;
- int validate_address;
+ int offset;
enum machine_mode mode;
{
- HOST_WIDE_INT val;
int size_ratio = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
-
- if (mode == VOIDmode)
- mode = GET_MODE (op);
-
- if (mode == VOIDmode)
- abort ();
-
- /* If OP is narrower than a word, fail. */
- if (mode != BLKmode
- && (GET_MODE_SIZE (mode) < UNITS_PER_WORD))
- return 0;
-
- /* If we want a word outside OP, return zero. */
- if (mode != BLKmode
- && (i + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode))
- return const0_rtx;
+ HOST_WIDE_INT val;
/* If OP is already an integer word, return it. */
if (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) == UNITS_PER_WORD)
return op;
- /* If OP is a REG or SUBREG, we can handle it very simply. */
- if (GET_CODE (op) == REG)
- {
- /* ??? There is a potential problem with this code. It does not
- properly handle extractions of a subword from a hard register
- that is larger than word_mode. Presumably the check for
- HARD_REGNO_MODE_OK catches these most of these cases. */
-
- /* If OP is a hard register, but OP + I is not a hard register,
- then extracting a subword is impossible.
-
- For example, consider if OP is the last hard register and it is
- larger than word_mode. If we wanted word N (for N > 0) because a
- part of that hard register was known to contain a useful value,
- then OP + I would refer to a pseudo, not the hard register we
- actually wanted. */
- if (REGNO (op) < FIRST_PSEUDO_REGISTER
- && REGNO (op) + i >= FIRST_PSEUDO_REGISTER)
- return 0;
-
- /* If the register is not valid for MODE, return 0. Note we
- have to check both OP and OP + I since they may refer to
- different parts of the register file.
-
- Consider if OP refers to the last 96bit FP register and we want
- subword 3 because that subword is known to contain a value we
- needed. */
- if (REGNO (op) < FIRST_PSEUDO_REGISTER
- && (! HARD_REGNO_MODE_OK (REGNO (op), word_mode)
- || ! HARD_REGNO_MODE_OK (REGNO (op) + i, word_mode)))
- return 0;
- else if (REGNO (op) >= FIRST_PSEUDO_REGISTER
- || (REG_FUNCTION_VALUE_P (op)
- && rtx_equal_function_value_matters)
- /* We want to keep the stack, frame, and arg pointers
- special. */
- || op == frame_pointer_rtx
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
- || op == arg_pointer_rtx
-#endif
- || op == stack_pointer_rtx)
- return gen_rtx_SUBREG (word_mode, op, i);
- else
- return gen_rtx_REG (word_mode, REGNO (op) + i);
- }
- else if (GET_CODE (op) == SUBREG)
- return gen_rtx_SUBREG (word_mode, SUBREG_REG (op), i + SUBREG_WORD (op));
- else if (GET_CODE (op) == CONCAT)
- {
- int partwords = GET_MODE_UNIT_SIZE (GET_MODE (op)) / UNITS_PER_WORD;
- if (i < partwords)
- return operand_subword (XEXP (op, 0), i, validate_address, mode);
- return operand_subword (XEXP (op, 1), i - partwords,
- validate_address, mode);
- }
-
- /* Form a new MEM at the requested address. */
- if (GET_CODE (op) == MEM)
- {
- rtx addr = plus_constant (XEXP (op, 0), i * UNITS_PER_WORD);
- rtx new;
-
- if (validate_address)
- {
- if (reload_completed)
- {
- if (! strict_memory_address_p (word_mode, addr))
- return 0;
- }
- else
- addr = memory_address (word_mode, addr);
- }
-
- new = gen_rtx_MEM (word_mode, addr);
-
- MEM_COPY_ATTRIBUTES (new, op);
- RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (op);
- MEM_ALIAS_SET (new) = MEM_ALIAS_SET (op);
-
- return new;
- }
-
- /* The only remaining cases are when OP is a constant. If the host and
- target floating formats are the same, handling two-word floating
- constants are easy. Note that REAL_VALUE_TO_TARGET_{SINGLE,DOUBLE}
- are defined as returning one or two 32 bit values, respectively,
- and not values of BITS_PER_WORD bits. */
#ifdef REAL_ARITHMETIC
-/* The output is some bits, the width of the target machine's word.
- A wider-word host can surely hold them in a CONST_INT. A narrower-word
- host can't. */
+ /* The output is some bits, the width of the target machine's word.
+ A wider-word host can surely hold them in a CONST_INT. A narrower-word
+ host can't. */
if (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD
&& GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) == 64
So we explicitly mask and sign-extend as necessary. */
if (BITS_PER_WORD == 32)
{
- val = k[i];
+ val = k[offset];
val = ((val & 0xffffffff) ^ 0x80000000) - 0x80000000;
return GEN_INT (val);
}
#if HOST_BITS_PER_WIDE_INT >= 64
- else if (BITS_PER_WORD >= 64 && i == 0)
+ else if (BITS_PER_WORD >= 64 && offset == 0)
{
val = k[! WORDS_BIG_ENDIAN];
val = (((val & 0xffffffff) ^ 0x80000000) - 0x80000000) << 32;
#endif
else if (BITS_PER_WORD == 16)
{
- val = k[i >> 1];
- if ((i & 1) == !WORDS_BIG_ENDIAN)
+ val = k[offset >> 1];
+ if ((offset & 1) == ! WORDS_BIG_ENDIAN)
val >>= 16;
- val &= 0xffff;
+ val = ((val & 0xffff) ^ 0x8000) - 0x8000;
return GEN_INT (val);
}
else
&& GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) > 64
&& GET_CODE (op) == CONST_DOUBLE)
- {
- long k[4];
- REAL_VALUE_TYPE rv;
+ {
+ long k[4];
+ REAL_VALUE_TYPE rv;
- REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
- REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
- if (BITS_PER_WORD == 32)
- {
- val = k[i];
- val = ((val & 0xffffffff) ^ 0x80000000) - 0x80000000;
- return GEN_INT (val);
- }
- else
- abort ();
- }
+ if (BITS_PER_WORD == 32)
+ {
+ val = k[offset];
+ val = ((val & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ return GEN_INT (val);
+ }
+#if HOST_BITS_PER_WIDE_INT >= 64
+ else if (BITS_PER_WORD >= 64 && offset <= 1)
+ {
+ val = k[offset * 2 + ! WORDS_BIG_ENDIAN];
+ val = (((val & 0xffffffff) ^ 0x80000000) - 0x80000000) << 32;
+ val |= (HOST_WIDE_INT) k[offset * 2 + WORDS_BIG_ENDIAN] & 0xffffffff;
+ return GEN_INT (val);
+ }
+#endif
+ else
+ abort ();
+ }
#else /* no REAL_ARITHMETIC */
if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
&& HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
compilers don't like a conditional inside macro args, so we have two
copies of the return. */
#ifdef HOST_WORDS_BIG_ENDIAN
- return GEN_INT (i == WORDS_BIG_ENDIAN
+ return GEN_INT (offset == WORDS_BIG_ENDIAN
? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op));
#else
- return GEN_INT (i != WORDS_BIG_ENDIAN
+ return GEN_INT (offset != WORDS_BIG_ENDIAN
? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op));
#endif
}
if (BITS_PER_WORD == 16)
{
- if ((i & 1) == !WORDS_BIG_ENDIAN)
+ if ((offset & 1) == ! WORDS_BIG_ENDIAN)
val >>= 16;
- val &= 0xffff;
+ val = ((val & 0xffff) ^ 0x8000) - 0x8000;
}
return GEN_INT (val);
/* The only remaining cases that we can handle are integers.
Convert to proper endianness now since these cases need it.
- At this point, i == 0 means the low-order word.
+ At this point, offset == 0 means the low-order word.
We do not want to handle the case when BITS_PER_WORD <= HOST_BITS_PER_INT
in general. However, if OP is (const_int 0), we can just return
return 0;
if (WORDS_BIG_ENDIAN)
- i = GET_MODE_SIZE (mode) / UNITS_PER_WORD - 1 - i;
+ offset = GET_MODE_SIZE (mode) / UNITS_PER_WORD - 1 - offset;
/* Find out which word on the host machine this value is in and get
it from the constant. */
- val = (i / size_ratio == 0
+ val = (offset / size_ratio == 0
? (GET_CODE (op) == CONST_INT ? INTVAL (op) : CONST_DOUBLE_LOW (op))
: (GET_CODE (op) == CONST_INT
? (INTVAL (op) < 0 ? ~0 : 0) : CONST_DOUBLE_HIGH (op)));
/* Get the value we want into the low bits of val. */
if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT)
- val = ((val >> ((i % size_ratio) * BITS_PER_WORD)));
+ val = ((val >> ((offset % size_ratio) * BITS_PER_WORD)));
val = trunc_int_for_mode (val, word_mode);
return GEN_INT (val);
}
+/* Return subword OFFSET of operand OP.
+ The word number, OFFSET, is interpreted as the word number starting
+ at the low-order address. OFFSET 0 is the low-order word if not
+ WORDS_BIG_ENDIAN, otherwise it is the high-order word.
+
+ If we cannot extract the required word, we return zero. Otherwise,
+ an rtx corresponding to the requested word will be returned.
+
+ VALIDATE_ADDRESS is nonzero if the address should be validated. Before
+ reload has completed, a valid address will always be returned. After
+ reload, if a valid address cannot be returned, we return zero.
+
+ If VALIDATE_ADDRESS is zero, we simply form the required address; validating
+ it is the responsibility of the caller.
+
+ MODE is the mode of OP in case it is a CONST_INT.
+
+ ??? This is still rather broken for some cases. The problem for the
+ moment is that all callers of this thing provide no 'goal mode' to
+ tell us to work with. This exists because all callers were written
+ in a word based SUBREG world. */
+
+rtx
+operand_subword (op, offset, validate_address, mode)
+ rtx op;
+ unsigned int offset;
+ int validate_address;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ if (mode == VOIDmode)
+ abort ();
+
+ /* If OP is narrower than a word, fail. */
+ if (mode != BLKmode
+ && (GET_MODE_SIZE (mode) < UNITS_PER_WORD))
+ return 0;
+
+ /* If we want a word outside OP, return zero. */
+ if (mode != BLKmode
+ && (offset + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode))
+ return const0_rtx;
+
+ switch (GET_CODE (op))
+ {
+ case REG:
+ case SUBREG:
+ case CONCAT:
+ case MEM:
+ break;
+
+ default:
+ /* The only remaining cases are when OP is a constant. If the host and
+ target floating formats are the same, handling two-word floating
+ constants are easy. Note that REAL_VALUE_TO_TARGET_{SINGLE,DOUBLE}
+ are defined as returning one or two 32 bit values, respectively,
+ and not values of BITS_PER_WORD bits. */
+ return constant_subword (op, offset, mode);
+ }
+
+ /* If OP is already an integer word, return it. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD)
+ return op;
+
+ /* If OP is a REG or SUBREG, we can handle it very simply. */
+ if (GET_CODE (op) == REG)
+ {
+ if (REGNO (op) < FIRST_PSEUDO_REGISTER)
+ {
+ int final_regno = REGNO (op) +
+ subreg_regno_offset (REGNO (op), GET_MODE (op),
+ offset * UNITS_PER_WORD,
+ word_mode);
+
+ /* If the register is not valid for MODE, return 0. If we don't
+ do this, there is no way to fix up the resulting REG later. */
+ if (! HARD_REGNO_MODE_OK (final_regno, word_mode))
+ return 0;
+
+ /* integrate.c can't handle parts of a return value register.
+ ??? Then integrate.c should be fixed!
+ ??? What about CLASS_CANNOT_CHANGE_SIZE? */
+ if ((! REG_FUNCTION_VALUE_P (op)
+ || ! rtx_equal_function_value_matters)
+ /* ??? What about CLASS_CANNOT_CHANGE_SIZE? */
+ /* We want to keep the stack, frame, and arg pointers
+ special. */
+ && op != frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && op != arg_pointer_rtx
+#endif
+ && op != stack_pointer_rtx)
+ return gen_rtx_REG (word_mode, final_regno);
+ }
+
+ /* Just return a normal SUBREG. */
+ return gen_rtx_SUBREG (word_mode, op,
+ (offset * UNITS_PER_WORD));
+ }
+ else if (GET_CODE (op) == SUBREG)
+ {
+ int final_offset = ((offset * UNITS_PER_WORD) + SUBREG_BYTE (op));
+
+ /* When working with SUBREGs the rule is that the byte
+ offset must be a multiple of the SUBREG's mode. */
+ final_offset = (final_offset / GET_MODE_SIZE (word_mode));
+ final_offset = (final_offset * GET_MODE_SIZE (word_mode));
+ return gen_rtx_SUBREG (word_mode, SUBREG_REG (op), final_offset);
+ }
+ else if (GET_CODE (op) == CONCAT)
+ {
+ unsigned int partwords = GET_MODE_UNIT_SIZE (GET_MODE (op)) / UNITS_PER_WORD;
+ if (offset < partwords)
+ return operand_subword (XEXP (op, 0), offset, validate_address, mode);
+ return operand_subword (XEXP (op, 1), offset - partwords,
+ validate_address, mode);
+ }
+
+ /* Form a new MEM at the requested address. */
+ if (GET_CODE (op) == MEM)
+ {
+ rtx addr = plus_constant (XEXP (op, 0), (offset * UNITS_PER_WORD));
+ rtx new;
+
+ if (validate_address)
+ {
+ if (reload_completed)
+ {
+ if (! strict_memory_address_p (word_mode, addr))
+ return 0;
+ }
+ else
+ addr = memory_address (word_mode, addr);
+ }
+
+ new = gen_rtx_MEM (word_mode, addr);
+ MEM_COPY_ATTRIBUTES (new, op);
+ return new;
+ }
+
+ /* Unreachable... (famous last words) */
+ abort ();
+}
+
/* Similar to `operand_subword', but never return 0. If we can't extract
the required subword, put OP into a register and try again. If that fails,
- abort. We always validate the address in this case. It is not valid
- to call this function after reload; it is mostly meant for RTL
- generation.
+ abort. We always validate the address in this case.
MODE is the mode of OP, in case it is CONST_INT. */
rtx
-operand_subword_force (op, i, mode)
+operand_subword_force (op, offset, mode)
rtx op;
- int i;
+ unsigned int offset;
enum machine_mode mode;
{
- rtx result = operand_subword (op, i, 1, mode);
+ rtx result = operand_subword (op, offset, 1, mode);
if (result)
return result;
op = force_reg (mode, op);
}
- result = operand_subword (op, i, 1, mode);
+ result = operand_subword (op, offset, 1, mode);
if (result == 0)
abort ();
}
else
{
- rtx new = gen_rtx_COMPARE (VOIDmode, CONST0_RTX (GET_MODE (comp)), comp);
+ rtx new = gen_rtx_COMPARE (VOIDmode,
+ CONST0_RTX (GET_MODE (comp)), comp);
if (GET_CODE (body) == SET)
SET_SRC (body) = new;
else
return memref;
new = gen_rtx_MEM (mode, addr);
- RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (memref);
MEM_COPY_ATTRIBUTES (new, memref);
- MEM_ALIAS_SET (new) = MEM_ALIAS_SET (memref);
return new;
}
\f
register rtx label;
label = gen_rtx_CODE_LABEL (VOIDmode, 0, NULL_RTX,
- NULL_RTX, label_num++, NULL_PTR);
+ NULL_RTX, label_num++, NULL, NULL);
LABEL_NUSES (label) = 0;
+ LABEL_ALTERNATE_NAME (label) = NULL;
return label;
}
\f
/* Restore all variables describing the current status from the structure *P.
This is used after a nested function. */
-void
-restore_emit_status (p)
- struct function *p;
-{
- last_label_num = 0;
- clear_emit_caches ();
+void
+restore_emit_status (p)
+ struct function *p ATTRIBUTE_UNUSED;
+{
+ last_label_num = 0;
+ clear_emit_caches ();
+}
+
+/* Clear out all parts of the state in F that can safely be discarded
+ after the function has been compiled, to let garbage collection
+ reclaim the memory. */
+
+void
+free_emit_status (f)
+ struct function *f;
+{
+ free (f->emit->x_regno_reg_rtx);
+ free (f->emit->regno_pointer_align);
+ free (f->emit);
+ f->emit = NULL;
+}
+\f
+/* Go through all the RTL insn bodies and copy any invalid shared
+ structure. This routine should only be called once. */
+
+void
+unshare_all_rtl (fndecl, insn)
+ tree fndecl;
+ rtx insn;
+{
+ tree decl;
+
+ /* Make sure that virtual parameters are not shared. */
+ for (decl = DECL_ARGUMENTS (fndecl); decl; decl = TREE_CHAIN (decl))
+ SET_DECL_RTL (decl, copy_rtx_if_shared (DECL_RTL (decl)));
+
+ /* Make sure that virtual stack slots are not shared. */
+ unshare_all_decls (DECL_INITIAL (fndecl));
+
+ /* Unshare just about everything else. */
+ unshare_all_rtl_1 (insn);
+
+ /* Make sure the addresses of stack slots found outside the insn chain
+ (such as, in DECL_RTL of a variable) are not shared
+ with the insn chain.
+
+ This special care is necessary when the stack slot MEM does not
+ actually appear in the insn chain. If it does appear, its address
+ is unshared from all else at that point. */
+ stack_slot_list = copy_rtx_if_shared (stack_slot_list);
}
-/* Clear out all parts of our state in F that can safely be discarded
- after the function has been compiled, to let garbage collection
- reclaim the memory. */
+/* Go through all the RTL insn bodies and copy any invalid shared
+ structure, again. This is a fairly expensive thing to do so it
+ should be done sparingly. */
+
void
-free_emit_status (f)
- struct function *f;
+unshare_all_rtl_again (insn)
+ rtx insn;
{
- free (f->emit->x_regno_reg_rtx);
- free (f->emit->regno_pointer_flag);
- free (f->emit->regno_pointer_align);
- f->emit->x_regno_reg_rtx = 0;
+ rtx p;
+ tree decl;
+
+ for (p = insn; p; p = NEXT_INSN (p))
+ if (INSN_P (p))
+ {
+ reset_used_flags (PATTERN (p));
+ reset_used_flags (REG_NOTES (p));
+ reset_used_flags (LOG_LINKS (p));
+ }
+
+ /* Make sure that virtual stack slots are not shared. */
+ reset_used_decls (DECL_INITIAL (cfun->decl));
+
+ /* Make sure that virtual parameters are not shared. */
+ for (decl = DECL_ARGUMENTS (cfun->decl); decl; decl = TREE_CHAIN (decl))
+ reset_used_flags (DECL_RTL (decl));
+
+ reset_used_flags (stack_slot_list);
+
+ unshare_all_rtl (cfun->decl, insn);
}
-\f
+
/* Go through all the RTL insn bodies and copy any invalid shared structure.
- It does not work to do this twice, because the mark bits set here
- are not cleared afterwards. */
+ Assumes the mark bits are cleared at entry. */
-void
-unshare_all_rtl (insn)
- register rtx insn;
+static void
+unshare_all_rtl_1 (insn)
+ rtx insn;
{
for (; insn; insn = NEXT_INSN (insn))
- if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
- || GET_CODE (insn) == CALL_INSN)
+ if (INSN_P (insn))
{
PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn));
REG_NOTES (insn) = copy_rtx_if_shared (REG_NOTES (insn));
LOG_LINKS (insn) = copy_rtx_if_shared (LOG_LINKS (insn));
}
+}
- /* Make sure the addresses of stack slots found outside the insn chain
- (such as, in DECL_RTL of a variable) are not shared
- with the insn chain.
+/* Go through all virtual stack slots of a function and copy any
+ shared structure. */
+static void
+unshare_all_decls (blk)
+ tree blk;
+{
+ tree t;
- This special care is necessary when the stack slot MEM does not
- actually appear in the insn chain. If it does appear, its address
- is unshared from all else at that point. */
+ /* Copy shared decls. */
+ for (t = BLOCK_VARS (blk); t; t = TREE_CHAIN (t))
+ if (DECL_RTL_SET_P (t))
+ SET_DECL_RTL (t, copy_rtx_if_shared (DECL_RTL (t)));
+
+ /* Now process sub-blocks. */
+ for (t = BLOCK_SUBBLOCKS (blk); t; t = TREE_CHAIN (t))
+ unshare_all_decls (t);
+}
+
+/* Go through all virtual stack slots of a function and mark them as
+ not shared. */
+static void
+reset_used_decls (blk)
+ tree blk;
+{
+ tree t;
- copy_rtx_if_shared (stack_slot_list);
+ /* Mark decls. */
+ for (t = BLOCK_VARS (blk); t; t = TREE_CHAIN (t))
+ if (DECL_RTL_SET_P (t))
+ reset_used_flags (DECL_RTL (t));
+
+ /* Now process sub-blocks. */
+ for (t = BLOCK_SUBBLOCKS (blk); t; t = TREE_CHAIN (t))
+ reset_used_decls (t);
}
/* Mark ORIG as in use, and return a copy of it if it was already in use.
register rtx copy;
copy = rtx_alloc (code);
- bcopy ((char *) x, (char *) copy,
+ memcpy (copy, x,
(sizeof (*copy) - sizeof (copy->fld)
+ sizeof (copy->fld[0]) * GET_RTX_LENGTH (code)));
x = copy;
{
return cur_insn_uid;
}
+
+/* Renumber instructions so that no instruction UIDs are wasted. */
+
+void
+renumber_insns (stream)
+ FILE *stream;
+{
+ rtx insn;
+
+ /* If we're not supposed to renumber instructions, don't. */
+ if (!flag_renumber_insns)
+ return;
+
+ /* If there aren't that many instructions, then it's not really
+ worth renumbering them. */
+ if (flag_renumber_insns == 1 && get_max_uid () < 25000)
+ return;
+
+ cur_insn_uid = 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (stream)
+ fprintf (stream, "Renumbering insn %d to %d\n",
+ INSN_UID (insn), cur_insn_uid);
+ INSN_UID (insn) = cur_insn_uid++;
+ }
+}
\f
/* Return the next insn. If it is a SEQUENCE, return the first insn
of the sequence. */
does not look inside SEQUENCEs. Until reload has completed, this is the
same as next_real_insn. */
+int
+active_insn_p (insn)
+ rtx insn;
+{
+ return (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
+ || (GET_CODE (insn) == INSN
+ && (! reload_completed
+ || (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER))));
+}
+
rtx
next_active_insn (insn)
rtx insn;
while (insn)
{
insn = NEXT_INSN (insn);
- if (insn == 0
- || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
- || (GET_CODE (insn) == INSN
- && (! reload_completed
- || (GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER))))
+ if (insn == 0 || active_insn_p (insn))
break;
}
while (insn)
{
insn = PREV_INSN (insn);
- if (insn == 0
- || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
- || (GET_CODE (insn) == INSN
- && (! reload_completed
- || (GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER))))
+ if (insn == 0 || active_insn_p (insn))
break;
}
if (GET_CODE (user) == INSN && GET_CODE (PATTERN (user)) == SEQUENCE)
user = XVECEXP (PATTERN (user), 0, 0);
- REG_NOTES (user) = gen_rtx_INSN_LIST (REG_CC_SETTER, insn, REG_NOTES (user));
+ REG_NOTES (user) = gen_rtx_INSN_LIST (REG_CC_SETTER, insn,
+ REG_NOTES (user));
REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_CC_USER, user, REG_NOTES (insn));
}
if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
insn = XVECEXP (PATTERN (insn), 0, 0);
- if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i'
- && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
+ if (insn && INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
return insn;
return 0;
return insn;
}
#endif
+
+/* Increment the label uses for all labels present in rtx. */
+
+static void
+mark_label_nuses(x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i, j;
+ register const char *fmt;
+
+ code = GET_CODE (x);
+ if (code == LABEL_REF)
+ LABEL_NUSES (XEXP (x, 0))++;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ mark_label_nuses (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ mark_label_nuses (XVECEXP (x, i, j));
+ }
+}
+
\f
/* Try splitting insns that can be split for better scheduling.
PAT is the pattern which might split.
it, in turn, will be split (SFmode on the 29k is an example). */
if (GET_CODE (seq) == SEQUENCE)
{
- /* If we are splitting a JUMP_INSN, look for the JUMP_INSN in
- SEQ and copy our JUMP_LABEL to it. If JUMP_LABEL is non-zero,
- increment the usage count so we don't delete the label. */
int i;
-
- if (GET_CODE (trial) == JUMP_INSN)
+ rtx eh_note;
+
+ /* Avoid infinite loop if any insn of the result matches
+ the original pattern. */
+ for (i = 0; i < XVECLEN (seq, 0); i++)
+ if (GET_CODE (XVECEXP (seq, 0, i)) == INSN
+ && rtx_equal_p (PATTERN (XVECEXP (seq, 0, i)), pat))
+ return trial;
+
+ /* Mark labels. */
+ for (i = XVECLEN (seq, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (seq, 0, i)) == JUMP_INSN)
+ mark_jump_label (PATTERN (XVECEXP (seq, 0, i)),
+ XVECEXP (seq, 0, i), 0, 0);
+
+ /* If we are splitting a CALL_INSN, look for the CALL_INSN
+ in SEQ and copy our CALL_INSN_FUNCTION_USAGE to it. */
+ if (GET_CODE (trial) == CALL_INSN)
for (i = XVECLEN (seq, 0) - 1; i >= 0; i--)
- if (GET_CODE (XVECEXP (seq, 0, i)) == JUMP_INSN)
- {
- JUMP_LABEL (XVECEXP (seq, 0, i)) = JUMP_LABEL (trial);
-
- if (JUMP_LABEL (trial))
- LABEL_NUSES (JUMP_LABEL (trial))++;
- }
+ if (GET_CODE (XVECEXP (seq, 0, i)) == CALL_INSN)
+ CALL_INSN_FUNCTION_USAGE (XVECEXP (seq, 0, i))
+ = CALL_INSN_FUNCTION_USAGE (trial);
+
+ /* Copy EH notes. */
+ if ((eh_note = find_reg_note (trial, REG_EH_REGION, NULL_RTX)))
+ for (i = 0; i < XVECLEN (seq, 0); i++)
+ {
+ rtx insn = XVECEXP (seq, 0, i);
+ if (GET_CODE (insn) == CALL_INSN
+ || (flag_non_call_exceptions
+ && may_trap_p (PATTERN (insn))))
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (eh_note, 0),
+ REG_NOTES (insn));
+ }
+
+ /* If there are LABELS inside the split insns increment the
+ usage count so we don't delete the label. */
+ if (GET_CODE (trial) == INSN)
+ for (i = XVECLEN (seq, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (seq, 0, i)) == INSN)
+ mark_label_nuses (PATTERN (XVECEXP (seq, 0, i)));
tem = emit_insn_after (seq, before);
set LAST and continue from the insn after the one returned.
We can't use next_active_insn here since AFTER may be a note.
Ignore deleted insns, which can be occur if not optimizing. */
- for (tem = NEXT_INSN (before); tem != after;
- tem = NEXT_INSN (tem))
- if (! INSN_DELETED_P (tem)
- && GET_RTX_CLASS (GET_CODE (tem)) == 'i')
+ for (tem = NEXT_INSN (before); tem != after; tem = NEXT_INSN (tem))
+ if (! INSN_DELETED_P (tem) && INSN_P (tem))
tem = try_split (PATTERN (tem), tem, 1);
}
/* Avoid infinite loop if the result matches the original pattern. */
/* Return either the first or the last insn, depending on which was
requested. */
- return last ? prev_active_insn (after) : next_active_insn (before);
+ return last
+ ? (after ? prev_active_insn (after) : last_insn)
+ : next_active_insn (before);
}
return trial;
{
register rtx insn;
- /* If in RTL generation phase, see if FREE_INSN can be used. */
- if (free_insn != 0 && rtx_equal_function_value_matters)
- {
- insn = free_insn;
- free_insn = NEXT_INSN (free_insn);
- PUT_CODE (insn, INSN);
- }
- else
- insn = rtx_alloc (INSN);
+ insn = rtx_alloc (INSN);
INSN_UID (insn) = cur_insn_uid++;
PATTERN (insn) = pattern;
LOG_LINKS (insn) = NULL;
REG_NOTES (insn) = NULL;
+#ifdef ENABLE_RTL_CHECKING
+ if (insn
+ && INSN_P (insn)
+ && (returnjump_p (insn)
+ || (GET_CODE (insn) == SET
+ && SET_DEST (insn) == pc_rtx)))
+ {
+ warning ("ICE: emit_insn used where emit_jump_insn needed:\n");
+ debug_rtx (insn);
+ }
+#endif
+
return insn;
}
NOTE_LINE_NUMBER (after_line),
to);
}
+
+/* Remove unnecessary notes from the instruction stream. */
+
+void
+remove_unnecessary_notes ()
+{
+ rtx block_stack = NULL_RTX;
+ rtx eh_stack = NULL_RTX;
+ rtx insn;
+ rtx next;
+ rtx tmp;
+
+ /* We must not remove the first instruction in the function because
+ the compiler depends on the first instruction being a note. */
+ for (insn = NEXT_INSN (get_insns ()); insn; insn = next)
+ {
+ /* Remember what's next. */
+ next = NEXT_INSN (insn);
+
+ /* We're only interested in notes. */
+ if (GET_CODE (insn) != NOTE)
+ continue;
+
+ switch (NOTE_LINE_NUMBER (insn))
+ {
+ case NOTE_INSN_DELETED:
+ remove_insn (insn);
+ break;
+
+ case NOTE_INSN_EH_REGION_BEG:
+ eh_stack = alloc_INSN_LIST (insn, eh_stack);
+ break;
+
+ case NOTE_INSN_EH_REGION_END:
+ /* Too many end notes. */
+ if (eh_stack == NULL_RTX)
+ abort ();
+ /* Mismatched nesting. */
+ if (NOTE_EH_HANDLER (XEXP (eh_stack, 0)) != NOTE_EH_HANDLER (insn))
+ abort ();
+ tmp = eh_stack;
+ eh_stack = XEXP (eh_stack, 1);
+ free_INSN_LIST_node (tmp);
+ break;
+
+ case NOTE_INSN_BLOCK_BEG:
+ /* By now, all notes indicating lexical blocks should have
+ NOTE_BLOCK filled in. */
+ if (NOTE_BLOCK (insn) == NULL_TREE)
+ abort ();
+ block_stack = alloc_INSN_LIST (insn, block_stack);
+ break;
+
+ case NOTE_INSN_BLOCK_END:
+ /* Too many end notes. */
+ if (block_stack == NULL_RTX)
+ abort ();
+ /* Mismatched nesting. */
+ if (NOTE_BLOCK (XEXP (block_stack, 0)) != NOTE_BLOCK (insn))
+ abort ();
+ tmp = block_stack;
+ block_stack = XEXP (block_stack, 1);
+ free_INSN_LIST_node (tmp);
+
+ /* Scan back to see if there are any non-note instructions
+ between INSN and the beginning of this block. If not,
+ then there is no PC range in the generated code that will
+ actually be in this block, so there's no point in
+ remembering the existence of the block. */
+ for (tmp = PREV_INSN (insn); tmp ; tmp = PREV_INSN (tmp))
+ {
+ /* This block contains a real instruction. Note that we
+ don't include labels; if the only thing in the block
+ is a label, then there are still no PC values that
+ lie within the block. */
+ if (INSN_P (tmp))
+ break;
+
+ /* We're only interested in NOTEs. */
+ if (GET_CODE (tmp) != NOTE)
+ continue;
+
+ if (NOTE_LINE_NUMBER (tmp) == NOTE_INSN_BLOCK_BEG)
+ {
+ /* We just verified that this BLOCK matches us
+ with the block_stack check above. */
+ if (debug_ignore_block (NOTE_BLOCK (insn)))
+ {
+ remove_insn (tmp);
+ remove_insn (insn);
+ }
+ break;
+ }
+ else if (NOTE_LINE_NUMBER (tmp) == NOTE_INSN_BLOCK_END)
+ /* There's a nested block. We need to leave the
+ current block in place since otherwise the debugger
+ wouldn't be able to show symbols from our block in
+ the nested block. */
+ break;
+ }
+ }
+ }
+
+ /* Too many begin notes. */
+ if (block_stack || eh_stack)
+ abort ();
+}
+
\f
/* Emit an insn of given code and pattern
at a specified place within the doubly-linked list. */
insn = XVECEXP (pattern, 0, i);
add_insn_before (insn, before);
}
- if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
- sequence_result[XVECLEN (pattern, 0)] = pattern;
}
else
{
return insn;
}
+/* Similar to emit_insn_before, but update basic block boundaries as well. */
+
+rtx
+emit_block_insn_before (pattern, before, block)
+ rtx pattern, before;
+ basic_block block;
+{
+ rtx prev = PREV_INSN (before);
+ rtx r = emit_insn_before (pattern, before);
+ if (block && block->head == before)
+ block->head = NEXT_INSN (prev);
+ return r;
+}
+
/* Make an instruction with body PATTERN and code JUMP_INSN
and output it before the instruction BEFORE. */
add_insn_after (insn, after);
after = insn;
}
- if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
- sequence_result[XVECLEN (pattern, 0)] = pattern;
}
else
{
insn);
}
+/* Similar to emit_insn_after, but update basic block boundaries as well. */
+
+rtx
+emit_block_insn_after (pattern, after, block)
+ rtx pattern, after;
+ basic_block block;
+{
+ rtx r = emit_insn_after (pattern, after);
+ if (block && block->end == after)
+ block->end = r;
+ return r;
+}
+
/* Make an insn of code JUMP_INSN with body PATTERN
and output it after the insn AFTER. */
rtx
emit_line_note_after (file, line, after)
- char *file;
+ const char *file;
int line;
rtx after;
{
insn = XVECEXP (pattern, 0, i);
add_insn (insn);
}
- if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
- sequence_result[XVECLEN (pattern, 0)] = pattern;
}
else
{
rtx
emit_line_note (file, line)
- char *file;
+ const char *file;
int line;
{
set_file_and_line_for_stmt (file, line);
rtx
emit_note (file, line)
- char *file;
+ const char *file;
int line;
{
register rtx note;
rtx
emit_line_note_force (file, line)
- char *file;
+ const char *file;
int line;
{
last_linenum = -1;
else if (code == JUMP_INSN)
{
register rtx insn = emit_jump_insn (x);
- if (simplejump_p (insn) || GET_CODE (x) == RETURN)
+ if (any_uncondjump_p (insn) || GET_CODE (x) == RETURN)
return emit_barrier ();
return insn;
}
pops have previously been deferred; see INHIBIT_DEFER_POP for more
details), use do_pending_stack_adjust before calling this function.
That will ensure that the deferred pops are not accidentally
- emitted in the middel of this sequence. */
+ emitted in the middle of this sequence. */
void
start_sequence ()
{
struct sequence_stack *tem;
- if (sequence_element_free_list)
- {
- /* Reuse a previously-saved struct sequence_stack. */
- tem = sequence_element_free_list;
- sequence_element_free_list = tem->next;
- }
- else
- tem = (struct sequence_stack *) permalloc (sizeof (struct sequence_stack));
+ tem = (struct sequence_stack *) xmalloc (sizeof (struct sequence_stack));
tem->next = seq_stack;
tem->first = first_insn;
last_insn = last;
}
+/* Set up the insn chain from a chain started in FIRST to LAST. */
+
+void
+push_to_full_sequence (first, last)
+ rtx first, last;
+{
+ start_sequence ();
+ first_insn = first;
+ last_insn = last;
+ /* We really should have the end of the insn chain here. */
+ if (last && NEXT_INSN (last))
+ abort ();
+}
+
/* Set up the outer-level insn chain
as the current sequence, saving the previously current one. */
seq_rtl_expr = tem->sequence_rtl_expr;
seq_stack = tem->next;
- tem->next = sequence_element_free_list;
- sequence_element_free_list = tem;
+ free (tem);
+}
+
+/* This works like end_sequence, but records the old sequence in FIRST
+ and LAST. */
+
+void
+end_full_sequence (first, last)
+ rtx *first, *last;
+{
+ *first = first_insn;
+ *last = last_insn;
+ end_sequence();
}
/* Return 1 if currently emitting into a sequence. */
for (tem = first_insn; tem; tem = NEXT_INSN (tem))
len++;
- /* If only one insn, return its pattern rather than a SEQUENCE.
+ /* If only one insn, return it rather than a SEQUENCE.
(Now that we cache SEQUENCE expressions, it isn't worth special-casing
- the case of an empty list.) */
+ the case of an empty list.)
+ We only return the pattern of an insn if its code is INSN and it
+ has no notes. This ensures that no information gets lost. */
if (len == 1
&& ! RTX_FRAME_RELATED_P (first_insn)
- && (GET_CODE (first_insn) == INSN
- || GET_CODE (first_insn) == JUMP_INSN
- /* Don't discard the call usage field. */
- || (GET_CODE (first_insn) == CALL_INSN
- && CALL_INSN_FUNCTION_USAGE (first_insn) == NULL_RTX)))
- {
- NEXT_INSN (first_insn) = free_insn;
- free_insn = first_insn;
- return PATTERN (first_insn);
- }
+ && GET_CODE (first_insn) == INSN
+ /* Don't throw away any reg notes. */
+ && REG_NOTES (first_insn) == 0)
+ return PATTERN (first_insn);
- /* Put them in a vector. See if we already have a SEQUENCE of the
- appropriate length around. */
- if (len < SEQUENCE_RESULT_SIZE && (result = sequence_result[len]) != 0)
- sequence_result[len] = 0;
- else
- {
- /* Ensure that this rtl goes in saveable_obstack, since we may
- cache it. */
- push_obstacks_nochange ();
- rtl_in_saveable_obstack ();
- result = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (len));
- pop_obstacks ();
- }
+ result = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (len));
for (i = 0, tem = first_insn; tem; tem = NEXT_INSN (tem), i++)
XVECEXP (result, 0, i) = tem;
int i;
/* Clear the start_sequence/gen_sequence cache. */
- sequence_element_free_list = 0;
for (i = 0; i < SEQUENCE_RESULT_SIZE; i++)
sequence_result[i] = 0;
free_insn = 0;
}
+\f
+/* Used by copy_insn_1 to avoid copying SCRATCHes more than once. */
+static rtx copy_insn_scratch_in[MAX_RECOG_OPERANDS];
+static rtx copy_insn_scratch_out[MAX_RECOG_OPERANDS];
+static int copy_insn_n_scratches;
+
+/* When an insn is being copied by copy_insn_1, this is nonzero if we have
+ copied an ASM_OPERANDS.
+ In that case, it is the original input-operand vector. */
+static rtvec orig_asm_operands_vector;
+
+/* When an insn is being copied by copy_insn_1, this is nonzero if we have
+ copied an ASM_OPERANDS.
+ In that case, it is the copied input-operand vector. */
+static rtvec copy_asm_operands_vector;
+
+/* Likewise for the constraints vector. */
+static rtvec orig_asm_constraints_vector;
+static rtvec copy_asm_constraints_vector;
+
+/* Recursively create a new copy of an rtx for copy_insn.
+ This function differs from copy_rtx in that it handles SCRATCHes and
+ ASM_OPERANDs properly.
+ Normally, this function is not used directly; use copy_insn as front end.
+ However, you could first copy an insn pattern with copy_insn and then use
+ this function afterwards to properly copy any REG_NOTEs containing
+ SCRATCHes. */
+
+rtx
+copy_insn_1 (orig)
+ register rtx orig;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register const char *format_ptr;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ /* These codes are always shared, never copied.  */
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case ADDRESSOF:
+ return orig;
+
+ case SCRATCH:
+ /* If this SCRATCH was already copied during this call, reuse the
+ copy recorded below so all references stay shared.  */
+ for (i = 0; i < copy_insn_n_scratches; i++)
+ if (copy_insn_scratch_in[i] == orig)
+ return copy_insn_scratch_out[i];
+ break;
+
+ case CONST:
+ /* CONST can be shared if it contains a SYMBOL_REF. If it contains
+ a LABEL_REF, it isn't sharable. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT)
+ return orig;
+ break;
+
+ /* A MEM with a constant address is not sharable. The problem is that
+ the constant address may need to be reloaded. If the mem is shared,
+ then reloading one copy of this mem will cause all copies to appear
+ to have been reloaded. */
+
+ default:
+ break;
+ }
+
+ copy = rtx_alloc (code);
+
+ /* Copy the various flags, and other information. We assume that
+ all fields need copying, and then clear the fields that should
+ not be copied. That is the sensible default behavior, and forces
+ us to explicitly document why we are *not* copying a flag. */
+ memcpy (copy, orig, sizeof (struct rtx_def) - sizeof (rtunion));
+
+ /* We do not copy the USED flag, which is used as a mark bit during
+ walks over the RTL. */
+ copy->used = 0;
+
+ /* We do not copy JUMP, CALL, or FRAME_RELATED for INSNs. */
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ copy->jump = 0;
+ copy->call = 0;
+ copy->frame_related = 0;
+ }
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ /* Shallow-copy the field first; the 'e' and 'E'/'V' cases below
+ replace rtx and rtvec operands with deep copies, while the
+ remaining formats simply keep this value.  */
+ copy->fld[i] = orig->fld[i];
+ switch (*format_ptr++)
+ {
+ case 'e':
+ if (XEXP (orig, i) != NULL)
+ XEXP (copy, i) = copy_insn_1 (XEXP (orig, i));
+ break;
+
+ case 'E':
+ case 'V':
+ /* Share the copies of the ASM_OPERANDS vectors recorded at the
+ bottom of this function, so multiple ASM_OPERANDS referring
+ to the same vectors stay consistent.  */
+ if (XVEC (orig, i) == orig_asm_constraints_vector)
+ XVEC (copy, i) = copy_asm_constraints_vector;
+ else if (XVEC (orig, i) == orig_asm_operands_vector)
+ XVEC (copy, i) = copy_asm_operands_vector;
+ else if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j) = copy_insn_1 (XVECEXP (orig, i, j));
+ }
+ break;
+
+ case 't':
+ case 'w':
+ case 'i':
+ case 's':
+ case 'S':
+ case 'u':
+ case '0':
+ /* These are left unchanged. */
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ if (code == SCRATCH)
+ {
+ /* Record the mapping so later references to this SCRATCH (see the
+ SCRATCH case above) return the same copy.  */
+ i = copy_insn_n_scratches++;
+ if (i >= MAX_RECOG_OPERANDS)
+ abort ();
+ copy_insn_scratch_in[i] = orig;
+ copy_insn_scratch_out[i] = copy;
+ }
+ else if (code == ASM_OPERANDS)
+ {
+ /* Remember the input and constraint vectors of the first
+ ASM_OPERANDS copied, so the 'E'/'V' case above can share them.  */
+ orig_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (orig);
+ copy_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (copy);
+ orig_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (orig);
+ copy_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (copy);
+ }
+
+ return copy;
+}
+
+/* Create a new copy of an rtx.
+ This function differs from copy_rtx in that it handles SCRATCHes and
+ ASM_OPERANDs properly.
+ INSN doesn't really have to be a full INSN; it could be just the
+ pattern. */
+rtx
+copy_insn (insn)
+ rtx insn;
+{
+ /* Reset the per-call state copy_insn_1 uses to share SCRATCH copies
+ and ASM_OPERANDS vectors within a single copied insn.  */
+ copy_insn_n_scratches = 0;
+ orig_asm_operands_vector = 0;
+ orig_asm_constraints_vector = 0;
+ copy_asm_operands_vector = 0;
+ copy_asm_constraints_vector = 0;
+ return copy_insn_1 (insn);
+}
/* Initialize data structures and variables in this file
before generating rtl for each function. */
void
init_emit ()
{
- struct function *f = current_function;
+ struct function *f = cfun;
f->emit = (struct emit_status *) xmalloc (sizeof (struct emit_status));
first_insn = NULL;
/* Init the tables that describe all the pseudo regs. */
- f->emit->regno_pointer_flag_length = LAST_VIRTUAL_REGISTER + 101;
-
- f->emit->regno_pointer_flag
- = (char *) xmalloc (f->emit->regno_pointer_flag_length);
- bzero (f->emit->regno_pointer_flag, f->emit->regno_pointer_flag_length);
+ f->emit->regno_pointer_align_length = LAST_VIRTUAL_REGISTER + 101;
f->emit->regno_pointer_align
- = (char *) xmalloc (f->emit->regno_pointer_flag_length);
- bzero (f->emit->regno_pointer_align, f->emit->regno_pointer_flag_length);
+ = (unsigned char *) xcalloc (f->emit->regno_pointer_align_length,
+ sizeof (unsigned char));
regno_reg_rtx
- = (rtx *) xmalloc (f->emit->regno_pointer_flag_length * sizeof (rtx));
- bzero ((char *) regno_reg_rtx,
- f->emit->regno_pointer_flag_length * sizeof (rtx));
+ = (rtx *) xcalloc (f->emit->regno_pointer_align_length * sizeof (rtx),
+ sizeof (rtx));
/* Put copies of all the virtual register rtx into regno_reg_rtx. */
init_virtual_regs (f->emit);
/* Indicate that the virtual registers and stack locations are
all pointers. */
- REGNO_POINTER_FLAG (STACK_POINTER_REGNUM) = 1;
- REGNO_POINTER_FLAG (FRAME_POINTER_REGNUM) = 1;
- REGNO_POINTER_FLAG (HARD_FRAME_POINTER_REGNUM) = 1;
- REGNO_POINTER_FLAG (ARG_POINTER_REGNUM) = 1;
+ REG_POINTER (stack_pointer_rtx) = 1;
+ REG_POINTER (frame_pointer_rtx) = 1;
+ REG_POINTER (hard_frame_pointer_rtx) = 1;
+ REG_POINTER (arg_pointer_rtx) = 1;
- REGNO_POINTER_FLAG (VIRTUAL_INCOMING_ARGS_REGNUM) = 1;
- REGNO_POINTER_FLAG (VIRTUAL_STACK_VARS_REGNUM) = 1;
- REGNO_POINTER_FLAG (VIRTUAL_STACK_DYNAMIC_REGNUM) = 1;
- REGNO_POINTER_FLAG (VIRTUAL_OUTGOING_ARGS_REGNUM) = 1;
- REGNO_POINTER_FLAG (VIRTUAL_CFA_REGNUM) = 1;
+ REG_POINTER (virtual_incoming_args_rtx) = 1;
+ REG_POINTER (virtual_stack_vars_rtx) = 1;
+ REG_POINTER (virtual_stack_dynamic_rtx) = 1;
+ REG_POINTER (virtual_outgoing_args_rtx) = 1;
+ REG_POINTER (virtual_cfa_rtx) = 1;
#ifdef STACK_BOUNDARY
- REGNO_POINTER_ALIGN (STACK_POINTER_REGNUM) = STACK_BOUNDARY / BITS_PER_UNIT;
- REGNO_POINTER_ALIGN (FRAME_POINTER_REGNUM) = STACK_BOUNDARY / BITS_PER_UNIT;
- REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM)
- = STACK_BOUNDARY / BITS_PER_UNIT;
- REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = STACK_BOUNDARY / BITS_PER_UNIT;
-
- REGNO_POINTER_ALIGN (VIRTUAL_INCOMING_ARGS_REGNUM)
- = STACK_BOUNDARY / BITS_PER_UNIT;
- REGNO_POINTER_ALIGN (VIRTUAL_STACK_VARS_REGNUM)
- = STACK_BOUNDARY / BITS_PER_UNIT;
- REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM)
- = STACK_BOUNDARY / BITS_PER_UNIT;
- REGNO_POINTER_ALIGN (VIRTUAL_OUTGOING_ARGS_REGNUM)
- = STACK_BOUNDARY / BITS_PER_UNIT;
- REGNO_POINTER_ALIGN (VIRTUAL_CFA_REGNUM) = UNITS_PER_WORD;
+ REGNO_POINTER_ALIGN (STACK_POINTER_REGNUM) = STACK_BOUNDARY;
+ REGNO_POINTER_ALIGN (FRAME_POINTER_REGNUM) = STACK_BOUNDARY;
+ REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = STACK_BOUNDARY;
+ REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = STACK_BOUNDARY;
+
+ REGNO_POINTER_ALIGN (VIRTUAL_INCOMING_ARGS_REGNUM) = STACK_BOUNDARY;
+ REGNO_POINTER_ALIGN (VIRTUAL_STACK_VARS_REGNUM) = STACK_BOUNDARY;
+ REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM) = STACK_BOUNDARY;
+ REGNO_POINTER_ALIGN (VIRTUAL_OUTGOING_ARGS_REGNUM) = STACK_BOUNDARY;
+ REGNO_POINTER_ALIGN (VIRTUAL_CFA_REGNUM) = BITS_PER_WORD;
#endif
#ifdef INIT_EXPANDERS
/* Mark ES for GC. */
void
-mark_emit_state (es)
+mark_emit_status (es)
struct emit_status *es;
{
rtx *r;
if (es == 0)
return;
- for (i = es->regno_pointer_flag_length, r = es->x_regno_reg_rtx;
+ for (i = es->regno_pointer_align_length, r = es->x_regno_reg_rtx;
i > 0; --i, ++r)
ggc_mark_rtx (*r);
enum machine_mode mode;
enum machine_mode double_mode;
+ /* Initialize the CONST_INT hash table. */
+ const_int_htab = htab_create (37, const_int_htab_hash,
+ const_int_htab_eq, NULL);
+ ggc_add_root (&const_int_htab, 1, sizeof (const_int_htab),
+ rtx_htab_mark);
+
no_line_numbers = ! line_numbers;
/* Compute the word and byte modes. */
word_mode = mode;
}
-#ifndef DOUBLE_TYPE_SIZE
-#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
-#endif
-
for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
ptr_mode = mode_for_size (POINTER_SIZE, GET_MODE_CLASS (Pmode), 0);
+ /* Assign register numbers to the globally defined register rtx.
+ This must be done at runtime because the register number field
+ is in a union and some compilers can't initialize unions. */
+
+ pc_rtx = gen_rtx (PC, VOIDmode);
+ cc0_rtx = gen_rtx (CC0, VOIDmode);
+ stack_pointer_rtx = gen_raw_REG (Pmode, STACK_POINTER_REGNUM);
+ frame_pointer_rtx = gen_raw_REG (Pmode, FRAME_POINTER_REGNUM);
+ if (hard_frame_pointer_rtx == 0)
+ hard_frame_pointer_rtx = gen_raw_REG (Pmode,
+ HARD_FRAME_POINTER_REGNUM);
+ if (arg_pointer_rtx == 0)
+ arg_pointer_rtx = gen_raw_REG (Pmode, ARG_POINTER_REGNUM);
+ virtual_incoming_args_rtx =
+ gen_raw_REG (Pmode, VIRTUAL_INCOMING_ARGS_REGNUM);
+ virtual_stack_vars_rtx =
+ gen_raw_REG (Pmode, VIRTUAL_STACK_VARS_REGNUM);
+ virtual_stack_dynamic_rtx =
+ gen_raw_REG (Pmode, VIRTUAL_STACK_DYNAMIC_REGNUM);
+ virtual_outgoing_args_rtx =
+ gen_raw_REG (Pmode, VIRTUAL_OUTGOING_ARGS_REGNUM);
+ virtual_cfa_rtx = gen_raw_REG (Pmode, VIRTUAL_CFA_REGNUM);
+
+ /* These rtx must be roots if GC is enabled. */
+ ggc_add_rtx_root (global_rtl, GR_MAX);
+
+#ifdef INIT_EXPANDERS
+ /* This is to initialize {init|mark|free}_machine_status before the first
+ call to push_function_context_to. This is needed by the Chill front
+ end which calls push_function_context_to before the first call to
+ init_function_start. */
+ INIT_EXPANDERS;
+#endif
+
/* Create the unique rtx's for certain rtx codes and operand values. */
+ /* Don't use gen_rtx here since gen_rtx in this case
+ tries to use these variables. */
for (i = - MAX_SAVED_CONST_INT; i <= MAX_SAVED_CONST_INT; i++)
- {
- PUT_CODE (&const_int_rtx[i + MAX_SAVED_CONST_INT], CONST_INT);
- PUT_MODE (&const_int_rtx[i + MAX_SAVED_CONST_INT], VOIDmode);
- INTVAL (&const_int_rtx[i + MAX_SAVED_CONST_INT]) = i;
- }
+ const_int_rtx[i + MAX_SAVED_CONST_INT] =
+ gen_rtx_raw_CONST_INT (VOIDmode, i);
+ ggc_add_rtx_root (const_int_rtx, 2 * MAX_SAVED_CONST_INT + 1);
if (STORE_FLAG_VALUE >= - MAX_SAVED_CONST_INT
&& STORE_FLAG_VALUE <= MAX_SAVED_CONST_INT)
- const_true_rtx = &const_int_rtx[STORE_FLAG_VALUE + MAX_SAVED_CONST_INT];
+ const_true_rtx = const_int_rtx[STORE_FLAG_VALUE + MAX_SAVED_CONST_INT];
else
const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE);
rtx tem = rtx_alloc (CONST_DOUBLE);
union real_extract u;
- bzero ((char *) &u, sizeof u); /* Zero any holes in a structure. */
+ memset ((char *) &u, 0, sizeof u); /* Zero any holes in a structure. */
u.d = i == 0 ? dconst0 : i == 1 ? dconst1 : dconst2;
- bcopy ((char *) &u, (char *) &CONST_DOUBLE_LOW (tem), sizeof u);
+ memcpy (&CONST_DOUBLE_LOW (tem), &u, sizeof u);
CONST_DOUBLE_MEM (tem) = cc0_rtx;
+ CONST_DOUBLE_CHAIN (tem) = NULL_RTX;
PUT_MODE (tem, mode);
const_tiny_rtx[i][(int) mode] = tem;
const_tiny_rtx[i][(int) mode] = GEN_INT (i);
}
- for (mode = CCmode; mode < MAX_MACHINE_MODE; ++mode)
- if (GET_MODE_CLASS (mode) == MODE_CC)
- const_tiny_rtx[0][(int) mode] = const0_rtx;
-
- /* Assign register numbers to the globally defined register rtx.
- This must be done at runtime because the register number field
- is in a union and some compilers can't initialize unions. */
+ for (i = (int) CCmode; i < (int) MAX_MACHINE_MODE; ++i)
+ if (GET_MODE_CLASS ((enum machine_mode) i) == MODE_CC)
+ const_tiny_rtx[0][i] = const0_rtx;
- REGNO (stack_pointer_rtx) = STACK_POINTER_REGNUM;
- PUT_MODE (stack_pointer_rtx, Pmode);
- REGNO (frame_pointer_rtx) = FRAME_POINTER_REGNUM;
- PUT_MODE (frame_pointer_rtx, Pmode);
-#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
- REGNO (hard_frame_pointer_rtx) = HARD_FRAME_POINTER_REGNUM;
- PUT_MODE (hard_frame_pointer_rtx, Pmode);
-#endif
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && HARD_FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
- REGNO (arg_pointer_rtx) = ARG_POINTER_REGNUM;
- PUT_MODE (arg_pointer_rtx, Pmode);
-#endif
+ const_tiny_rtx[0][(int) BImode] = const0_rtx;
+ if (STORE_FLAG_VALUE == 1)
+ const_tiny_rtx[1][(int) BImode] = const1_rtx;
- REGNO (virtual_incoming_args_rtx) = VIRTUAL_INCOMING_ARGS_REGNUM;
- PUT_MODE (virtual_incoming_args_rtx, Pmode);
- REGNO (virtual_stack_vars_rtx) = VIRTUAL_STACK_VARS_REGNUM;
- PUT_MODE (virtual_stack_vars_rtx, Pmode);
- REGNO (virtual_stack_dynamic_rtx) = VIRTUAL_STACK_DYNAMIC_REGNUM;
- PUT_MODE (virtual_stack_dynamic_rtx, Pmode);
- REGNO (virtual_outgoing_args_rtx) = VIRTUAL_OUTGOING_ARGS_REGNUM;
- PUT_MODE (virtual_outgoing_args_rtx, Pmode);
- REGNO (virtual_cfa_rtx) = VIRTUAL_CFA_REGNUM;
- PUT_MODE (virtual_cfa_rtx, Pmode);
+ /* For bounded pointers, `&const_tiny_rtx[0][0]' is not the same as
+ `(rtx *) const_tiny_rtx'. The former has bounds that only cover
+ `const_tiny_rtx[0]', whereas the latter has bounds that cover all. */
+ ggc_add_rtx_root ((rtx *) const_tiny_rtx, sizeof const_tiny_rtx / sizeof (rtx));
+ ggc_add_rtx_root (&const_true_rtx, 1);
#ifdef RETURN_ADDRESS_POINTER_REGNUM
return_address_pointer_rtx
- = gen_rtx_raw_REG (Pmode, RETURN_ADDRESS_POINTER_REGNUM);
+ = gen_raw_REG (Pmode, RETURN_ADDRESS_POINTER_REGNUM);
#endif
#ifdef STRUCT_VALUE
#ifdef STATIC_CHAIN_INCOMING_REGNUM
if (STATIC_CHAIN_INCOMING_REGNUM != STATIC_CHAIN_REGNUM)
- static_chain_incoming_rtx = gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
+ static_chain_incoming_rtx
+ = gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
else
#endif
static_chain_incoming_rtx = static_chain_rtx;
#endif
#endif
-#ifdef PIC_OFFSET_TABLE_REGNUM
- pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);
-#endif
+ if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
+ pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);
-#ifdef INIT_EXPANDERS
- /* This is to initialize save_machine_status and restore_machine_status before
- the first call to push_function_context_to. This is needed by the Chill
- front end which calls push_function_context_to before the first cal to
- init_function_start. */
- INIT_EXPANDERS;
-#endif
+ ggc_add_rtx_root (&pic_offset_table_rtx, 1);
+ ggc_add_rtx_root (&struct_value_rtx, 1);
+ ggc_add_rtx_root (&struct_value_incoming_rtx, 1);
+ ggc_add_rtx_root (&static_chain_rtx, 1);
+ ggc_add_rtx_root (&static_chain_incoming_rtx, 1);
+ ggc_add_rtx_root (&return_address_pointer_rtx, 1);
}
\f
/* Query and clear/ restore no_line_numbers. This is used by the