#include "intl.h"
#include "tm_p.h"
#include "tree-iterator.h"
+#include "tree-pass.h"
+#include "tree-flow.h"
#include "target.h"
+#include "timevar.h"
/* Decide whether a function's arguments should be processed
from first to last or from last to first.
#endif
#endif
-/* Convert defined/undefined to boolean. */
-#ifdef TARGET_MEM_FUNCTIONS
-#undef TARGET_MEM_FUNCTIONS
-#define TARGET_MEM_FUNCTIONS 1
-#else
-#define TARGET_MEM_FUNCTIONS 0
-#endif
-
/* If this is nonzero, we do not bother generating VOLATILE
around volatile memory references, and we are willing to
&& ((code = can_extend_p (to_mode, word_mode, unsignedp))
!= CODE_FOR_nothing))
{
- if (GET_CODE (to) == REG)
+ if (REG_P (to))
{
if (reg_overlap_mentioned_p (to, from))
from = force_reg (from_mode, from);
if (GET_MODE_BITSIZE (from_mode) > BITS_PER_WORD
&& GET_MODE_BITSIZE (to_mode) <= BITS_PER_WORD)
{
- if (!((GET_CODE (from) == MEM
+ if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
&& direct_load[(int) to_mode]
&& ! mode_dependent_address_p (XEXP (from, 0)))
- || GET_CODE (from) == REG
+ || REG_P (from)
|| GET_CODE (from) == SUBREG))
from = force_reg (from_mode, from);
convert_move (to, gen_lowpart (word_mode, from), 0);
&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
GET_MODE_BITSIZE (from_mode)))
{
- if (!((GET_CODE (from) == MEM
+ if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
&& direct_load[(int) to_mode]
&& ! mode_dependent_address_p (XEXP (from, 0)))
- || GET_CODE (from) == REG
+ || REG_P (from)
|| GET_CODE (from) == SUBREG))
from = force_reg (from_mode, from);
- if (GET_CODE (from) == REG && REGNO (from) < FIRST_PSEUDO_REGISTER
+ if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER
&& ! HARD_REGNO_MODE_OK (REGNO (from), to_mode))
from = copy_to_reg (from);
emit_move_insn (to, gen_lowpart (to_mode, from));
&& GET_MODE_CLASS (oldmode) == MODE_INT
&& (GET_CODE (x) == CONST_DOUBLE
|| (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (oldmode)
- && ((GET_CODE (x) == MEM && ! MEM_VOLATILE_P (x)
+ && ((MEM_P (x) && ! MEM_VOLATILE_P (x)
&& direct_load[(int) mode])
- || (GET_CODE (x) == REG
+ || (REG_P (x)
&& (! HARD_REGISTER_P (x)
|| HARD_REGNO_MODE_OK (REGNO (x), mode))
&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
- if (GET_MODE (x) != BLKmode)
- abort ();
- if (GET_MODE (y) != BLKmode)
- abort ();
-
x = protect_from_queue (x, 1);
y = protect_from_queue (y, 0);
size = protect_from_queue (size, 0);
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
abort ();
- if (GET_CODE (y) != MEM)
+ if (!MEM_P (y))
abort ();
if (size == 0)
abort ();
+ /* Make sure we've got BLKmode addresses; store_one_arg can decide that
+ block copy is more efficient for other large modes, e.g. DCmode. */
+ x = adjust_address (x, BLKmode, 0);
+ y = adjust_address (y, BLKmode, 0);
+
/* Set MEM_SIZE as appropriate for this block copy. The main place this
can be incorrect is coming from __builtin_memcpy. */
if (GET_CODE (size) == CONST_INT)
return false;
}
-/* A subroutine of emit_block_move. Expand a call to memcpy or bcopy.
+/* A subroutine of emit_block_move. Expand a call to memcpy.
Return the return value from memcpy, 0 otherwise. */
static rtx
could get the wrong value for an argument.
To avoid this problem we go ahead and emit code to copy the addresses of
- DST and SRC and SIZE into new pseudos. We can then place those new
- pseudos into an RTL_EXPR and use them later, even after a call to
- emit_queue.
+ DST and SRC and SIZE into new pseudos.
Note this is not strictly needed for library calls since they do not call
emit_queue before loading their arguments. However, we may need to have
dst_tree = make_tree (ptr_type_node, dst_addr);
src_tree = make_tree (ptr_type_node, src_addr);
- if (TARGET_MEM_FUNCTIONS)
- size_mode = TYPE_MODE (sizetype);
- else
- size_mode = TYPE_MODE (unsigned_type_node);
+ size_mode = TYPE_MODE (sizetype);
size = convert_to_mode (size_mode, size, 1);
size = copy_to_mode_reg (size_mode, size);
memcpy in this context. This could be a user call to memcpy and
the user may wish to examine the return value from memcpy. For
targets where libcalls and normal calls have different conventions
- for returning pointers, we could end up generating incorrect code.
-
- For convenience, we generate the call to bcopy this way as well. */
+ for returning pointers, we could end up generating incorrect code. */
- if (TARGET_MEM_FUNCTIONS)
- size_tree = make_tree (sizetype, size);
- else
- size_tree = make_tree (unsigned_type_node, size);
+ size_tree = make_tree (sizetype, size);
fn = emit_block_move_libcall_fn (true);
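+ /* tree_cons prepends, so the argument list is built in reverse; the
+ final order is (dst, src, size), matching memcpy's prototype. */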
arg_list = tree_cons (NULL_TREE, size_tree, NULL_TREE);
- if (TARGET_MEM_FUNCTIONS)
- {
- arg_list = tree_cons (NULL_TREE, src_tree, arg_list);
- arg_list = tree_cons (NULL_TREE, dst_tree, arg_list);
- }
- else
- {
- arg_list = tree_cons (NULL_TREE, dst_tree, arg_list);
- arg_list = tree_cons (NULL_TREE, src_tree, arg_list);
- }
+ arg_list = tree_cons (NULL_TREE, src_tree, arg_list);
+ arg_list = tree_cons (NULL_TREE, dst_tree, arg_list);
/* Now we have to build up the CALL_EXPR itself. */
call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
gen_rtx_CLOBBER (VOIDmode, dst),
NULL_RTX));
- return TARGET_MEM_FUNCTIONS ? retval : NULL_RTX;
+ return retval;
}
/* A subroutine of emit_block_move_via_libcall. Create the tree node
{
tree args, fn;
- if (TARGET_MEM_FUNCTIONS)
- {
- fn = get_identifier ("memcpy");
- args = build_function_type_list (ptr_type_node, ptr_type_node,
- const_ptr_type_node, sizetype,
- NULL_TREE);
- }
- else
- {
- fn = get_identifier ("bcopy");
- args = build_function_type_list (void_type_node, const_ptr_type_node,
- ptr_type_node, unsigned_type_node,
- NULL_TREE);
- }
+ fn = get_identifier ("memcpy");
+ args = build_function_type_list (ptr_type_node, ptr_type_node,
+ const_ptr_type_node, sizetype,
+ NULL_TREE);
fn = build_decl (FUNCTION_DECL, fn, args);
DECL_EXTERNAL (fn) = 1;
from strange tricks we might play; but make sure that the source can
be loaded directly into the destination. */
src = orig_src;
- if (GET_CODE (orig_src) != MEM
+ if (!MEM_P (orig_src)
&& (!CONSTANT_P (orig_src)
|| (GET_MODE (orig_src) != mode
&& GET_MODE (orig_src) != VOIDmode)))
}
/* Optimize the access just a bit. */
- if (GET_CODE (src) == MEM
+ if (MEM_P (src)
&& (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (src))
|| MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
to be extracted. */
tmps[i] = XEXP (src, bytepos / slen0);
if (! CONSTANT_P (tmps[i])
- && (GET_CODE (tmps[i]) != REG || GET_MODE (tmps[i]) != mode))
+ && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode))
tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
(bytepos % slen0) * BITS_PER_UNIT,
1, NULL_RTX, mode, mode, ssize);
SIMD register, which is currently broken. While we get GCC
to emit proper RTL for these cases, let's dump to memory. */
else if (VECTOR_MODE_P (GET_MODE (dst))
- && GET_CODE (src) == REG)
+ && REG_P (src))
{
int slen = GET_MODE_SIZE (GET_MODE (src));
rtx mem;
&& XVECLEN (dst, 0) > 1)
tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
else if (CONSTANT_P (src)
- || (GET_CODE (src) == REG && GET_MODE (src) == mode))
+ || (REG_P (src) && GET_MODE (src) == mode))
tmps[i] = src;
else
tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
mode, mode, ssize);
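+ /* The shift may be expanded into a register other than tmps[i], so
+ use the returned rtx rather than assuming the shift was done in
+ place. */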
if (shift)
- expand_binop (mode, ashl_optab, tmps[i], GEN_INT (shift),
- tmps[i], 0, OPTAB_WIDEN);
+ tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
+ build_int_2 (shift, 0), tmps[i], 0);
}
emit_queue ();
emit_group_load (dst, temp, type, ssize);
return;
}
- else if (GET_CODE (dst) != MEM && GET_CODE (dst) != CONCAT)
+ else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
{
dst = gen_reg_rtx (GET_MODE (orig_dst));
/* Make life a bit easier for combine. */
)
{
int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
- expand_binop (mode, ashr_optab, tmps[i], GEN_INT (shift),
- tmps[i], 0, OPTAB_WIDEN);
+ tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
+ build_int_2 (shift, 0), tmps[i], 0);
}
bytelen = ssize - bytepos;
}
}
/* Optimize the access just a bit. */
- if (GET_CODE (dest) == MEM
+ if (MEM_P (dest)
&& (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest))
|| MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
void
use_reg (rtx *call_fusage, rtx reg)
{
- if (GET_CODE (reg) != REG
+ if (!REG_P (reg)
|| REGNO (reg) >= FIRST_PSEUDO_REGISTER)
abort ();
/* A NULL entry means the parameter goes both on the stack and in
registers. This can also be a MEM for targets that pass values
partially on the stack and partially in registers. */
- if (reg != 0 && GET_CODE (reg) == REG)
+ if (reg != 0 && REG_P (reg))
use_reg (call_fusage, reg);
}
}
clear_storage (rtx object, rtx size)
{
rtx retval = 0;
- unsigned int align = (GET_CODE (object) == MEM ? MEM_ALIGN (object)
+ unsigned int align = (MEM_P (object) ? MEM_ALIGN (object)
: GET_MODE_ALIGNMENT (GET_MODE (object)));
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
return false;
}
-/* A subroutine of clear_storage. Expand a call to memset or bzero.
+/* A subroutine of clear_storage. Expand a call to memset.
Return the return value of memset, 0 otherwise. */
static rtx
not careful we could get the wrong value for an argument.
To avoid this problem we go ahead and emit code to copy OBJECT
- and SIZE into new pseudos. We can then place those new pseudos
- into an RTL_EXPR and use them later, even after a call to
- emit_queue.
+ and SIZE into new pseudos.
Note this is not strictly needed for library calls since they
do not call emit_queue before loading their arguments. However,
object = copy_to_mode_reg (Pmode, XEXP (object, 0));
- if (TARGET_MEM_FUNCTIONS)
- size_mode = TYPE_MODE (sizetype);
- else
- size_mode = TYPE_MODE (unsigned_type_node);
+ size_mode = TYPE_MODE (sizetype);
size = convert_to_mode (size_mode, size, 1);
size = copy_to_mode_reg (size_mode, size);
memset in this context. This could be a user call to memset and
the user may wish to examine the return value from memset. For
targets where libcalls and normal calls have different conventions
- for returning pointers, we could end up generating incorrect code.
-
- For convenience, we generate the call to bzero this way as well. */
+ for returning pointers, we could end up generating incorrect code. */
object_tree = make_tree (ptr_type_node, object);
- if (TARGET_MEM_FUNCTIONS)
- size_tree = make_tree (sizetype, size);
- else
- size_tree = make_tree (unsigned_type_node, size);
+ size_tree = make_tree (sizetype, size);
fn = clear_storage_libcall_fn (true);
arg_list = tree_cons (NULL_TREE, size_tree, NULL_TREE);
- if (TARGET_MEM_FUNCTIONS)
- arg_list = tree_cons (NULL_TREE, integer_zero_node, arg_list);
+ arg_list = tree_cons (NULL_TREE, integer_zero_node, arg_list);
arg_list = tree_cons (NULL_TREE, object_tree, arg_list);
/* Now we have to build up the CALL_EXPR itself. */
if (RTX_UNCHANGING_P (object))
emit_insn (gen_rtx_CLOBBER (VOIDmode, object));
- return (TARGET_MEM_FUNCTIONS ? retval : NULL_RTX);
+ return retval;
}
/* A subroutine of clear_storage_via_libcall. Create the tree node
{
tree fn, args;
- if (TARGET_MEM_FUNCTIONS)
- {
- fn = get_identifier ("memset");
- args = build_function_type_list (ptr_type_node, ptr_type_node,
- integer_type_node, sizetype,
- NULL_TREE);
- }
- else
- {
- fn = get_identifier ("bzero");
- args = build_function_type_list (void_type_node, ptr_type_node,
- unsigned_type_node, NULL_TREE);
- }
+ fn = get_identifier ("memset");
+ args = build_function_type_list (ptr_type_node, ptr_type_node,
+ integer_type_node, sizetype,
+ NULL_TREE);
fn = build_decl (FUNCTION_DECL, fn, args);
DECL_EXTERNAL (fn) = 1;
/* If X or Y are memory references, verify that their addresses are valid
for the machine. */
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
&& ! push_operand (x, GET_MODE (x)))
|| (flag_force_addr
&& CONSTANT_ADDRESS_P (XEXP (x, 0)))))
x = validize_mem (x);
- if (GET_CODE (y) == MEM
+ if (MEM_P (y)
&& (! memory_address_p (GET_MODE (y), XEXP (y, 0))
|| (flag_force_addr
&& CONSTANT_ADDRESS_P (XEXP (y, 0)))))
last_insn = emit_move_insn_1 (x, y);
- if (y_cst && GET_CODE (x) == REG
+ if (y_cst && REG_P (x)
&& (set = single_set (last_insn)) != NULL_RTX
&& SET_DEST (set) == x
&& ! rtx_equal_p (y_cst, SET_SRC (set)))
if (reload_in_progress)
{
x = gen_lowpart_common (tmode, x1);
- if (x == 0 && GET_CODE (x1) == MEM)
+ if (x == 0 && MEM_P (x1))
{
x = adjust_address_nv (x1, tmode, 0);
copy_replacements (x1, x);
}
y = gen_lowpart_common (tmode, y1);
- if (y == 0 && GET_CODE (y1) == MEM)
+ if (y == 0 && MEM_P (y1))
{
y = adjust_address_nv (y1, tmode, 0);
copy_replacements (y1, y);
/* If we are in reload, see if either operand is a MEM whose address
is scheduled for replacement. */
- if (reload_in_progress && GET_CODE (x) == MEM
+ if (reload_in_progress && MEM_P (x)
&& (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
x = replace_equiv_address_nv (x, inner);
- if (reload_in_progress && GET_CODE (y) == MEM
+ if (reload_in_progress && MEM_P (y)
&& (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
y = replace_equiv_address_nv (y, inner);
emit_unop_insn (ic, x, trunc_y, UNKNOWN);
last_insn = get_last_insn ();
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
set_unique_reg_note (last_insn, REG_EQUAL, y);
return last_insn;
size = convert_modes (Pmode, ptr_mode, size, 1);
if (CONSTANT_P (size))
anti_adjust_stack (plus_constant (size, extra));
- else if (GET_CODE (size) == REG && extra == 0)
+ else if (REG_P (size) && extra == 0)
anti_adjust_stack (size);
else
{
/* If X is a hard register in a non-integer mode, copy it into a pseudo;
SUBREGs of such registers are not allowed. */
- if ((GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER
+ if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
&& GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
x = copy_to_reg (x);
{
return ((x == 0
/* Only registers can be subtargets. */
- || GET_CODE (x) != REG
+ || !REG_P (x)
/* If the register is readonly, it can't be set more than once. */
|| RTX_UNCHANGING_P (x)
/* Don't use hard regs to avoid extending their life. */
{
rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
- if (GET_CODE (to_rtx) != MEM)
+ if (!MEM_P (to_rtx))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
/* A constant address in TO_RTX can have VOIDmode, we must not try
to call force_reg for that case. Avoid that case. */
- if (GET_CODE (to_rtx) == MEM
+ if (MEM_P (to_rtx)
&& GET_MODE (to_rtx) == BLKmode
&& GET_MODE (XEXP (to_rtx, 0)) != VOIDmode
&& bitsize > 0
offset));
}
- if (GET_CODE (to_rtx) == MEM)
+ if (MEM_P (to_rtx))
{
/* If the field is at offset zero, we could have been given the
DECL_RTX of the parent struct. Don't munge it. */
/* Deal with volatile and readonly fields. The former is only done
for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
- if (volatilep && GET_CODE (to_rtx) == MEM)
+ if (volatilep && MEM_P (to_rtx))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
/* We can't assert that a MEM won't be set more than once
if the component is not addressable because another
non-addressable component may be referenced by the same MEM. */
- && ! (GET_CODE (to_rtx) == MEM && ! can_address_p (to)))
+ && ! (MEM_P (to_rtx) && ! can_address_p (to)))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
RTX_UNCHANGING_P (to_rtx) = 1;
}
- if (GET_CODE (to_rtx) == MEM && ! can_address_p (to))
+ if (MEM_P (to_rtx) && ! can_address_p (to))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
}
+ /* Disabled temporarily. GET_MODE (to_rtx) is often not the right
+ mode. */
+ while (0 && mode1 == VOIDmode && !want_value
+ && bitpos + bitsize <= BITS_PER_WORD
+ && bitsize < BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (to_rtx)) <= BITS_PER_WORD
+ && !TREE_SIDE_EFFECTS (to)
+ && !TREE_THIS_VOLATILE (to))
+ {
+ tree src, op0, op1;
+ rtx value;
+ HOST_WIDE_INT count = bitpos;
+ optab binop;
+
+ src = from;
+ STRIP_NOPS (src);
+ if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE
+ || TREE_CODE_CLASS (TREE_CODE (src)) != '2')
+ break;
+
+ op0 = TREE_OPERAND (src, 0);
+ op1 = TREE_OPERAND (src, 1);
+ STRIP_NOPS (op0);
+
+ if (! operand_equal_p (to, op0, 0))
+ break;
+
+ if (BYTES_BIG_ENDIAN)
+ count = GET_MODE_BITSIZE (GET_MODE (to_rtx)) - bitpos - bitsize;
+
+ /* Special case some bitfield op= exp. */
+ switch (TREE_CODE (src))
+ {
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ if (count <= 0)
+ break;
+
+ /* For now, just optimize the case of the topmost bitfield
+ where we don't need to do any masking and also
+ 1 bit bitfields where xor can be used.
+ We might win by one instruction for the other bitfields
+ too if insv/extv instructions aren't used, so that
+ can be added later. */
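+ /* For instance, "s.f += 1" on a 1 bit field simply toggles the
+ bit, and so becomes a single xor in place. */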
+ if (count + bitsize != GET_MODE_BITSIZE (GET_MODE (to_rtx))
+ && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
+ break;
+ value = expand_expr (op1, NULL_RTX, VOIDmode, 0);
+ value = protect_from_queue (value, 0);
+ to_rtx = protect_from_queue (to_rtx, 1);
+ binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
+ if (bitsize == 1
+ && count + bitsize != GET_MODE_BITSIZE (GET_MODE (to_rtx)))
+ {
+ value = expand_and (GET_MODE (to_rtx), value, const1_rtx,
+ NULL_RTX);
+ binop = xor_optab;
+ }
+ value = expand_shift (LSHIFT_EXPR, GET_MODE (to_rtx),
+ value, build_int_2 (count, 0),
+ NULL_RTX, 1);
+ result = expand_binop (GET_MODE (to_rtx), binop, to_rtx,
+ value, to_rtx, 1, OPTAB_WIDEN);
+ if (result != to_rtx)
+ emit_move_insn (to_rtx, result);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return NULL_RTX;
+ default:
+ break;
+ }
+
+ break;
+ }
+
result = store_field (to_rtx, bitsize, bitpos, mode1, from,
(want_value
/* Spurious cast for HPUX compiler. */
if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from)
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
&& ! ((TREE_CODE (to) == VAR_DECL || TREE_CODE (to) == PARM_DECL)
- && GET_CODE (DECL_RTL (to)) == REG))
+ && REG_P (DECL_RTL (to))))
{
rtx value;
/* Don't move directly into a return register. */
if (TREE_CODE (to) == RESULT_DECL
- && (GET_CODE (to_rtx) == REG || GET_CODE (to_rtx) == PARALLEL))
+ && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL))
{
rtx temp;
size = expr_size (from);
from_rtx = expand_expr (from, NULL_RTX, VOIDmode, 0);
- if (TARGET_MEM_FUNCTIONS)
- emit_library_call (memmove_libfunc, LCT_NORMAL,
- VOIDmode, 3, XEXP (to_rtx, 0), Pmode,
- XEXP (from_rtx, 0), Pmode,
- convert_to_mode (TYPE_MODE (sizetype),
- size, TYPE_UNSIGNED (sizetype)),
- TYPE_MODE (sizetype));
- else
- emit_library_call (bcopy_libfunc, LCT_NORMAL,
- VOIDmode, 3, XEXP (from_rtx, 0), Pmode,
- XEXP (to_rtx, 0), Pmode,
- convert_to_mode (TYPE_MODE (integer_type_node),
- size,
- TYPE_UNSIGNED (integer_type_node)),
- TYPE_MODE (integer_type_node));
+ emit_library_call (memmove_libfunc, LCT_NORMAL,
+ VOIDmode, 3, XEXP (to_rtx, 0), Pmode,
+ XEXP (from_rtx, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TYPE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
preserve_temp_slots (to_rtx);
free_temp_slots ();
dont_return_target = 1;
}
else if ((want_value & 1) != 0
- && GET_CODE (target) == MEM
+ && MEM_P (target)
&& ! MEM_VOLATILE_P (target)
&& GET_MODE (target) != BLKmode)
/* If target is in memory and caller wants value in a register instead,
only necessary if the MEM is volatile, or if the address
overlaps TARGET. But not performing the load twice also
reduces the amount of rtl we generate and then have to CSE. */
- if (GET_CODE (temp) == MEM && (want_value & 1) != 0)
+ if (MEM_P (temp) && (want_value & 1) != 0)
temp = copy_to_reg (temp);
/* If TEMP is a VOIDmode constant, use convert_modes to make
Otherwise, if TEMP is not TARGET, return TEMP
if it is constant (for efficiency),
or if we really want the correct value. */
- if (!(target && GET_CODE (target) == REG
+ if (!(target && REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER)
- && !(GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ && !(MEM_P (target) && MEM_VOLATILE_P (target))
&& ! rtx_equal_p (temp, target)
&& (CONSTANT_P (temp) || (want_value & 1) != 0))
dont_return_target = 1;
/* If we are supposed to return TEMP, do so as long as it isn't a MEM.
??? The latter test doesn't seem to make sense. */
- else if (dont_return_target && GET_CODE (temp) != MEM)
+ else if (dont_return_target && !MEM_P (temp))
return temp;
/* Return TARGET itself if it is a hard register. */
else if ((want_value & 1) != 0
&& GET_MODE (target) != BLKmode
- && ! (GET_CODE (target) == REG
+ && ! (REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER))
return copy_to_reg (target);
return target;
}
\f
-/* Examine CTOR. Discover how many scalar fields are set to non-zero
+/* Examine CTOR. Discover how many scalar fields are set to nonzero
values and place it in *P_NZ_ELTS. Discover how many scalar fields
are set to non-constant values and place it in *P_NC_ELTS. */
tree telts = array_type_nelts (type);
if (telts && host_integerp (telts, 1))
{
- HOST_WIDE_INT n = tree_low_cst (telts, 1);
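+ /* array_type_nelts returns the number of elements minus one, so
+ add one back for the element count. */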
+ HOST_WIDE_INT n = tree_low_cst (telts, 1) + 1;
HOST_WIDE_INT m = count_type_elements (TREE_TYPE (type));
if (n == 0)
return 0;
- if (max / n < m)
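+ /* Only return n * m when the product cannot exceed MAX; otherwise
+ fall through and return -1. */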
+ else if (max / n > m)
return n * m;
}
return -1;
/* If we have a nonzero bitpos for a register target, then we just
let store_field do the bitfield handling. This is unlikely to
generate unnecessary clear instructions anyways. */
- && (bitpos == 0 || GET_CODE (target) == MEM))
+ && (bitpos == 0 || MEM_P (target)))
{
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
target
= adjust_address (target,
GET_MODE (target) == BLKmode
/* Update the alias set, if required. */
- if (GET_CODE (target) == MEM && ! MEM_KEEP_ALIAS_SET_P (target)
+ if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
&& MEM_ALIAS_SET (target) != 0)
{
target = copy_rtx (target);
set the initial value as zero so we can fold the value into
a constant. But if more than one register is involved,
this probably loses. */
- else if (GET_CODE (target) == REG && TREE_STATIC (exp)
+ else if (REG_P (target) && TREE_STATIC (exp)
&& GET_MODE_SIZE (GET_MODE (target)) <= UNITS_PER_WORD)
{
emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
clear the whole structure first. Don't do this if TARGET is a
register whose mode size isn't equal to SIZE since clear_storage
can't handle this case. */
- else if (((list_length (CONSTRUCTOR_ELTS (exp)) != fields_length (type))
- || mostly_zeros_p (exp))
- && (GET_CODE (target) != REG
+ else if (size > 0
+ && ((list_length (CONSTRUCTOR_ELTS (exp)) != fields_length (type))
+ || mostly_zeros_p (exp))
+ && (!REG_P (target)
|| ((HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (target))
== size)))
{
target));
offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
- if (GET_CODE (to_rtx) != MEM)
+ if (!MEM_P (to_rtx))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
if (TREE_READONLY (field))
{
- if (GET_CODE (to_rtx) == MEM)
+ if (MEM_P (to_rtx))
to_rtx = copy_rtx (to_rtx);
RTX_UNCHANGING_P (to_rtx) = 1;
start of a word, try to widen it to a full word.
This special case allows us to output C++ member function
initializations in a form that the optimizers can understand. */
- if (GET_CODE (target) == REG
+ if (REG_P (target)
&& bitsize < BITS_PER_WORD
&& bitpos % BITS_PER_WORD == 0
&& GET_MODE_CLASS (mode) == MODE_INT
}
#endif
- if (GET_CODE (to_rtx) == MEM && !MEM_KEEP_ALIAS_SET_P (to_rtx)
+ if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
&& DECL_NONADDRESSABLE_P (field))
{
to_rtx = copy_rtx (to_rtx);
/* If the constructor has fewer elements than the array,
clear the whole array first. Similarly if this is
static constructor of a non-BLKmode object. */
- if (cleared || (GET_CODE (target) == REG && TREE_STATIC (exp)))
+ if (cleared || (REG_P (target) && TREE_STATIC (exp)))
need_to_clear = 1;
else
{
{
tree lo_index = TREE_OPERAND (index, 0);
tree hi_index = TREE_OPERAND (index, 1);
- rtx index_r, pos_rtx, loop_end;
- struct nesting *loop;
+ rtx index_r, pos_rtx;
HOST_WIDE_INT lo, hi, count;
tree position;
&& (lo = tree_low_cst (lo_index, 0),
hi = tree_low_cst (hi_index, 0),
count = hi - lo + 1,
- (GET_CODE (target) != MEM
+ (!MEM_P (target)
|| count <= 2
|| (host_integerp (TYPE_SIZE (elttype), 1)
&& (tree_low_cst (TYPE_SIZE (elttype), 1) * count
{
bitpos = lo * tree_low_cst (TYPE_SIZE (elttype), 0);
- if (GET_CODE (target) == MEM
+ if (MEM_P (target)
&& !MEM_KEEP_ALIAS_SET_P (target)
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (type))
}
else
{
- expand_expr (hi_index, NULL_RTX, VOIDmode, 0);
- loop_end = gen_label_rtx ();
+ rtx loop_start = gen_label_rtx ();
+ rtx loop_end = gen_label_rtx ();
+ tree exit_cond;
+ expand_expr (hi_index, NULL_RTX, VOIDmode, 0);
unsignedp = TYPE_UNSIGNED (domain);
index = build_decl (VAR_DECL, NULL_TREE, domain);
= gen_reg_rtx (promote_mode (domain, DECL_MODE (index),
&unsignedp, 0));
SET_DECL_RTL (index, index_r);
- if (TREE_CODE (value) == SAVE_EXPR
- && SAVE_EXPR_RTL (value) == 0)
- {
- /* Make sure value gets expanded once before the
- loop. */
- expand_expr (value, const0_rtx, VOIDmode, 0);
- emit_queue ();
- }
store_expr (lo_index, index_r, 0);
- loop = expand_start_loop (0);
+
+ /* Build the head of the loop. */
+ do_pending_stack_adjust ();
+ emit_queue ();
+ emit_label (loop_start);
/* Assign value to element index. */
position
else
store_expr (value, xtarget, 0);
- expand_exit_loop_if_false (loop,
- build (LT_EXPR, integer_type_node,
- index, hi_index));
+ /* Generate a conditional jump to exit the loop. */
+ exit_cond = build (LT_EXPR, integer_type_node,
+ index, hi_index);
+ jumpifnot (exit_cond, loop_end);
+ /* Update the loop counter, and jump to the head of
+ the loop. */
expand_increment (build (PREINCREMENT_EXPR,
TREE_TYPE (index),
index, integer_one_node), 0, 0);
- expand_end_loop ();
+ emit_jump (loop_start);
+
+ /* Build the end of the loop. */
emit_label (loop_end);
}
}
else
bitpos = (i * tree_low_cst (TYPE_SIZE (elttype), 1));
- if (GET_CODE (target) == MEM && !MEM_KEEP_ALIAS_SET_P (target)
+ if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (type))
{
and then "or" in whatever non-constant ranges we need in addition.
If a large set is all zero or all ones, it is
- probably better to set it using memset (if available) or bzero.
+ probably better to set it using memset.
Also, if a large set has just a single range, it may also be
better to first clear the set (using
- bzero/memset), and set the bits we want. */
+ memset), and set the bits we want. */
/* Check for all zeros. */
if (elt == NULL_TREE && size > 0)
/* The assumption here is that it is safe to use
XEXP if the set is multi-word, but not if
it's single-word. */
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
to_rtx = adjust_address (target, mode, offset);
else if (offset == 0)
to_rtx = target;
emit_move_insn (targetx, target);
}
- else if (GET_CODE (target) == MEM)
+ else if (MEM_P (target))
targetx = target;
else
abort ();
/* Optimization: If startbit and endbit are constants divisible
by BITS_PER_UNIT, call memset instead. */
- if (TARGET_MEM_FUNCTIONS
- && TREE_CODE (startbit) == INTEGER_CST
+ if (TREE_CODE (startbit) == INTEGER_CST
&& TREE_CODE (endbit) == INTEGER_CST
&& (startb = TREE_INT_CST_LOW (startbit)) % BITS_PER_UNIT == 0
&& (endb = TREE_INT_CST_LOW (endbit) + 1) % BITS_PER_UNIT == 0)
twice, once with emit_move_insn and once via store_field. */
if (mode == BLKmode
- && (GET_CODE (target) == REG || GET_CODE (target) == SUBREG))
+ && (REG_P (target) || GET_CODE (target) == SUBREG))
{
rtx object = assign_temp (type, 0, 1, 1);
rtx blk_object = adjust_address (object, BLKmode, 0);
|| (mode != BLKmode && ! direct_store[(int) mode]
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
- || GET_CODE (target) == REG
+ || REG_P (target)
|| GET_CODE (target) == SUBREG
/* If the field isn't aligned enough to store as an ordinary memref,
store it as a bit field. */
boundary. If so, we simply do a block copy. */
if (GET_MODE (target) == BLKmode && GET_MODE (temp) == BLKmode)
{
- if (GET_CODE (target) != MEM || GET_CODE (temp) != MEM
+ if (!MEM_P (target) || !MEM_P (temp)
|| bitpos % BITS_PER_UNIT != 0)
abort ();
/* The caller wants an rtx for the value.
If possible, avoid refetching from the bitfield itself. */
if (width_mask != 0
- && ! (GET_CODE (target) == MEM && MEM_VOLATILE_P (target)))
+ && ! (MEM_P (target) && MEM_VOLATILE_P (target)))
{
tree count;
enum machine_mode tmode;
/* If a value is wanted, it must be the lhs;
so make the address stable for multiple use. */
- if (value_mode != VOIDmode && GET_CODE (addr) != REG
+ if (value_mode != VOIDmode && !REG_P (addr)
&& ! CONSTANT_ADDRESS_P (addr)
/* A frame-pointer reference is already stable. */
&& ! (GET_CODE (addr) == PLUS
else if (TREE_CODE (exp) == COMPONENT_REF)
{
tree field = TREE_OPERAND (exp, 1);
- tree this_offset = DECL_FIELD_OFFSET (field);
+ tree this_offset = component_ref_field_offset (exp);
/* If this field hasn't been filled in yet, don't go
past it. This should only happen when folding expressions
made during type construction. */
if (this_offset == 0)
break;
- else
- this_offset = SUBSTITUTE_PLACEHOLDER_IN_EXPR (this_offset, exp);
offset = size_binop (PLUS_EXPR, offset, this_offset);
bit_offset = size_binop (PLUS_EXPR, bit_offset,
|| TREE_CODE (exp) == ARRAY_RANGE_REF)
{
tree index = TREE_OPERAND (exp, 1);
- tree array = TREE_OPERAND (exp, 0);
- tree domain = TYPE_DOMAIN (TREE_TYPE (array));
- tree low_bound = (domain ? TYPE_MIN_VALUE (domain) : 0);
- tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (array)));
+ tree low_bound = array_ref_low_bound (exp);
+ tree unit_size = array_ref_element_size (exp);
/* We assume all arrays have sizes that are a multiple of a byte.
First subtract the lower bound, if any, in the type of the
index, then convert to sizetype and multiply by the size of the
array element. */
- if (low_bound != 0 && ! integer_zerop (low_bound))
+ if (! integer_zerop (low_bound))
index = fold (build (MINUS_EXPR, TREE_TYPE (index),
index, low_bound));
- /* If the index has a self-referential type, instantiate it with
- the object; likewise for the component size. */
- index = SUBSTITUTE_PLACEHOLDER_IN_EXPR (index, exp);
- unit_size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (unit_size, array);
offset = size_binop (PLUS_EXPR, offset,
size_binop (MULT_EXPR,
convert (sizetype, index),
return exp;
}
+/* Return a tree of sizetype representing the size, in bytes, of the element
+ of EXP, an ARRAY_REF. */
+
+tree
+array_ref_element_size (tree exp)
+{
+ tree aligned_size = TREE_OPERAND (exp, 3);
+ tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+
+ /* If a size was specified in the ARRAY_REF, it's the size measured
+ in alignment units of the element type. So multiply by that value. */
+ if (aligned_size)
+ return size_binop (MULT_EXPR, aligned_size,
+ size_int (TYPE_ALIGN (elmt_type) / BITS_PER_UNIT));
+
+ /* Otherwise, take the size from that of the element type. Substitute
+ any PLACEHOLDER_EXPR that we have. */
+ else
+ return SUBSTITUTE_PLACEHOLDER_IN_EXPR (TYPE_SIZE_UNIT (elmt_type), exp);
+}
+
+/* Return a tree representing the lower bound of the array mentioned in
+ EXP, an ARRAY_REF. */
+
+tree
+array_ref_low_bound (tree exp)
+{
+ tree domain_type = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (exp, 0)));
+
+ /* If a lower bound is specified in EXP, use it. */
+ if (TREE_OPERAND (exp, 2))
+ return TREE_OPERAND (exp, 2);
+
+ /* Otherwise, if there is a domain type and it has a lower bound, use it,
+ substituting for a PLACEHOLDER_EXPR as needed. */
+ if (domain_type && TYPE_MIN_VALUE (domain_type))
+ return SUBSTITUTE_PLACEHOLDER_IN_EXPR (TYPE_MIN_VALUE (domain_type), exp);
+
+ /* Otherwise, return a zero of the appropriate type. */
+ return fold_convert (TREE_TYPE (TREE_OPERAND (exp, 1)), integer_zero_node);
+}
+
+/* Return a tree representing the offset, in bytes, of the field referenced
+ by EXP. This does not include any offset in DECL_FIELD_BIT_OFFSET. */
+
+tree
+component_ref_field_offset (tree exp)
+{
+ tree aligned_offset = TREE_OPERAND (exp, 2);
+ tree field = TREE_OPERAND (exp, 1);
+
+ /* If an offset was specified in the COMPONENT_REF, it's the offset measured
+ in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. So multiply by that
+ value. */
+ if (aligned_offset)
+ return size_binop (MULT_EXPR, aligned_offset,
+ size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT));
+
+ /* Otherwise, take the offset from that of the field. Substitute
+ any PLACEHOLDER_EXPR that we have. */
+ else
+ return SUBSTITUTE_PLACEHOLDER_IN_EXPR (DECL_FIELD_OFFSET (field), exp);
+}
+
/* Return 1 if T is an expression that get_inner_reference handles. */
int
/* Check for subreg applied to an expression produced by loop optimizer. */
if (code == SUBREG
- && GET_CODE (SUBREG_REG (value)) != REG
- && GET_CODE (SUBREG_REG (value)) != MEM)
+ && !REG_P (SUBREG_REG (value))
+ && !MEM_P (SUBREG_REG (value)))
{
value = simplify_gen_subreg (GET_MODE (value),
force_reg (GET_MODE (SUBREG_REG (value)),
if (ARITHMETIC_P (value))
{
op2 = XEXP (value, 1);
- if (!CONSTANT_P (op2) && !(GET_CODE (op2) == REG && op2 != subtarget))
+ if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget))
subtarget = 0;
if (code == MINUS && GET_CODE (op2) == CONST_INT)
{
creating another one around this addition. */
if (code == PLUS && GET_CODE (op2) == CONST_INT
&& GET_CODE (XEXP (value, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (value, 0), 0)) == REG
+ && REG_P (XEXP (XEXP (value, 0), 0))
&& REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
&& REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
{
#ifdef INSN_SCHEDULING
/* On machines that have insn scheduling, we want all memory references to be
explicit, so we need to deal with such paradoxical SUBREGs. */
- if (GET_CODE (value) == SUBREG && GET_CODE (SUBREG_REG (value)) == MEM
+ if (GET_CODE (value) == SUBREG && MEM_P (SUBREG_REG (value))
&& (GET_MODE_SIZE (GET_MODE (value))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (value)))))
value
{
rtx exp_rtl = 0;
int i, nops;
- static tree save_expr_list;
if (x == 0
/* If EXP has varying size, we MUST use a target since we currently
!= INTEGER_CST)
&& GET_MODE (x) == BLKmode)
/* If X is in the outgoing argument area, it is always safe. */
- || (GET_CODE (x) == MEM
+ || (MEM_P (x)
&& (XEXP (x, 0) == virtual_outgoing_args_rtx
|| (GET_CODE (XEXP (x, 0)) == PLUS
&& XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
if (GET_CODE (x) == SUBREG)
{
x = SUBREG_REG (x);
- if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
return 0;
}
- /* A SAVE_EXPR might appear many times in the expression passed to the
- top-level safe_from_p call, and if it has a complex subexpression,
- examining it multiple times could result in a combinatorial explosion.
- E.g. on an Alpha running at least 200MHz, a Fortran testcase compiled
- with optimization took about 28 minutes to compile -- even though it was
- only a few lines long. So we mark each SAVE_EXPR we see with TREE_PRIVATE
- and turn that off when we are done. We keep a list of the SAVE_EXPRs
- we have processed. Note that the only test of top_p was above. */
-
- if (top_p)
- {
- int rtn;
- tree t;
-
- save_expr_list = 0;
-
- rtn = safe_from_p (x, exp, 0);
-
- for (t = save_expr_list; t != 0; t = TREE_CHAIN (t))
- TREE_PRIVATE (TREE_PURPOSE (t)) = 0;
-
- return rtn;
- }
-
/* Now look at our tree code and possibly recurse. */
switch (TREE_CODE_CLASS (TREE_CODE (exp)))
{
else
return 0;
+ case 's':
+ /* The only case we look at here is the DECL_INITIAL inside a
+ DECL_EXPR. */
+ return (TREE_CODE (exp) != DECL_EXPR
+ || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL
+ || !DECL_INITIAL (DECL_EXPR_DECL (exp))
+ || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
+
case '2':
case '<':
if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
if (DECL_P (exp))
{
if (!DECL_RTL_SET_P (exp)
- || GET_CODE (DECL_RTL (exp)) != MEM)
+ || !MEM_P (DECL_RTL (exp)))
return 0;
else
exp_rtl = XEXP (DECL_RTL (exp), 0);
break;
case INDIRECT_REF:
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& alias_sets_conflict_p (MEM_ALIAS_SET (x),
get_alias_set (exp)))
return 0;
case CALL_EXPR:
/* Assume that the call will clobber all hard registers and
all of memory. */
- if ((GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
- || GET_CODE (x) == MEM)
- return 0;
- break;
-
- case RTL_EXPR:
- /* If a sequence exists, we would have to scan every instruction
- in the sequence to see if it was safe. This is probably not
- worthwhile. */
- if (RTL_EXPR_SEQUENCE (exp))
+ if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ || MEM_P (x))
return 0;
-
- exp_rtl = RTL_EXPR_RTL (exp);
break;
case WITH_CLEANUP_EXPR:
break;
case CLEANUP_POINT_EXPR:
- return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
-
case SAVE_EXPR:
- exp_rtl = SAVE_EXPR_RTL (exp);
- if (exp_rtl)
- break;
-
- /* If we've already scanned this, don't do it again. Otherwise,
- show we've scanned it and record for clearing the flag if we're
- going on. */
- if (TREE_PRIVATE (exp))
- return 1;
-
- TREE_PRIVATE (exp) = 1;
- if (! safe_from_p (x, TREE_OPERAND (exp, 0), 0))
- {
- TREE_PRIVATE (exp) = 0;
- return 0;
- }
-
- save_expr_list = tree_cons (exp, NULL_TREE, save_expr_list);
- return 1;
+ return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
case BIND_EXPR:
/* The only operand we look at is operand 1. The rest aren't
if (GET_CODE (exp_rtl) == SUBREG)
{
exp_rtl = SUBREG_REG (exp_rtl);
- if (GET_CODE (exp_rtl) == REG
+ if (REG_P (exp_rtl)
&& REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
return 0;
}
/* If the rtl is X, then it is not safe. Otherwise, it is unless both
are memory and they conflict. */
return ! (rtx_equal_p (x, exp_rtl)
- || (GET_CODE (x) == MEM && GET_CODE (exp_rtl) == MEM
+ || (MEM_P (x) && MEM_P (exp_rtl)
&& true_dependence (exp_rtl, VOIDmode, x,
rtx_addr_varies_p)));
}
emit_line_note (input_location);
/* Record where the insns produced belong. */
- if (cfun->dont_emit_block_notes)
- record_block_change (TREE_BLOCK (exp));
+ record_block_change (TREE_BLOCK (exp));
ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl);
original_target = target;
ignore = (target == const0_rtx
|| ((code == NON_LVALUE_EXPR || code == NOP_EXPR
- || code == CONVERT_EXPR || code == REFERENCE_EXPR
- || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
+ || code == CONVERT_EXPR || code == COND_EXPR
+ || code == VIEW_CONVERT_EXPR)
&& TREE_CODE (type) == VOID_TYPE));
/* If we are going to ignore this result, we need only do something
&& modifier != EXPAND_CONST_ADDRESS)
{
temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
- if (GET_CODE (temp) == MEM)
+ if (MEM_P (temp))
temp = copy_to_reg (temp);
return const0_rtx;
}
Another is a CALL_EXPR which must return in memory. */
if (! cse_not_expected && mode != BLKmode && target
- && (GET_CODE (target) != REG || REGNO (target) < FIRST_PSEUDO_REGISTER)
+ && (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
&& ! (code == CONSTRUCTOR && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
&& ! (code == CALL_EXPR && aggregate_value_p (exp, exp)))
target = 0;
if (context != 0 && context != current_function_decl
/* If var is static, we don't need a static chain to access it. */
- && ! (GET_CODE (DECL_RTL (exp)) == MEM
+ && ! (MEM_P (DECL_RTL (exp))
&& CONSTANT_P (XEXP (DECL_RTL (exp), 0))))
{
rtx addr;
if (DECL_NO_STATIC_CHAIN (current_function_decl))
abort ();
lang_hooks.mark_addressable (exp);
- if (GET_CODE (DECL_RTL (exp)) != MEM)
+ if (!MEM_P (DECL_RTL (exp)))
abort ();
addr = XEXP (DECL_RTL (exp), 0);
- if (GET_CODE (addr) == MEM)
+ if (MEM_P (addr))
addr
= replace_equiv_address (addr,
fix_lexical_addr (XEXP (addr, 0), exp));
from its initializer, while the initializer is still being parsed.
See expand_decl. */
- else if (GET_CODE (DECL_RTL (exp)) == MEM
- && GET_CODE (XEXP (DECL_RTL (exp), 0)) == REG)
+ else if (MEM_P (DECL_RTL (exp))
+ && REG_P (XEXP (DECL_RTL (exp), 0)))
temp = validize_mem (DECL_RTL (exp));
/* If DECL_RTL is memory, we are in the normal case and either
the address is not valid or it is not a register and -fforce-addr
is specified, get the address into a register. */
- else if (GET_CODE (DECL_RTL (exp)) == MEM
+ else if (MEM_P (DECL_RTL (exp))
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_SUM
&& modifier != EXPAND_INITIALIZER
&& (! memory_address_p (DECL_MODE (exp),
XEXP (DECL_RTL (exp), 0))
|| (flag_force_addr
- && GET_CODE (XEXP (DECL_RTL (exp), 0)) != REG)))
+ && !REG_P (XEXP (DECL_RTL (exp), 0)))))
{
if (alt_rtl)
*alt_rtl = DECL_RTL (exp);
if the address is a register. */
if (temp != 0)
{
- if (GET_CODE (temp) == MEM && GET_CODE (XEXP (temp, 0)) == REG)
+ if (MEM_P (temp) && REG_P (XEXP (temp, 0)))
mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
return temp;
must be a promoted value. We return a SUBREG of the wanted mode,
but mark it so that we know that it was already extended. */
- if (GET_CODE (DECL_RTL (exp)) == REG
+ if (REG_P (DECL_RTL (exp))
&& GET_MODE (DECL_RTL (exp)) != DECL_MODE (exp))
{
/* Get the signedness used for this variable. Ensure we get the
return temp;
case SAVE_EXPR:
- context = decl_function_context (exp);
-
- /* If this SAVE_EXPR was at global context, assume we are an
- initialization function and move it into our context. */
- if (context == 0)
- SAVE_EXPR_CONTEXT (exp) = current_function_decl;
-
- if (context == current_function_decl)
- context = 0;
-
- /* If this is non-local, handle it. */
- if (context)
- {
- /* The following call just exists to abort if the context is
- not of a containing function. */
- find_function_data (context);
-
- temp = SAVE_EXPR_RTL (exp);
- if (temp && GET_CODE (temp) == REG)
- {
- put_var_into_stack (exp, /*rescan=*/true);
- temp = SAVE_EXPR_RTL (exp);
- }
- if (temp == 0 || GET_CODE (temp) != MEM)
- abort ();
- return
- replace_equiv_address (temp,
- fix_lexical_addr (XEXP (temp, 0), exp));
- }
- if (SAVE_EXPR_RTL (exp) == 0)
- {
- if (mode == VOIDmode)
- temp = const0_rtx;
- else
- temp = assign_temp (build_qualified_type (type,
- (TYPE_QUALS (type)
- | TYPE_QUAL_CONST)),
- 3, 0, 0);
-
- SAVE_EXPR_RTL (exp) = temp;
- if (!optimize && GET_CODE (temp) == REG)
- save_expr_regs = gen_rtx_EXPR_LIST (VOIDmode, temp,
- save_expr_regs);
-
- /* If the mode of TEMP does not match that of the expression, it
- must be a promoted value. We pass store_expr a SUBREG of the
- wanted mode but mark it so that we know that it was already
- extended. */
-
- if (GET_CODE (temp) == REG && GET_MODE (temp) != mode)
- {
- temp = gen_lowpart_SUBREG (mode, SAVE_EXPR_RTL (exp));
- promote_mode (type, mode, &unsignedp, 0);
- SUBREG_PROMOTED_VAR_P (temp) = 1;
- SUBREG_PROMOTED_UNSIGNED_SET (temp, unsignedp);
- }
-
- if (temp == const0_rtx)
- expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
- else
- store_expr (TREE_OPERAND (exp, 0), temp,
- modifier == EXPAND_STACK_PARM ? 2 : 0);
+ {
+ tree val = TREE_OPERAND (exp, 0);
+ rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl);
- TREE_USED (exp) = 1;
- }
+ if (TREE_CODE (val) != VAR_DECL || !DECL_ARTIFICIAL (val))
+ {
+ /* We can indeed still hit this case, typically via builtin
+ expanders calling save_expr immediately before expanding
+ something. Assume this means that we only have to deal
+ with non-BLKmode values. */
+ if (GET_MODE (ret) == BLKmode)
+ abort ();
- /* If the mode of SAVE_EXPR_RTL does not match that of the expression, it
- must be a promoted value. We return a SUBREG of the wanted mode,
- but mark it so that we know that it was already extended. */
+ val = build_decl (VAR_DECL, NULL, TREE_TYPE (exp));
+ DECL_ARTIFICIAL (val) = 1;
+ TREE_OPERAND (exp, 0) = val;
- if (GET_CODE (SAVE_EXPR_RTL (exp)) == REG
- && GET_MODE (SAVE_EXPR_RTL (exp)) != mode)
- {
- /* Compute the signedness and make the proper SUBREG. */
- promote_mode (type, mode, &unsignedp, 0);
- temp = gen_lowpart_SUBREG (mode, SAVE_EXPR_RTL (exp));
- SUBREG_PROMOTED_VAR_P (temp) = 1;
- SUBREG_PROMOTED_UNSIGNED_SET (temp, unsignedp);
- return temp;
- }
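+ /* Stabilize the computed value in a register and record it as the
+ DECL_RTL of the artificial variable, so that re-expanding this
+ SAVE_EXPR reuses the same rtx. */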
+ if (!CONSTANT_P (ret))
+ ret = copy_to_reg (ret);
+ SET_DECL_RTL (val, ret);
+ }
- return SAVE_EXPR_RTL (exp);
+ return ret;
+ }
case UNSAVE_EXPR:
{
expand_computed_goto (TREE_OPERAND (exp, 0));
return const0_rtx;
+ /* These are lowered during gimplification, so we should never ever
+ see them here. */
+ case LOOP_EXPR:
case EXIT_EXPR:
- expand_exit_loop_if_false (NULL,
- invert_truthvalue (TREE_OPERAND (exp, 0)));
- return const0_rtx;
+ abort ();
case LABELED_BLOCK_EXPR:
if (LABELED_BLOCK_BODY (exp))
- expand_expr_stmt_value (LABELED_BLOCK_BODY (exp), 0, 1);
+ expand_expr_stmt (LABELED_BLOCK_BODY (exp));
/* Should perhaps use expand_label, but this is simpler and safer. */
do_pending_stack_adjust ();
emit_label (label_rtx (LABELED_BLOCK_LABEL (exp)));
expand_goto (LABELED_BLOCK_LABEL (EXIT_BLOCK_LABELED_BLOCK (exp)));
return const0_rtx;
- case LOOP_EXPR:
- push_temp_slots ();
- expand_start_loop (1);
- expand_expr_stmt_value (TREE_OPERAND (exp, 0), 0, 1);
- expand_end_loop ();
- pop_temp_slots ();
-
- return const0_rtx;
-
case BIND_EXPR:
{
tree block = BIND_EXPR_BLOCK (exp);
int mark_ends;
- if (TREE_CODE (BIND_EXPR_BODY (exp)) != RTL_EXPR)
- {
- /* If we're in functions-as-trees mode, this BIND_EXPR represents
- the block, so we need to emit NOTE_INSN_BLOCK_* notes. */
- mark_ends = (block != NULL_TREE);
- expand_start_bindings_and_block (mark_ends ? 0 : 2, block);
- }
- else
- {
- /* If we're not in functions-as-trees mode, we've already emitted
- those notes into our RTL_EXPR, so we just want to splice our BLOCK
- into the enclosing one. */
- mark_ends = 0;
-
- /* Need to open a binding contour here because
- if there are any cleanups they must be contained here. */
- expand_start_bindings_and_block (2, NULL_TREE);
-
- /* Mark the corresponding BLOCK for output in its proper place. */
- if (block)
- {
- if (TREE_USED (block))
- abort ();
- lang_hooks.decls.insert_block (block);
- }
- }
+ /* If we're in functions-as-trees mode, this BIND_EXPR represents
+ the block, so we need to emit NOTE_INSN_BLOCK_* notes. */
+ mark_ends = (block != NULL_TREE);
+ expand_start_bindings_and_block (mark_ends ? 0 : 2, block);
/* If VARS have not yet been expanded, expand them now. */
expand_vars (BIND_EXPR_VARS (exp));
return temp;
}
- case RTL_EXPR:
- if (RTL_EXPR_SEQUENCE (exp))
- {
- if (RTL_EXPR_SEQUENCE (exp) == const0_rtx)
- abort ();
- emit_insn (RTL_EXPR_SEQUENCE (exp));
- RTL_EXPR_SEQUENCE (exp) = const0_rtx;
- }
- preserve_rtl_expr_result (RTL_EXPR_RTL (exp));
- free_temps_for_rtl_expr (exp);
- if (alt_rtl)
- *alt_rtl = RTL_EXPR_ALT_RTL (exp);
- return RTL_EXPR_RTL (exp);
-
case CONSTRUCTOR:
/* If we don't need the result, just ensure we evaluate any
subexpressions. */
{
tree array = TREE_OPERAND (exp, 0);
- tree domain = TYPE_DOMAIN (TREE_TYPE (array));
- tree low_bound = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
+ tree low_bound = array_ref_low_bound (exp);
tree index = convert (sizetype, TREE_OPERAND (exp, 1));
HOST_WIDE_INT i;
C, but can in Ada if we have unchecked conversion of an expression
from a scalar type to an array or record type or for an
ARRAY_RANGE_REF whose type is BLKmode. */
- else if (GET_CODE (op0) != MEM
+ else if (!MEM_P (op0)
&& (offset != 0
|| (code == ARRAY_RANGE_REF && mode == BLKmode)))
{
- /* If the operand is a SAVE_EXPR, we can deal with this by
- forcing the SAVE_EXPR into memory. */
- if (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR)
- {
- put_var_into_stack (TREE_OPERAND (exp, 0),
- /*rescan=*/true);
- op0 = SAVE_EXPR_RTL (TREE_OPERAND (exp, 0));
- }
- else
- {
- tree nt
- = build_qualified_type (TREE_TYPE (tem),
- (TYPE_QUALS (TREE_TYPE (tem))
- | TYPE_QUAL_CONST));
- rtx memloc = assign_temp (nt, 1, 1, 1);
+ tree nt = build_qualified_type (TREE_TYPE (tem),
+ (TYPE_QUALS (TREE_TYPE (tem))
+ | TYPE_QUAL_CONST));
+ rtx memloc = assign_temp (nt, 1, 1, 1);
- emit_move_insn (memloc, op0);
- op0 = memloc;
- }
+ emit_move_insn (memloc, op0);
+ op0 = memloc;
}
if (offset != 0)
rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
EXPAND_SUM);
- if (GET_CODE (op0) != MEM)
+ if (!MEM_P (op0))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
/* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
record its alignment as BIGGEST_ALIGNMENT. */
- if (GET_CODE (op0) == MEM && bitpos == 0 && offset != 0
+ if (MEM_P (op0) && bitpos == 0 && offset != 0
&& is_aligning_offset (offset, tem))
set_mem_align (op0, BIGGEST_ALIGNMENT);
/* Don't forget about volatility even if this is a bitfield. */
- if (GET_CODE (op0) == MEM && volatilep && ! MEM_VOLATILE_P (op0))
+ if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
{
if (op0 == orig_op0)
op0 = copy_rtx (op0);
(which we know to be the width of a basic mode), then
storing into memory, and changing the mode to BLKmode. */
if (mode1 == VOIDmode
- || GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
+ || REG_P (op0) || GET_CODE (op0) == SUBREG
|| (mode1 != BLKmode && ! direct_load[(int) mode1]
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
|| (mode1 != BLKmode
&& (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
|| (bitpos % GET_MODE_ALIGNMENT (mode) != 0)
- || (GET_CODE (op0) == MEM
+ || (MEM_P (op0)
&& (MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
|| (bitpos % GET_MODE_ALIGNMENT (mode1) != 0))))
&& ((modifier == EXPAND_CONST_ADDRESS
enum machine_mode ext_mode = mode;
if (ext_mode == BLKmode
- && ! (target != 0 && GET_CODE (op0) == MEM
- && GET_CODE (target) == MEM
+ && ! (target != 0 && MEM_P (op0)
+ && MEM_P (target)
&& bitpos % BITS_PER_UNIT == 0))
ext_mode = mode_for_size (bitsize, MODE_INT, 1);
/* In this case, BITPOS must start at a byte boundary and
TARGET, if specified, must be a MEM. */
- if (GET_CODE (op0) != MEM
- || (target != 0 && GET_CODE (target) != MEM)
+ if (!MEM_P (op0)
+ || (target != 0 && !MEM_P (target))
|| bitpos % BITS_PER_UNIT != 0)
abort ();
op0 = validize_mem (op0);
- if (GET_CODE (op0) == MEM && GET_CODE (XEXP (op0, 0)) == REG)
+ if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
op0 = copy_rtx (op0);
set_mem_attributes (op0, exp, 0);
- if (GET_CODE (XEXP (op0, 0)) == REG)
+ if (REG_P (XEXP (op0, 0)))
mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
MEM_VOLATILE_P (op0) |= volatilep;
return target;
}
- case VTABLE_REF:
- {
- rtx insn, before = get_last_insn (), vtbl_ref;
-
- /* Evaluate the interior expression. */
- subtarget = expand_expr (TREE_OPERAND (exp, 0), target,
- tmode, modifier);
-
- /* Get or create an instruction off which to hang a note. */
- if (REG_P (subtarget))
- {
- target = subtarget;
- insn = get_last_insn ();
- if (insn == before)
- abort ();
- if (! INSN_P (insn))
- insn = prev_nonnote_insn (insn);
- }
- else
- {
- target = gen_reg_rtx (GET_MODE (subtarget));
- insn = emit_move_insn (target, subtarget);
- }
-
- /* Collect the data for the note. */
- vtbl_ref = XEXP (DECL_RTL (TREE_OPERAND (exp, 1)), 0);
- vtbl_ref = plus_constant (vtbl_ref,
- tree_low_cst (TREE_OPERAND (exp, 2), 0));
- /* Discard the initial CONST that was added. */
- vtbl_ref = XEXP (vtbl_ref, 0);
-
- REG_NOTES (insn)
- = gen_rtx_EXPR_LIST (REG_VTABLE_REF, vtbl_ref, REG_NOTES (insn));
-
- return target;
- }
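+ /* Only the underlying expression of an OBJ_TYPE_REF matters for RTL
+ generation; the virtual call information it carries is used at the
+ tree level. */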
+ case OBJ_TYPE_REF:
+ return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier);
/* Intended for a reference to a buffer of a file-object in Pascal.
But it's not certain that a special tree code will really be
case NON_LVALUE_EXPR:
case NOP_EXPR:
case CONVERT_EXPR:
- case REFERENCE_EXPR:
if (TREE_OPERAND (exp, 0) == error_mark_node)
return const0_rtx;
target = assign_temp (type, 0, 1, 1);
}
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
/* Store data into beginning of memory target. */
store_expr (TREE_OPERAND (exp, 0),
adjust_address (target, TYPE_MODE (valtype), 0),
modifier == EXPAND_STACK_PARM ? 2 : 0);
- else if (GET_CODE (target) == REG)
+ else if (REG_P (target))
/* Store this field into a union of the proper type. */
store_field (target,
MIN ((int_size_in_bytes (TREE_TYPE
&& GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD
&& GET_MODE_SIZE (GET_MODE (op0)) <= UNITS_PER_WORD)
op0 = gen_lowpart (TYPE_MODE (type), op0);
- else if (GET_CODE (op0) != MEM)
+ else if (!MEM_P (op0))
{
/* If the operand is not a MEM, force it into memory. Since we
are going to be changing the mode of the MEM, don't call
that the operand is known to be aligned, indicate that it is.
Otherwise, we need only be concerned about alignment for non-BLKmode
results. */
- if (GET_CODE (op0) == MEM)
+ if (MEM_P (op0))
{
op0 = copy_rtx (op0);
&& (GET_MODE_CLASS (mode) == MODE_INT)
? addv_optab : add_optab;
- /* If we are adding a constant, an RTL_EXPR that is sp, fp, or ap, and
+ /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
something else, make sure we add the register to the constant and
then to the other thing. This case can occur during strength
reduction and doing it this way will produce better code if the
if (TREE_CODE (TREE_OPERAND (exp, 0)) == PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 1)) == INTEGER_CST
- && TREE_CODE (TREE_OPERAND (exp, 1)) == RTL_EXPR
- && (RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == frame_pointer_rtx
- || RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == stack_pointer_rtx
- || RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == arg_pointer_rtx))
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == VAR_DECL
+ && (DECL_RTL (TREE_OPERAND (exp, 1)) == frame_pointer_rtx
+ || DECL_RTL (TREE_OPERAND (exp, 1)) == stack_pointer_rtx
+ || DECL_RTL (TREE_OPERAND (exp, 1)) == arg_pointer_rtx))
{
tree t = TREE_OPERAND (exp, 1);
op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode,
EXPAND_SUM);
- if (GET_CODE (op0) != REG)
+ if (!REG_P (op0))
op0 = force_operand (op0, NULL_RTX);
- if (GET_CODE (op0) != REG)
+ if (!REG_P (op0))
op0 = copy_to_mode_reg (mode, op0);
return gen_rtx_MULT (mode, op0,
target = original_target;
if (target == 0
|| modifier == EXPAND_STACK_PARM
- || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || (MEM_P (target) && MEM_VOLATILE_P (target))
|| GET_MODE (target) != mode
- || (GET_CODE (target) == REG
+ || (REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER))
target = gen_reg_rtx (mode);
expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1),
/* At this point, a MEM target is no longer useful; we will get better
code without it. */
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
target = gen_reg_rtx (mode);
/* If op1 was placed in target, swap op0 and op1. */
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
+ case LTGT_EXPR:
temp = do_store_flag (exp,
modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
tmode != VOIDmode ? tmode : mode, 0);
/* For foo != 0, load foo, and if it is nonzero load 1 instead. */
if (code == NE_EXPR && integer_zerop (TREE_OPERAND (exp, 1))
&& original_target
- && GET_CODE (original_target) == REG
+ && REG_P (original_target)
&& (GET_MODE (original_target)
== TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
{
|| ! safe_from_p (target, exp, 1)
/* Make sure we don't have a hard reg (such as function's return
value) live across basic blocks, if not optimizing. */
- || (!optimize && GET_CODE (target) == REG
+ || (!optimize && REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER)))
target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
if (EXPR_HAS_LOCATION (exp))
{
emit_line_note (EXPR_LOCATION (exp));
- if (cfun->dont_emit_block_notes)
- record_block_change (TREE_BLOCK (exp));
+ record_block_change (TREE_BLOCK (exp));
}
expand_elseif (TREE_OPERAND (exp, 0));
expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, 0);
temp = assign_temp (type, 0, 0, 1);
else if (original_target
&& (safe_from_p (original_target, TREE_OPERAND (exp, 0), 1)
- || (singleton && GET_CODE (original_target) == REG
+ || (singleton && REG_P (original_target)
&& REGNO (original_target) >= FIRST_PSEUDO_REGISTER
&& original_target == var_rtx (singleton)))
&& GET_MODE (original_target) == mode
#ifdef HAVE_conditional_move
&& (! can_conditionally_move_p (mode)
- || GET_CODE (original_target) == REG
+ || REG_P (original_target)
|| TREE_ADDRESSABLE (type))
#endif
- && (GET_CODE (original_target) != MEM
+ && (!MEM_P (original_target)
|| TREE_ADDRESSABLE (type)))
temp = original_target;
else if (TREE_ADDRESSABLE (type))
might clobber it. */
if ((binary_op
&& ! safe_from_p (temp, TREE_OPERAND (binary_op, 1), 1))
- || (GET_CODE (temp) == REG
+ || (REG_P (temp)
&& REGNO (temp) < FIRST_PSEUDO_REGISTER))
temp = gen_reg_rtx (mode);
store_expr (singleton, temp,
|| TREE_CODE (TREE_OPERAND (exp, 1)) == SAVE_EXPR)
&& safe_from_p (temp, TREE_OPERAND (exp, 2), 1))
{
- if (GET_CODE (temp) == REG
+ if (REG_P (temp)
&& REGNO (temp) < FIRST_PSEUDO_REGISTER)
temp = gen_reg_rtx (mode);
store_expr (TREE_OPERAND (exp, 1), temp,
|| TREE_CODE (TREE_OPERAND (exp, 2)) == SAVE_EXPR)
&& safe_from_p (temp, TREE_OPERAND (exp, 1), 1))
{
- if (GET_CODE (temp) == REG
+ if (REG_P (temp)
&& REGNO (temp) < FIRST_PSEUDO_REGISTER)
temp = gen_reg_rtx (mode);
store_expr (TREE_OPERAND (exp, 2), temp,
{
target = assign_temp (type, 2, 0, 1);
SET_DECL_RTL (slot, target);
- if (TREE_ADDRESSABLE (slot))
- put_var_into_stack (slot, /*rescan=*/false);
/* Since SLOT is not known to the called function
to belong to its stack frame, we must build an explicit
return target;
}
else
- {
- SET_DECL_RTL (slot, target);
- /* If we must have an addressable slot, then make sure that
- the RTL that we just stored in slot is OK. */
- if (TREE_ADDRESSABLE (slot))
- put_var_into_stack (slot, /*rescan=*/true);
- }
+ SET_DECL_RTL (slot, target);
}
exp1 = TREE_OPERAND (exp, 3) = TREE_OPERAND (exp, 1);
/* Mark it as expanded. */
TREE_OPERAND (exp, 1) = NULL_TREE;
- store_expr (exp1, target, modifier == EXPAND_STACK_PARM ? 2 : 0);
+ if (VOID_TYPE_P (TREE_TYPE (exp1)))
+ /* If the initializer is void, just expand it; it will initialize
+ the object directly. */
+ expand_expr (exp1, const0_rtx, VOIDmode, 0);
+ else
+ store_expr (exp1, target, modifier == EXPAND_STACK_PARM ? 2 : 0);
expand_decl_cleanup_eh (NULL_TREE, cleanups, CLEANUP_EH_ONLY (exp));
if (CONSTANT_P (op0))
op0 = force_const_mem (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))),
op0);
- else if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
- || GET_CODE (op0) == CONCAT || GET_CODE (op0) == ADDRESSOF
- || GET_CODE (op0) == PARALLEL || GET_CODE (op0) == LO_SUM)
+ else if (REG_P (op0) || GET_CODE (op0) == SUBREG
+ || GET_CODE (op0) == CONCAT || GET_CODE (op0) == PARALLEL
+ || GET_CODE (op0) == LO_SUM)
{
- /* If the operand is a SAVE_EXPR, we can deal with this by
- forcing the SAVE_EXPR into memory. */
- if (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR)
- {
- put_var_into_stack (TREE_OPERAND (exp, 0),
- /*rescan=*/true);
- op0 = SAVE_EXPR_RTL (TREE_OPERAND (exp, 0));
- }
+ /* If this object is in a register, it can't be BLKmode. */
+ tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
+ rtx memloc = assign_temp (inner_type, 1, 1, 1);
+
+ if (GET_CODE (op0) == PARALLEL)
+ /* Handle calls that pass values in multiple
+ non-contiguous locations. The Irix 6 ABI has examples
+ of this. */
+ emit_group_store (memloc, op0, inner_type,
+ int_size_in_bytes (inner_type));
else
- {
- /* If this object is in a register, it can't be BLKmode. */
- tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
- rtx memloc = assign_temp (inner_type, 1, 1, 1);
-
- if (GET_CODE (op0) == PARALLEL)
- /* Handle calls that pass values in multiple
- non-contiguous locations. The Irix 6 ABI has examples
- of this. */
- emit_group_store (memloc, op0, inner_type,
- int_size_in_bytes (inner_type));
- else
- emit_move_insn (memloc, op0);
+ emit_move_insn (memloc, op0);
- op0 = memloc;
- }
+ op0 = memloc;
}
- if (GET_CODE (op0) != MEM)
+ if (!MEM_P (op0))
abort ();
mark_temp_addr_taken (op0);
}
if (flag_force_addr
- && GET_CODE (op0) != REG
+ && !REG_P (op0)
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER
&& modifier != EXPAND_SUM)
op0 = force_reg (Pmode, op0);
- if (GET_CODE (op0) == REG
+ if (REG_P (op0)
&& ! REG_USERVAR_P (op0))
mark_reg_pointer (op0, TYPE_ALIGN (TREE_TYPE (type)));
if (case_low && case_high)
{
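+ /* The type's bounds need not be INTEGER_CSTs, and tree_int_cst_compare
+ only handles INTEGER_CSTs, so check the bounds before comparing
+ against them. */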
/* Case label is less than minimum for type. */
- if ((tree_int_cst_compare (case_low, min_value) < 0)
- && (tree_int_cst_compare (case_high, min_value) < 0))
+ if (TREE_CODE (min_value) == INTEGER_CST
+ && tree_int_cst_compare (case_low, min_value) < 0
+ && tree_int_cst_compare (case_high, min_value) < 0)
{
warning ("case label value %d is less than minimum value for type",
(int) TREE_INT_CST_LOW (case_low));
}
/* Case value is greater than maximum for type. */
- if ((tree_int_cst_compare (case_low, max_value) > 0)
- && (tree_int_cst_compare (case_high, max_value) > 0))
+ if (TREE_CODE (max_value) == INTEGER_CST
+ && tree_int_cst_compare (case_low, max_value) > 0
+ && tree_int_cst_compare (case_high, max_value) > 0)
{
warning ("case label value %d exceeds maximum value for type",
(int) TREE_INT_CST_LOW (case_high));
}
/* Saturate lower case label value to minimum. */
- if ((tree_int_cst_compare (case_high, min_value) >= 0)
- && (tree_int_cst_compare (case_low, min_value) < 0))
+ if (TREE_CODE (min_value) == INTEGER_CST
+ && tree_int_cst_compare (case_high, min_value) >= 0
+ && tree_int_cst_compare (case_low, min_value) < 0)
{
warning ("lower value %d in case label range less than minimum value for type",
(int) TREE_INT_CST_LOW (case_low));
}
/* Saturate upper case label value to maximum. */
- if ((tree_int_cst_compare (case_low, max_value) <= 0)
- && (tree_int_cst_compare (case_high, max_value) > 0))
+ if (TREE_CODE (max_value) == INTEGER_CST
+ && tree_int_cst_compare (case_low, max_value) <= 0
+ && tree_int_cst_compare (case_high, max_value) > 0)
{
warning ("upper value %d in case label range exceeds maximum value for type",
(int) TREE_INT_CST_LOW (case_high));
bad_subreg = 1;
}
- op0_is_copy = ((GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
+ op0_is_copy = ((GET_CODE (op0) == SUBREG || REG_P (op0))
&& temp != get_last_insn ());
op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
return enqueue_insn (op0, GEN_FCN (icode) (op0, op0, op1));
}
- if (icode != (int) CODE_FOR_nothing && GET_CODE (op0) == MEM)
+ if (icode != (int) CODE_FOR_nothing && MEM_P (op0))
{
rtx addr = (general_operand (XEXP (op0, 0), mode)
? force_reg (Pmode, XEXP (op0, 0))
case UNEQ_EXPR:
code = UNEQ;
break;
+ case LTGT_EXPR:
+ code = LTGT;
+ break;
default:
abort ();
}
/* If this failed, we have to do this with set/compare/jump/set code. */
- if (GET_CODE (target) != REG
+ if (!REG_P (target)
|| reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
target = gen_reg_rtx (GET_MODE (target));
out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
and break_out_memory_refs will go to work on it and mess it up. */
#ifdef PIC_CASE_VECTOR_ADDRESS
- if (flag_pic && GET_CODE (index) != REG)
+ if (flag_pic && !REG_P (index))
index = copy_to_mode_reg (Pmode, index);
#endif
return gen_rtx_raw_CONST_VECTOR (mode, v);
}
-
#include "gt-expr.h"