#endif
#endif
-/* Convert defined/undefined to boolean. */
-#ifdef TARGET_MEM_FUNCTIONS
-#undef TARGET_MEM_FUNCTIONS
-#define TARGET_MEM_FUNCTIONS 1
-#else
-#define TARGET_MEM_FUNCTIONS 0
-#endif
-
/* If this is nonzero, we do not bother generating VOLATILE
around volatile memory references, and we are willing to
#endif
static void do_tablejump (rtx, enum machine_mode, rtx, rtx, rtx);
static rtx const_vector_from_tree (tree);
-static void execute_expand (void);
/* Record for each mode whether we can move a register directly to or
from an object of that mode in memory. If we can't, we won't try
if (GET_MODE_BITSIZE (from_mode) > BITS_PER_WORD
&& GET_MODE_BITSIZE (to_mode) <= BITS_PER_WORD)
{
- if (!((GET_CODE (from) == MEM
+ if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
&& direct_load[(int) to_mode]
&& ! mode_dependent_address_p (XEXP (from, 0)))
&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
GET_MODE_BITSIZE (from_mode)))
{
- if (!((GET_CODE (from) == MEM
+ if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
&& direct_load[(int) to_mode]
&& ! mode_dependent_address_p (XEXP (from, 0)))
&& GET_MODE_CLASS (oldmode) == MODE_INT
&& (GET_CODE (x) == CONST_DOUBLE
|| (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (oldmode)
- && ((GET_CODE (x) == MEM && ! MEM_VOLATILE_P (x)
+ && ((MEM_P (x) && ! MEM_VOLATILE_P (x)
&& direct_load[(int) mode])
|| (REG_P (x)
&& (! HARD_REGISTER_P (x)
align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
- if (GET_MODE (x) != BLKmode)
- abort ();
- if (GET_MODE (y) != BLKmode)
- abort ();
-
x = protect_from_queue (x, 1);
y = protect_from_queue (y, 0);
size = protect_from_queue (size, 0);
- if (GET_CODE (x) != MEM)
+ if (!MEM_P (x))
abort ();
- if (GET_CODE (y) != MEM)
+ if (!MEM_P (y))
abort ();
if (size == 0)
abort ();
+ /* Make sure we've got BLKmode addresses; store_one_arg can decide that
+ block copy is more efficient for other large modes, e.g. DCmode. */
+ x = adjust_address (x, BLKmode, 0);
+ y = adjust_address (y, BLKmode, 0);
+
/* Set MEM_SIZE as appropriate for this block copy. The main place this
can be incorrect is coming from __builtin_memcpy. */
if (GET_CODE (size) == CONST_INT)
return false;
}
-/* A subroutine of emit_block_move. Expand a call to memcpy or bcopy.
+/* A subroutine of emit_block_move. Expand a call to memcpy.
Return the return value from memcpy, 0 otherwise. */
static rtx
could get the wrong value for an argument.
To avoid this problem we go ahead and emit code to copy the addresses of
- DST and SRC and SIZE into new pseudos. We can then place those new
- pseudos into an RTL_EXPR and use them later, even after a call to
- emit_queue.
+ DST and SRC and SIZE into new pseudos.
Note this is not strictly needed for library calls since they do not call
emit_queue before loading their arguments. However, we may need to have
dst_tree = make_tree (ptr_type_node, dst_addr);
src_tree = make_tree (ptr_type_node, src_addr);
- if (TARGET_MEM_FUNCTIONS)
- size_mode = TYPE_MODE (sizetype);
- else
- size_mode = TYPE_MODE (unsigned_type_node);
+ size_mode = TYPE_MODE (sizetype);
size = convert_to_mode (size_mode, size, 1);
size = copy_to_mode_reg (size_mode, size);
memcpy in this context. This could be a user call to memcpy and
the user may wish to examine the return value from memcpy. For
targets where libcalls and normal calls have different conventions
- for returning pointers, we could end up generating incorrect code.
-
- For convenience, we generate the call to bcopy this way as well. */
+ for returning pointers, we could end up generating incorrect code. */
- if (TARGET_MEM_FUNCTIONS)
- size_tree = make_tree (sizetype, size);
- else
- size_tree = make_tree (unsigned_type_node, size);
+ size_tree = make_tree (sizetype, size);
fn = emit_block_move_libcall_fn (true);
arg_list = tree_cons (NULL_TREE, size_tree, NULL_TREE);
- if (TARGET_MEM_FUNCTIONS)
- {
- arg_list = tree_cons (NULL_TREE, src_tree, arg_list);
- arg_list = tree_cons (NULL_TREE, dst_tree, arg_list);
- }
- else
- {
- arg_list = tree_cons (NULL_TREE, dst_tree, arg_list);
- arg_list = tree_cons (NULL_TREE, src_tree, arg_list);
- }
+ arg_list = tree_cons (NULL_TREE, src_tree, arg_list);
+ arg_list = tree_cons (NULL_TREE, dst_tree, arg_list);
/* Now we have to build up the CALL_EXPR itself. */
call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
gen_rtx_CLOBBER (VOIDmode, dst),
NULL_RTX));
- return TARGET_MEM_FUNCTIONS ? retval : NULL_RTX;
+ return retval;
}
/* A subroutine of emit_block_move_via_libcall. Create the tree node
{
tree args, fn;
- if (TARGET_MEM_FUNCTIONS)
- {
- fn = get_identifier ("memcpy");
- args = build_function_type_list (ptr_type_node, ptr_type_node,
- const_ptr_type_node, sizetype,
- NULL_TREE);
- }
- else
- {
- fn = get_identifier ("bcopy");
- args = build_function_type_list (void_type_node, const_ptr_type_node,
- ptr_type_node, unsigned_type_node,
- NULL_TREE);
- }
+ fn = get_identifier ("memcpy");
+ args = build_function_type_list (ptr_type_node, ptr_type_node,
+ const_ptr_type_node, sizetype,
+ NULL_TREE);
fn = build_decl (FUNCTION_DECL, fn, args);
DECL_EXTERNAL (fn) = 1;
from strange tricks we might play; but make sure that the source can
be loaded directly into the destination. */
src = orig_src;
- if (GET_CODE (orig_src) != MEM
+ if (!MEM_P (orig_src)
&& (!CONSTANT_P (orig_src)
|| (GET_MODE (orig_src) != mode
&& GET_MODE (orig_src) != VOIDmode)))
}
/* Optimize the access just a bit. */
- if (GET_CODE (src) == MEM
+ if (MEM_P (src)
&& (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (src))
|| MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
mode, mode, ssize);
if (shift)
- expand_binop (mode, ashl_optab, tmps[i], GEN_INT (shift),
- tmps[i], 0, OPTAB_WIDEN);
+ tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
+ build_int_2 (shift, 0), tmps[i], 0);
}
emit_queue ();
emit_group_load (dst, temp, type, ssize);
return;
}
- else if (GET_CODE (dst) != MEM && GET_CODE (dst) != CONCAT)
+ else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
{
dst = gen_reg_rtx (GET_MODE (orig_dst));
/* Make life a bit easier for combine. */
)
{
int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
- expand_binop (mode, ashr_optab, tmps[i], GEN_INT (shift),
- tmps[i], 0, OPTAB_WIDEN);
+ tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
+ build_int_2 (shift, 0), tmps[i], 0);
}
bytelen = ssize - bytepos;
}
}
/* Optimize the access just a bit. */
- if (GET_CODE (dest) == MEM
+ if (MEM_P (dest)
&& (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest))
|| MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
clear_storage (rtx object, rtx size)
{
rtx retval = 0;
- unsigned int align = (GET_CODE (object) == MEM ? MEM_ALIGN (object)
+ unsigned int align = (MEM_P (object) ? MEM_ALIGN (object)
: GET_MODE_ALIGNMENT (GET_MODE (object)));
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
return false;
}
-/* A subroutine of clear_storage. Expand a call to memset or bzero.
+/* A subroutine of clear_storage. Expand a call to memset.
Return the return value of memset, 0 otherwise. */
static rtx
not careful we could get the wrong value for an argument.
To avoid this problem we go ahead and emit code to copy OBJECT
- and SIZE into new pseudos. We can then place those new pseudos
- into an RTL_EXPR and use them later, even after a call to
- emit_queue.
+ and SIZE into new pseudos.
Note this is not strictly needed for library calls since they
do not call emit_queue before loading their arguments. However,
object = copy_to_mode_reg (Pmode, XEXP (object, 0));
- if (TARGET_MEM_FUNCTIONS)
- size_mode = TYPE_MODE (sizetype);
- else
- size_mode = TYPE_MODE (unsigned_type_node);
+ size_mode = TYPE_MODE (sizetype);
size = convert_to_mode (size_mode, size, 1);
size = copy_to_mode_reg (size_mode, size);
memset in this context. This could be a user call to memset and
the user may wish to examine the return value from memset. For
targets where libcalls and normal calls have different conventions
- for returning pointers, we could end up generating incorrect code.
-
- For convenience, we generate the call to bzero this way as well. */
+ for returning pointers, we could end up generating incorrect code. */
object_tree = make_tree (ptr_type_node, object);
- if (TARGET_MEM_FUNCTIONS)
- size_tree = make_tree (sizetype, size);
- else
- size_tree = make_tree (unsigned_type_node, size);
+ size_tree = make_tree (sizetype, size);
fn = clear_storage_libcall_fn (true);
arg_list = tree_cons (NULL_TREE, size_tree, NULL_TREE);
- if (TARGET_MEM_FUNCTIONS)
- arg_list = tree_cons (NULL_TREE, integer_zero_node, arg_list);
+ arg_list = tree_cons (NULL_TREE, integer_zero_node, arg_list);
arg_list = tree_cons (NULL_TREE, object_tree, arg_list);
/* Now we have to build up the CALL_EXPR itself. */
if (RTX_UNCHANGING_P (object))
emit_insn (gen_rtx_CLOBBER (VOIDmode, object));
- return (TARGET_MEM_FUNCTIONS ? retval : NULL_RTX);
+ return retval;
}
/* A subroutine of clear_storage_via_libcall. Create the tree node
{
tree fn, args;
- if (TARGET_MEM_FUNCTIONS)
- {
- fn = get_identifier ("memset");
- args = build_function_type_list (ptr_type_node, ptr_type_node,
- integer_type_node, sizetype,
- NULL_TREE);
- }
- else
- {
- fn = get_identifier ("bzero");
- args = build_function_type_list (void_type_node, ptr_type_node,
- unsigned_type_node, NULL_TREE);
- }
+ fn = get_identifier ("memset");
+ args = build_function_type_list (ptr_type_node, ptr_type_node,
+ integer_type_node, sizetype,
+ NULL_TREE);
fn = build_decl (FUNCTION_DECL, fn, args);
DECL_EXTERNAL (fn) = 1;
/* If X or Y are memory references, verify that their addresses are valid
for the machine. */
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
&& ! push_operand (x, GET_MODE (x)))
|| (flag_force_addr
&& CONSTANT_ADDRESS_P (XEXP (x, 0)))))
x = validize_mem (x);
- if (GET_CODE (y) == MEM
+ if (MEM_P (y)
&& (! memory_address_p (GET_MODE (y), XEXP (y, 0))
|| (flag_force_addr
&& CONSTANT_ADDRESS_P (XEXP (y, 0)))))
if (reload_in_progress)
{
x = gen_lowpart_common (tmode, x1);
- if (x == 0 && GET_CODE (x1) == MEM)
+ if (x == 0 && MEM_P (x1))
{
x = adjust_address_nv (x1, tmode, 0);
copy_replacements (x1, x);
}
y = gen_lowpart_common (tmode, y1);
- if (y == 0 && GET_CODE (y1) == MEM)
+ if (y == 0 && MEM_P (y1))
{
y = adjust_address_nv (y1, tmode, 0);
copy_replacements (y1, y);
/* If we are in reload, see if either operand is a MEM whose address
is scheduled for replacement. */
- if (reload_in_progress && GET_CODE (x) == MEM
+ if (reload_in_progress && MEM_P (x)
&& (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
x = replace_equiv_address_nv (x, inner);
- if (reload_in_progress && GET_CODE (y) == MEM
+ if (reload_in_progress && MEM_P (y)
&& (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
y = replace_equiv_address_nv (y, inner);
{
rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
- if (GET_CODE (to_rtx) != MEM)
+ if (!MEM_P (to_rtx))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
/* A constant address in TO_RTX can have VOIDmode, we must not try
to call force_reg for that case. Avoid that case. */
- if (GET_CODE (to_rtx) == MEM
+ if (MEM_P (to_rtx)
&& GET_MODE (to_rtx) == BLKmode
&& GET_MODE (XEXP (to_rtx, 0)) != VOIDmode
&& bitsize > 0
offset));
}
- if (GET_CODE (to_rtx) == MEM)
+ if (MEM_P (to_rtx))
{
/* If the field is at offset zero, we could have been given the
DECL_RTX of the parent struct. Don't munge it. */
/* Deal with volatile and readonly fields. The former is only done
for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
- if (volatilep && GET_CODE (to_rtx) == MEM)
+ if (volatilep && MEM_P (to_rtx))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
/* We can't assert that a MEM won't be set more than once
if the component is not addressable because another
non-addressable component may be referenced by the same MEM. */
- && ! (GET_CODE (to_rtx) == MEM && ! can_address_p (to)))
+ && ! (MEM_P (to_rtx) && ! can_address_p (to)))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
RTX_UNCHANGING_P (to_rtx) = 1;
}
- if (GET_CODE (to_rtx) == MEM && ! can_address_p (to))
+ if (MEM_P (to_rtx) && ! can_address_p (to))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
}
+ /* Disabled temporarily. GET_MODE (to_rtx) is often not the right
+ mode. */
+ while (0 && mode1 == VOIDmode && !want_value
+ && bitpos + bitsize <= BITS_PER_WORD
+ && bitsize < BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (to_rtx)) <= BITS_PER_WORD
+ && !TREE_SIDE_EFFECTS (to)
+ && !TREE_THIS_VOLATILE (to))
+ {
+ tree src, op0, op1;
+ rtx value;
+ HOST_WIDE_INT count = bitpos;
+ optab binop;
+
+ src = from;
+ STRIP_NOPS (src);
+ if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE
+ || TREE_CODE_CLASS (TREE_CODE (src)) != '2')
+ break;
+
+ op0 = TREE_OPERAND (src, 0);
+ op1 = TREE_OPERAND (src, 1);
+ STRIP_NOPS (op0);
+
+ if (! operand_equal_p (to, op0, 0))
+ break;
+
+ if (BYTES_BIG_ENDIAN)
+ count = GET_MODE_BITSIZE (GET_MODE (to_rtx)) - bitpos - bitsize;
+
+ /* Special case some bitfield op= exp. */
+ switch (TREE_CODE (src))
+ {
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ if (count <= 0)
+ break;
+
+ /* For now, just optimize the case of the topmost bitfield
+ where we don't need to do any masking and also
+ 1 bit bitfields where xor can be used.
+ We might win by one instruction for the other bitfields
+ too if insv/extv instructions aren't used, so that
+ can be added later. */
+ if (count + bitsize != GET_MODE_BITSIZE (GET_MODE (to_rtx))
+ && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
+ break;
+ value = expand_expr (op1, NULL_RTX, VOIDmode, 0);
+ value = protect_from_queue (value, 0);
+ to_rtx = protect_from_queue (to_rtx, 1);
+ binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
+ if (bitsize == 1
+ && count + bitsize != GET_MODE_BITSIZE (GET_MODE (to_rtx)))
+ {
+ value = expand_and (GET_MODE (to_rtx), value, const1_rtx,
+ NULL_RTX);
+ binop = xor_optab;
+ }
+ value = expand_shift (LSHIFT_EXPR, GET_MODE (to_rtx),
+ value, build_int_2 (count, 0),
+ NULL_RTX, 1);
+ result = expand_binop (GET_MODE (to_rtx), binop, to_rtx,
+ value, to_rtx, 1, OPTAB_WIDEN);
+ if (result != to_rtx)
+ emit_move_insn (to_rtx, result);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return NULL_RTX;
+ default:
+ break;
+ }
+
+ break;
+ }
+
result = store_field (to_rtx, bitsize, bitpos, mode1, from,
(want_value
/* Spurious cast for HPUX compiler. */
size = expr_size (from);
from_rtx = expand_expr (from, NULL_RTX, VOIDmode, 0);
- if (TARGET_MEM_FUNCTIONS)
- emit_library_call (memmove_libfunc, LCT_NORMAL,
- VOIDmode, 3, XEXP (to_rtx, 0), Pmode,
- XEXP (from_rtx, 0), Pmode,
- convert_to_mode (TYPE_MODE (sizetype),
- size, TYPE_UNSIGNED (sizetype)),
- TYPE_MODE (sizetype));
- else
- emit_library_call (bcopy_libfunc, LCT_NORMAL,
- VOIDmode, 3, XEXP (from_rtx, 0), Pmode,
- XEXP (to_rtx, 0), Pmode,
- convert_to_mode (TYPE_MODE (integer_type_node),
- size,
- TYPE_UNSIGNED (integer_type_node)),
- TYPE_MODE (integer_type_node));
+ emit_library_call (memmove_libfunc, LCT_NORMAL,
+ VOIDmode, 3, XEXP (to_rtx, 0), Pmode,
+ XEXP (from_rtx, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TYPE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
preserve_temp_slots (to_rtx);
free_temp_slots ();
dont_return_target = 1;
}
else if ((want_value & 1) != 0
- && GET_CODE (target) == MEM
+ && MEM_P (target)
&& ! MEM_VOLATILE_P (target)
&& GET_MODE (target) != BLKmode)
/* If target is in memory and caller wants value in a register instead,
only necessary if the MEM is volatile, or if the address
overlaps TARGET. But not performing the load twice also
reduces the amount of rtl we generate and then have to CSE. */
- if (GET_CODE (temp) == MEM && (want_value & 1) != 0)
+ if (MEM_P (temp) && (want_value & 1) != 0)
temp = copy_to_reg (temp);
/* If TEMP is a VOIDmode constant, use convert_modes to make
or if we really want the correct value. */
if (!(target && REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER)
- && !(GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ && !(MEM_P (target) && MEM_VOLATILE_P (target))
&& ! rtx_equal_p (temp, target)
&& (CONSTANT_P (temp) || (want_value & 1) != 0))
dont_return_target = 1;
/* If we are supposed to return TEMP, do so as long as it isn't a MEM.
??? The latter test doesn't seem to make sense. */
- else if (dont_return_target && GET_CODE (temp) != MEM)
+ else if (dont_return_target && !MEM_P (temp))
return temp;
/* Return TARGET itself if it is a hard register. */
tree telts = array_type_nelts (type);
if (telts && host_integerp (telts, 1))
{
- HOST_WIDE_INT n = tree_low_cst (telts, 1);
+ HOST_WIDE_INT n = tree_low_cst (telts, 1) + 1;
HOST_WIDE_INT m = count_type_elements (TREE_TYPE (type));
if (n == 0)
return 0;
- if (max / n < m)
+ else if (max / n > m)
return n * m;
}
return -1;
/* If we have a nonzero bitpos for a register target, then we just
let store_field do the bitfield handling. This is unlikely to
generate unnecessary clear instructions anyways. */
- && (bitpos == 0 || GET_CODE (target) == MEM))
+ && (bitpos == 0 || MEM_P (target)))
{
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
target
= adjust_address (target,
GET_MODE (target) == BLKmode
/* Update the alias set, if required. */
- if (GET_CODE (target) == MEM && ! MEM_KEEP_ALIAS_SET_P (target)
+ if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
&& MEM_ALIAS_SET (target) != 0)
{
target = copy_rtx (target);
target));
offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
- if (GET_CODE (to_rtx) != MEM)
+ if (!MEM_P (to_rtx))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
if (TREE_READONLY (field))
{
- if (GET_CODE (to_rtx) == MEM)
+ if (MEM_P (to_rtx))
to_rtx = copy_rtx (to_rtx);
RTX_UNCHANGING_P (to_rtx) = 1;
}
#endif
- if (GET_CODE (to_rtx) == MEM && !MEM_KEEP_ALIAS_SET_P (to_rtx)
+ if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
&& DECL_NONADDRESSABLE_P (field))
{
to_rtx = copy_rtx (to_rtx);
&& (lo = tree_low_cst (lo_index, 0),
hi = tree_low_cst (hi_index, 0),
count = hi - lo + 1,
- (GET_CODE (target) != MEM
+ (!MEM_P (target)
|| count <= 2
|| (host_integerp (TYPE_SIZE (elttype), 1)
&& (tree_low_cst (TYPE_SIZE (elttype), 1) * count
{
bitpos = lo * tree_low_cst (TYPE_SIZE (elttype), 0);
- if (GET_CODE (target) == MEM
+ if (MEM_P (target)
&& !MEM_KEEP_ALIAS_SET_P (target)
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (type))
= gen_reg_rtx (promote_mode (domain, DECL_MODE (index),
&unsignedp, 0));
SET_DECL_RTL (index, index_r);
- if (TREE_CODE (value) == SAVE_EXPR
- && SAVE_EXPR_RTL (value) == 0)
- {
- /* Make sure value gets expanded once before the
- loop. */
- expand_expr (value, const0_rtx, VOIDmode, 0);
- emit_queue ();
- }
store_expr (lo_index, index_r, 0);
/* Build the head of the loop. */
else
bitpos = (i * tree_low_cst (TYPE_SIZE (elttype), 1));
- if (GET_CODE (target) == MEM && !MEM_KEEP_ALIAS_SET_P (target)
+ if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (type))
{
and then "or" in whatever non-constant ranges we need in addition.
If a large set is all zero or all ones, it is
- probably better to set it using memset (if available) or bzero.
+ probably better to set it using memset.
Also, if a large set has just a single range, it may also be
better to first clear all the first clear the set (using
- bzero/memset), and set the bits we want. */
+ memset), and set the bits we want. */
/* Check for all zeros. */
if (elt == NULL_TREE && size > 0)
/* The assumption here is that it is safe to use
XEXP if the set is multi-word, but not if
it's single-word. */
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
to_rtx = adjust_address (target, mode, offset);
else if (offset == 0)
to_rtx = target;
emit_move_insn (targetx, target);
}
- else if (GET_CODE (target) == MEM)
+ else if (MEM_P (target))
targetx = target;
else
abort ();
/* Optimization: If startbit and endbit are constants divisible
by BITS_PER_UNIT, call memset instead. */
- if (TARGET_MEM_FUNCTIONS
- && TREE_CODE (startbit) == INTEGER_CST
+ if (TREE_CODE (startbit) == INTEGER_CST
&& TREE_CODE (endbit) == INTEGER_CST
&& (startb = TREE_INT_CST_LOW (startbit)) % BITS_PER_UNIT == 0
&& (endb = TREE_INT_CST_LOW (endbit) + 1) % BITS_PER_UNIT == 0)
boundary. If so, we simply do a block copy. */
if (GET_MODE (target) == BLKmode && GET_MODE (temp) == BLKmode)
{
- if (GET_CODE (target) != MEM || GET_CODE (temp) != MEM
+ if (!MEM_P (target) || !MEM_P (temp)
|| bitpos % BITS_PER_UNIT != 0)
abort ();
/* The caller wants an rtx for the value.
If possible, avoid refetching from the bitfield itself. */
if (width_mask != 0
- && ! (GET_CODE (target) == MEM && MEM_VOLATILE_P (target)))
+ && ! (MEM_P (target) && MEM_VOLATILE_P (target)))
{
tree count;
enum machine_mode tmode;
else if (TREE_CODE (exp) == COMPONENT_REF)
{
tree field = TREE_OPERAND (exp, 1);
- tree this_offset = DECL_FIELD_OFFSET (field);
+ tree this_offset = component_ref_field_offset (exp);
/* If this field hasn't been filled in yet, don't go
past it. This should only happen when folding expressions
made during type construction. */
if (this_offset == 0)
break;
- else
- this_offset = SUBSTITUTE_PLACEHOLDER_IN_EXPR (this_offset, exp);
offset = size_binop (PLUS_EXPR, offset, this_offset);
bit_offset = size_binop (PLUS_EXPR, bit_offset,
|| TREE_CODE (exp) == ARRAY_RANGE_REF)
{
tree index = TREE_OPERAND (exp, 1);
- tree array = TREE_OPERAND (exp, 0);
- tree domain = TYPE_DOMAIN (TREE_TYPE (array));
- tree low_bound = (domain ? TYPE_MIN_VALUE (domain) : 0);
- tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (array)));
+ tree low_bound = array_ref_low_bound (exp);
+ tree unit_size = array_ref_element_size (exp);
/* We assume all arrays have sizes that are a multiple of a byte.
First subtract the lower bound, if any, in the type of the
index, then convert to sizetype and multiply by the size of the
array element. */
- if (low_bound != 0 && ! integer_zerop (low_bound))
+ if (! integer_zerop (low_bound))
index = fold (build (MINUS_EXPR, TREE_TYPE (index),
index, low_bound));
- /* If the index has a self-referential type, instantiate it with
- the object; likewise for the component size. */
- index = SUBSTITUTE_PLACEHOLDER_IN_EXPR (index, exp);
- unit_size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (unit_size, array);
offset = size_binop (PLUS_EXPR, offset,
size_binop (MULT_EXPR,
convert (sizetype, index),
return exp;
}
+/* Return a tree of sizetype representing the size, in bytes, of the element
+ of EXP, an ARRAY_REF. */
+
+tree
+array_ref_element_size (tree exp)
+{
+ /* Operand 3 of an ARRAY_REF, when present, carries an explicit element
+ size; operand 0 is the array object itself. */
+ tree aligned_size = TREE_OPERAND (exp, 3);
+ tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+
+ /* If a size was specified in the ARRAY_REF, it's the size measured
+ in alignment units of the element type. So multiply by that value. */
+ if (aligned_size)
+ return size_binop (MULT_EXPR, aligned_size,
+ size_int (TYPE_ALIGN (elmt_type) / BITS_PER_UNIT));
+
+ /* Otherwise, take the size from that of the element type. Substitute
+ any PLACEHOLDER_EXPR that we have. */
+ else
+ return SUBSTITUTE_PLACEHOLDER_IN_EXPR (TYPE_SIZE_UNIT (elmt_type), exp);
+}
+
+/* Return a tree representing the lower bound of the array mentioned in
+ EXP, an ARRAY_REF. */
+
+tree
+array_ref_low_bound (tree exp)
+{
+ tree domain_type = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (exp, 0)));
+
+ /* If a lower bound is specified in EXP, use it. Operand 2 of an
+ ARRAY_REF is the optional explicit lower bound. */
+ if (TREE_OPERAND (exp, 2))
+ return TREE_OPERAND (exp, 2);
+
+ /* Otherwise, if there is a domain type and it has a lower bound, use it,
+ substituting for a PLACEHOLDER_EXPR as needed. */
+ if (domain_type && TYPE_MIN_VALUE (domain_type))
+ return SUBSTITUTE_PLACEHOLDER_IN_EXPR (TYPE_MIN_VALUE (domain_type), exp);
+
+ /* Otherwise, return a zero of the appropriate type. Operand 1 is the
+ index expression, so the zero matches the index's type. */
+ return fold_convert (TREE_TYPE (TREE_OPERAND (exp, 1)), integer_zero_node);
+}
+
+/* Return a tree representing the offset, in bytes, of the field referenced
+ by EXP. This does not include any offset in DECL_FIELD_BIT_OFFSET. */
+
+tree
+component_ref_field_offset (tree exp)
+{
+ /* Operand 2 of a COMPONENT_REF, when present, is an explicit field
+ offset; operand 1 is the FIELD_DECL being referenced. */
+ tree aligned_offset = TREE_OPERAND (exp, 2);
+ tree field = TREE_OPERAND (exp, 1);
+
+ /* If an offset was specified in the COMPONENT_REF, it's the offset measured
+ in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. So multiply by that
+ value. */
+ if (aligned_offset)
+ return size_binop (MULT_EXPR, aligned_offset,
+ size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT));
+
+ /* Otherwise, take the offset from that of the field. Substitute
+ any PLACEHOLDER_EXPR that we have. */
+ else
+ return SUBSTITUTE_PLACEHOLDER_IN_EXPR (DECL_FIELD_OFFSET (field), exp);
+}
+
/* Return 1 if T is an expression that get_inner_reference handles. */
int
/* Check for subreg applied to an expression produced by loop optimizer. */
if (code == SUBREG
&& !REG_P (SUBREG_REG (value))
- && GET_CODE (SUBREG_REG (value)) != MEM)
+ && !MEM_P (SUBREG_REG (value)))
{
value = simplify_gen_subreg (GET_MODE (value),
force_reg (GET_MODE (SUBREG_REG (value)),
#ifdef INSN_SCHEDULING
/* On machines that have insn scheduling, we want all memory reference to be
explicit, so we need to deal with such paradoxical SUBREGs. */
- if (GET_CODE (value) == SUBREG && GET_CODE (SUBREG_REG (value)) == MEM
+ if (GET_CODE (value) == SUBREG && MEM_P (SUBREG_REG (value))
&& (GET_MODE_SIZE (GET_MODE (value))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (value)))))
value
{
rtx exp_rtl = 0;
int i, nops;
- static tree save_expr_list;
if (x == 0
/* If EXP has varying size, we MUST use a target since we currently
!= INTEGER_CST)
&& GET_MODE (x) == BLKmode)
/* If X is in the outgoing argument area, it is always safe. */
- || (GET_CODE (x) == MEM
+ || (MEM_P (x)
&& (XEXP (x, 0) == virtual_outgoing_args_rtx
|| (GET_CODE (XEXP (x, 0)) == PLUS
&& XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
return 0;
}
- /* A SAVE_EXPR might appear many times in the expression passed to the
- top-level safe_from_p call, and if it has a complex subexpression,
- examining it multiple times could result in a combinatorial explosion.
- E.g. on an Alpha running at least 200MHz, a Fortran testcase compiled
- with optimization took about 28 minutes to compile -- even though it was
- only a few lines long. So we mark each SAVE_EXPR we see with TREE_PRIVATE
- and turn that off when we are done. We keep a list of the SAVE_EXPRs
- we have processed. Note that the only test of top_p was above. */
-
- if (top_p)
- {
- int rtn;
- tree t;
-
- save_expr_list = 0;
-
- rtn = safe_from_p (x, exp, 0);
-
- for (t = save_expr_list; t != 0; t = TREE_CHAIN (t))
- TREE_PRIVATE (TREE_PURPOSE (t)) = 0;
-
- return rtn;
- }
-
/* Now look at our tree code and possibly recurse. */
switch (TREE_CODE_CLASS (TREE_CODE (exp)))
{
else
return 0;
+ case 's':
+ /* The only case we look at here is the DECL_INITIAL inside a
+ DECL_EXPR. */
+ return (TREE_CODE (exp) != DECL_EXPR
+ || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL
+ || !DECL_INITIAL (DECL_EXPR_DECL (exp))
+ || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
+
case '2':
case '<':
if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
if (DECL_P (exp))
{
if (!DECL_RTL_SET_P (exp)
- || GET_CODE (DECL_RTL (exp)) != MEM)
+ || !MEM_P (DECL_RTL (exp)))
return 0;
else
exp_rtl = XEXP (DECL_RTL (exp), 0);
break;
case INDIRECT_REF:
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& alias_sets_conflict_p (MEM_ALIAS_SET (x),
get_alias_set (exp)))
return 0;
/* Assume that the call will clobber all hard registers and
all of memory. */
if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
- || GET_CODE (x) == MEM)
+ || MEM_P (x))
return 0;
break;
- case RTL_EXPR:
- /* If a sequence exists, we would have to scan every instruction
- in the sequence to see if it was safe. This is probably not
- worthwhile. */
- if (RTL_EXPR_SEQUENCE (exp))
- return 0;
-
- exp_rtl = RTL_EXPR_RTL (exp);
- break;
-
case WITH_CLEANUP_EXPR:
exp_rtl = WITH_CLEANUP_EXPR_RTL (exp);
break;
case CLEANUP_POINT_EXPR:
- return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
-
case SAVE_EXPR:
- exp_rtl = SAVE_EXPR_RTL (exp);
- if (exp_rtl)
- break;
-
- /* If we've already scanned this, don't do it again. Otherwise,
- show we've scanned it and record for clearing the flag if we're
- going on. */
- if (TREE_PRIVATE (exp))
- return 1;
-
- TREE_PRIVATE (exp) = 1;
- if (! safe_from_p (x, TREE_OPERAND (exp, 0), 0))
- {
- TREE_PRIVATE (exp) = 0;
- return 0;
- }
-
- save_expr_list = tree_cons (exp, NULL_TREE, save_expr_list);
- return 1;
+ return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
case BIND_EXPR:
/* The only operand we look at is operand 1. The rest aren't
/* If the rtl is X, then it is not safe. Otherwise, it is unless both
are memory and they conflict. */
return ! (rtx_equal_p (x, exp_rtl)
- || (GET_CODE (x) == MEM && GET_CODE (exp_rtl) == MEM
+ || (MEM_P (x) && MEM_P (exp_rtl)
&& true_dependence (exp_rtl, VOIDmode, x,
rtx_addr_varies_p)));
}
emit_line_note (input_location);
/* Record where the insns produced belong. */
- if (cfun->dont_emit_block_notes)
- record_block_change (TREE_BLOCK (exp));
+ record_block_change (TREE_BLOCK (exp));
ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl);
original_target = target;
ignore = (target == const0_rtx
|| ((code == NON_LVALUE_EXPR || code == NOP_EXPR
- || code == CONVERT_EXPR || code == REFERENCE_EXPR
- || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
+ || code == CONVERT_EXPR || code == COND_EXPR
+ || code == VIEW_CONVERT_EXPR)
&& TREE_CODE (type) == VOID_TYPE));
/* If we are going to ignore this result, we need only do something
&& modifier != EXPAND_CONST_ADDRESS)
{
temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
- if (GET_CODE (temp) == MEM)
+ if (MEM_P (temp))
temp = copy_to_reg (temp);
return const0_rtx;
}
if (context != 0 && context != current_function_decl
/* If var is static, we don't need a static chain to access it. */
- && ! (GET_CODE (DECL_RTL (exp)) == MEM
+ && ! (MEM_P (DECL_RTL (exp))
&& CONSTANT_P (XEXP (DECL_RTL (exp), 0))))
{
rtx addr;
if (DECL_NO_STATIC_CHAIN (current_function_decl))
abort ();
lang_hooks.mark_addressable (exp);
- if (GET_CODE (DECL_RTL (exp)) != MEM)
+ if (!MEM_P (DECL_RTL (exp)))
abort ();
addr = XEXP (DECL_RTL (exp), 0);
- if (GET_CODE (addr) == MEM)
+ if (MEM_P (addr))
addr
= replace_equiv_address (addr,
fix_lexical_addr (XEXP (addr, 0), exp));
from its initializer, while the initializer is still being parsed.
See expand_decl. */
- else if (GET_CODE (DECL_RTL (exp)) == MEM
+ else if (MEM_P (DECL_RTL (exp))
&& REG_P (XEXP (DECL_RTL (exp), 0)))
temp = validize_mem (DECL_RTL (exp));
the address is not valid or it is not a register and -fforce-addr
is specified, get the address into a register. */
- else if (GET_CODE (DECL_RTL (exp)) == MEM
+ else if (MEM_P (DECL_RTL (exp))
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_SUM
&& modifier != EXPAND_INITIALIZER
if the address is a register. */
if (temp != 0)
{
- if (GET_CODE (temp) == MEM && REG_P (XEXP (temp, 0)))
+ if (MEM_P (temp) && REG_P (XEXP (temp, 0)))
mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
return temp;
return temp;
case SAVE_EXPR:
- context = decl_function_context (exp);
-
- /* If this SAVE_EXPR was at global context, assume we are an
- initialization function and move it into our context. */
- if (context == 0)
- SAVE_EXPR_CONTEXT (exp) = current_function_decl;
-
- if (context == current_function_decl)
- context = 0;
-
- /* If this is non-local, handle it. */
- if (context)
- {
- /* The following call just exists to abort if the context is
- not of a containing function. */
- find_function_data (context);
-
- temp = SAVE_EXPR_RTL (exp);
- if (temp && REG_P (temp))
- {
- put_var_into_stack (exp, /*rescan=*/true);
- temp = SAVE_EXPR_RTL (exp);
- }
- if (temp == 0 || GET_CODE (temp) != MEM)
- abort ();
- return
- replace_equiv_address (temp,
- fix_lexical_addr (XEXP (temp, 0), exp));
- }
- if (SAVE_EXPR_RTL (exp) == 0)
- {
- if (mode == VOIDmode)
- temp = const0_rtx;
- else
- temp = assign_temp (build_qualified_type (type,
- (TYPE_QUALS (type)
- | TYPE_QUAL_CONST)),
- 3, 0, 0);
-
- SAVE_EXPR_RTL (exp) = temp;
- if (!optimize && REG_P (temp))
- save_expr_regs = gen_rtx_EXPR_LIST (VOIDmode, temp,
- save_expr_regs);
-
- /* If the mode of TEMP does not match that of the expression, it
- must be a promoted value. We pass store_expr a SUBREG of the
- wanted mode but mark it so that we know that it was already
- extended. */
-
- if (REG_P (temp) && GET_MODE (temp) != mode)
- {
- temp = gen_lowpart_SUBREG (mode, SAVE_EXPR_RTL (exp));
- promote_mode (type, mode, &unsignedp, 0);
- SUBREG_PROMOTED_VAR_P (temp) = 1;
- SUBREG_PROMOTED_UNSIGNED_SET (temp, unsignedp);
- }
-
- if (temp == const0_rtx)
- expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
- else
- store_expr (TREE_OPERAND (exp, 0), temp,
- modifier == EXPAND_STACK_PARM ? 2 : 0);
+ {
+ tree val = TREE_OPERAND (exp, 0);
+ rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl);
- TREE_USED (exp) = 1;
- }
+ if (TREE_CODE (val) != VAR_DECL || !DECL_ARTIFICIAL (val))
+ {
+ /* We can indeed still hit this case, typically via builtin
+ expanders calling save_expr immediately before expanding
+ something. Assume this means that we only have to deal
+ with non-BLKmode values. */
+ if (GET_MODE (ret) == BLKmode)
+ abort ();
- /* If the mode of SAVE_EXPR_RTL does not match that of the expression, it
- must be a promoted value. We return a SUBREG of the wanted mode,
- but mark it so that we know that it was already extended. */
+ val = build_decl (VAR_DECL, NULL, TREE_TYPE (exp));
+ DECL_ARTIFICIAL (val) = 1;
+ TREE_OPERAND (exp, 0) = val;
- if (REG_P (SAVE_EXPR_RTL (exp))
- && GET_MODE (SAVE_EXPR_RTL (exp)) != mode)
- {
- /* Compute the signedness and make the proper SUBREG. */
- promote_mode (type, mode, &unsignedp, 0);
- temp = gen_lowpart_SUBREG (mode, SAVE_EXPR_RTL (exp));
- SUBREG_PROMOTED_VAR_P (temp) = 1;
- SUBREG_PROMOTED_UNSIGNED_SET (temp, unsignedp);
- return temp;
- }
+ if (!CONSTANT_P (ret))
+ ret = copy_to_reg (ret);
+ SET_DECL_RTL (val, ret);
+ }
- return SAVE_EXPR_RTL (exp);
+ return ret;
+ }
case UNSAVE_EXPR:
{
case LABELED_BLOCK_EXPR:
if (LABELED_BLOCK_BODY (exp))
- expand_expr_stmt_value (LABELED_BLOCK_BODY (exp), 0, 1);
+ expand_expr_stmt (LABELED_BLOCK_BODY (exp));
/* Should perhaps use expand_label, but this is simpler and safer. */
do_pending_stack_adjust ();
emit_label (label_rtx (LABELED_BLOCK_LABEL (exp)));
tree block = BIND_EXPR_BLOCK (exp);
int mark_ends;
- if (TREE_CODE (BIND_EXPR_BODY (exp)) != RTL_EXPR)
- {
- /* If we're in functions-as-trees mode, this BIND_EXPR represents
- the block, so we need to emit NOTE_INSN_BLOCK_* notes. */
- mark_ends = (block != NULL_TREE);
- expand_start_bindings_and_block (mark_ends ? 0 : 2, block);
- }
- else
- {
- /* If we're not in functions-as-trees mode, we've already emitted
- those notes into our RTL_EXPR, so we just want to splice our BLOCK
- into the enclosing one. */
- mark_ends = 0;
-
- /* Need to open a binding contour here because
- if there are any cleanups they must be contained here. */
- expand_start_bindings_and_block (2, NULL_TREE);
-
- /* Mark the corresponding BLOCK for output in its proper place. */
- if (block)
- {
- if (TREE_USED (block))
- abort ();
- lang_hooks.decls.insert_block (block);
- }
- }
+ /* If we're in functions-as-trees mode, this BIND_EXPR represents
+ the block, so we need to emit NOTE_INSN_BLOCK_* notes. */
+ mark_ends = (block != NULL_TREE);
+ expand_start_bindings_and_block (mark_ends ? 0 : 2, block);
/* If VARS have not yet been expanded, expand them now. */
expand_vars (BIND_EXPR_VARS (exp));
return temp;
}
- case RTL_EXPR:
- if (RTL_EXPR_SEQUENCE (exp))
- {
- if (RTL_EXPR_SEQUENCE (exp) == const0_rtx)
- abort ();
- emit_insn (RTL_EXPR_SEQUENCE (exp));
- RTL_EXPR_SEQUENCE (exp) = const0_rtx;
- }
- preserve_rtl_expr_result (RTL_EXPR_RTL (exp));
- free_temps_for_rtl_expr (exp);
- if (alt_rtl)
- *alt_rtl = RTL_EXPR_ALT_RTL (exp);
- return RTL_EXPR_RTL (exp);
-
case CONSTRUCTOR:
/* If we don't need the result, just ensure we evaluate any
subexpressions. */
{
tree array = TREE_OPERAND (exp, 0);
- tree domain = TYPE_DOMAIN (TREE_TYPE (array));
- tree low_bound = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
+ tree low_bound = array_ref_low_bound (exp);
tree index = convert (sizetype, TREE_OPERAND (exp, 1));
HOST_WIDE_INT i;
C, but can in Ada if we have unchecked conversion of an expression
from a scalar type to an array or record type or for an
ARRAY_RANGE_REF whose type is BLKmode. */
- else if (GET_CODE (op0) != MEM
+ else if (!MEM_P (op0)
&& (offset != 0
|| (code == ARRAY_RANGE_REF && mode == BLKmode)))
{
- /* If the operand is a SAVE_EXPR, we can deal with this by
- forcing the SAVE_EXPR into memory. */
- if (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR)
- {
- put_var_into_stack (TREE_OPERAND (exp, 0),
- /*rescan=*/true);
- op0 = SAVE_EXPR_RTL (TREE_OPERAND (exp, 0));
- }
- else
- {
- tree nt
- = build_qualified_type (TREE_TYPE (tem),
- (TYPE_QUALS (TREE_TYPE (tem))
- | TYPE_QUAL_CONST));
- rtx memloc = assign_temp (nt, 1, 1, 1);
+ tree nt = build_qualified_type (TREE_TYPE (tem),
+ (TYPE_QUALS (TREE_TYPE (tem))
+ | TYPE_QUAL_CONST));
+ rtx memloc = assign_temp (nt, 1, 1, 1);
- emit_move_insn (memloc, op0);
- op0 = memloc;
- }
+ emit_move_insn (memloc, op0);
+ op0 = memloc;
}
if (offset != 0)
rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
EXPAND_SUM);
- if (GET_CODE (op0) != MEM)
+ if (!MEM_P (op0))
abort ();
#ifdef POINTERS_EXTEND_UNSIGNED
/* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
record its alignment as BIGGEST_ALIGNMENT. */
- if (GET_CODE (op0) == MEM && bitpos == 0 && offset != 0
+ if (MEM_P (op0) && bitpos == 0 && offset != 0
&& is_aligning_offset (offset, tem))
set_mem_align (op0, BIGGEST_ALIGNMENT);
/* Don't forget about volatility even if this is a bitfield. */
- if (GET_CODE (op0) == MEM && volatilep && ! MEM_VOLATILE_P (op0))
+ if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
{
if (op0 == orig_op0)
op0 = copy_rtx (op0);
|| (mode1 != BLKmode
&& (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
|| (bitpos % GET_MODE_ALIGNMENT (mode) != 0)
- || (GET_CODE (op0) == MEM
+ || (MEM_P (op0)
&& (MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
|| (bitpos % GET_MODE_ALIGNMENT (mode1) != 0))))
&& ((modifier == EXPAND_CONST_ADDRESS
enum machine_mode ext_mode = mode;
if (ext_mode == BLKmode
- && ! (target != 0 && GET_CODE (op0) == MEM
- && GET_CODE (target) == MEM
+ && ! (target != 0 && MEM_P (op0)
+ && MEM_P (target)
&& bitpos % BITS_PER_UNIT == 0))
ext_mode = mode_for_size (bitsize, MODE_INT, 1);
/* In this case, BITPOS must start at a byte boundary and
TARGET, if specified, must be a MEM. */
- if (GET_CODE (op0) != MEM
- || (target != 0 && GET_CODE (target) != MEM)
+ if (!MEM_P (op0)
+ || (target != 0 && !MEM_P (target))
|| bitpos % BITS_PER_UNIT != 0)
abort ();
op0 = validize_mem (op0);
- if (GET_CODE (op0) == MEM && REG_P (XEXP (op0, 0)))
+ if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
return target;
}
- case VTABLE_REF:
- {
- rtx insn, before = get_last_insn (), vtbl_ref;
-
- /* Evaluate the interior expression. */
- subtarget = expand_expr (TREE_OPERAND (exp, 0), target,
- tmode, modifier);
-
- /* Get or create an instruction off which to hang a note. */
- if (REG_P (subtarget))
- {
- target = subtarget;
- insn = get_last_insn ();
- if (insn == before)
- abort ();
- if (! INSN_P (insn))
- insn = prev_nonnote_insn (insn);
- }
- else
- {
- target = gen_reg_rtx (GET_MODE (subtarget));
- insn = emit_move_insn (target, subtarget);
- }
-
- /* Collect the data for the note. */
- vtbl_ref = XEXP (DECL_RTL (TREE_OPERAND (exp, 1)), 0);
- vtbl_ref = plus_constant (vtbl_ref,
- tree_low_cst (TREE_OPERAND (exp, 2), 0));
- /* Discard the initial CONST that was added. */
- vtbl_ref = XEXP (vtbl_ref, 0);
-
- REG_NOTES (insn)
- = gen_rtx_EXPR_LIST (REG_VTABLE_REF, vtbl_ref, REG_NOTES (insn));
-
- return target;
- }
+ case OBJ_TYPE_REF:
+ return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier);
/* Intended for a reference to a buffer of a file-object in Pascal.
But it's not certain that a special tree code will really be
case NON_LVALUE_EXPR:
case NOP_EXPR:
case CONVERT_EXPR:
- case REFERENCE_EXPR:
if (TREE_OPERAND (exp, 0) == error_mark_node)
return const0_rtx;
target = assign_temp (type, 0, 1, 1);
}
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
/* Store data into beginning of memory target. */
store_expr (TREE_OPERAND (exp, 0),
adjust_address (target, TYPE_MODE (valtype), 0),
&& GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD
&& GET_MODE_SIZE (GET_MODE (op0)) <= UNITS_PER_WORD)
op0 = gen_lowpart (TYPE_MODE (type), op0);
- else if (GET_CODE (op0) != MEM)
+ else if (!MEM_P (op0))
{
/* If the operand is not a MEM, force it into memory. Since we
               are going to be changing the mode of the MEM, don't call
that the operand is known to be aligned, indicate that it is.
Otherwise, we need only be concerned about alignment for non-BLKmode
results. */
- if (GET_CODE (op0) == MEM)
+ if (MEM_P (op0))
{
op0 = copy_rtx (op0);
&& (GET_MODE_CLASS (mode) == MODE_INT)
? addv_optab : add_optab;
- /* If we are adding a constant, an RTL_EXPR that is sp, fp, or ap, and
+ /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
something else, make sure we add the register to the constant and
then to the other thing. This case can occur during strength
reduction and doing it this way will produce better code if the
if (TREE_CODE (TREE_OPERAND (exp, 0)) == PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 1)) == INTEGER_CST
- && TREE_CODE (TREE_OPERAND (exp, 1)) == RTL_EXPR
- && (RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == frame_pointer_rtx
- || RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == stack_pointer_rtx
- || RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == arg_pointer_rtx))
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == VAR_DECL
+ && (DECL_RTL (TREE_OPERAND (exp, 1)) == frame_pointer_rtx
+ || DECL_RTL (TREE_OPERAND (exp, 1)) == stack_pointer_rtx
+ || DECL_RTL (TREE_OPERAND (exp, 1)) == arg_pointer_rtx))
{
tree t = TREE_OPERAND (exp, 1);
target = original_target;
if (target == 0
|| modifier == EXPAND_STACK_PARM
- || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || (MEM_P (target) && MEM_VOLATILE_P (target))
|| GET_MODE (target) != mode
|| (REG_P (target)
&& REGNO (target) < FIRST_PSEUDO_REGISTER))
/* At this point, a MEM target is no longer useful; we will get better
code without it. */
- if (GET_CODE (target) == MEM)
+ if (MEM_P (target))
target = gen_reg_rtx (mode);
/* If op1 was placed in target, swap op0 and op1. */
if (EXPR_HAS_LOCATION (exp))
{
emit_line_note (EXPR_LOCATION (exp));
- if (cfun->dont_emit_block_notes)
- record_block_change (TREE_BLOCK (exp));
+ record_block_change (TREE_BLOCK (exp));
}
expand_elseif (TREE_OPERAND (exp, 0));
expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, 0);
|| REG_P (original_target)
|| TREE_ADDRESSABLE (type))
#endif
- && (GET_CODE (original_target) != MEM
+ && (!MEM_P (original_target)
|| TREE_ADDRESSABLE (type)))
temp = original_target;
else if (TREE_ADDRESSABLE (type))
{
target = assign_temp (type, 2, 0, 1);
SET_DECL_RTL (slot, target);
- if (TREE_ADDRESSABLE (slot))
- put_var_into_stack (slot, /*rescan=*/false);
/* Since SLOT is not known to the called function
to belong to its stack frame, we must build an explicit
return target;
}
else
- {
- SET_DECL_RTL (slot, target);
- /* If we must have an addressable slot, then make sure that
- the RTL that we just stored in slot is OK. */
- if (TREE_ADDRESSABLE (slot))
- put_var_into_stack (slot, /*rescan=*/true);
- }
+ SET_DECL_RTL (slot, target);
}
exp1 = TREE_OPERAND (exp, 3) = TREE_OPERAND (exp, 1);
op0 = force_const_mem (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))),
op0);
else if (REG_P (op0) || GET_CODE (op0) == SUBREG
- || GET_CODE (op0) == CONCAT || GET_CODE (op0) == ADDRESSOF
- || GET_CODE (op0) == PARALLEL || GET_CODE (op0) == LO_SUM)
+ || GET_CODE (op0) == CONCAT || GET_CODE (op0) == PARALLEL
+ || GET_CODE (op0) == LO_SUM)
{
- /* If the operand is a SAVE_EXPR, we can deal with this by
- forcing the SAVE_EXPR into memory. */
- if (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR)
- {
- put_var_into_stack (TREE_OPERAND (exp, 0),
- /*rescan=*/true);
- op0 = SAVE_EXPR_RTL (TREE_OPERAND (exp, 0));
- }
+ /* If this object is in a register, it can't be BLKmode. */
+ tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
+ rtx memloc = assign_temp (inner_type, 1, 1, 1);
+
+ if (GET_CODE (op0) == PARALLEL)
+ /* Handle calls that pass values in multiple
+ non-contiguous locations. The Irix 6 ABI has examples
+ of this. */
+ emit_group_store (memloc, op0, inner_type,
+ int_size_in_bytes (inner_type));
else
- {
- /* If this object is in a register, it can't be BLKmode. */
- tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
- rtx memloc = assign_temp (inner_type, 1, 1, 1);
-
- if (GET_CODE (op0) == PARALLEL)
- /* Handle calls that pass values in multiple
- non-contiguous locations. The Irix 6 ABI has examples
- of this. */
- emit_group_store (memloc, op0, inner_type,
- int_size_in_bytes (inner_type));
- else
- emit_move_insn (memloc, op0);
+ emit_move_insn (memloc, op0);
- op0 = memloc;
- }
+ op0 = memloc;
}
- if (GET_CODE (op0) != MEM)
+ if (!MEM_P (op0))
abort ();
mark_temp_addr_taken (op0);
if (case_low && case_high)
{
/* Case label is less than minimum for type. */
- if ((tree_int_cst_compare (case_low, min_value) < 0)
- && (tree_int_cst_compare (case_high, min_value) < 0))
+ if (TREE_CODE (min_value) == INTEGER_CST
+ && tree_int_cst_compare (case_low, min_value) < 0
+ && tree_int_cst_compare (case_high, min_value) < 0)
{
warning ("case label value %d is less than minimum value for type",
TREE_INT_CST (case_low));
}
/* Case value is greater than maximum for type. */
- if ((tree_int_cst_compare (case_low, max_value) > 0)
- && (tree_int_cst_compare (case_high, max_value) > 0))
+ if (TREE_CODE (max_value) == INTEGER_CST
+ && tree_int_cst_compare (case_low, max_value) > 0
+ && tree_int_cst_compare (case_high, max_value) > 0)
{
warning ("case label value %d exceeds maximum value for type",
TREE_INT_CST (case_high));
}
/* Saturate lower case label value to minimum. */
- if ((tree_int_cst_compare (case_high, min_value) >= 0)
- && (tree_int_cst_compare (case_low, min_value) < 0))
+ if (TREE_CODE (min_value) == INTEGER_CST
+ && tree_int_cst_compare (case_high, min_value) >= 0
+ && tree_int_cst_compare (case_low, min_value) < 0)
{
warning ("lower value %d in case label range less than minimum value for type",
TREE_INT_CST (case_low));
}
/* Saturate upper case label value to maximum. */
- if ((tree_int_cst_compare (case_low, max_value) <= 0)
- && (tree_int_cst_compare (case_high, max_value) > 0))
+ if (TREE_CODE (max_value) == INTEGER_CST
+ && tree_int_cst_compare (case_low, max_value) <= 0
+ && tree_int_cst_compare (case_high, max_value) > 0)
{
warning ("upper value %d in case label range exceeds maximum value for type",
TREE_INT_CST (case_high));
return enqueue_insn (op0, GEN_FCN (icode) (op0, op0, op1));
}
- if (icode != (int) CODE_FOR_nothing && GET_CODE (op0) == MEM)
+ if (icode != (int) CODE_FOR_nothing && MEM_P (op0))
{
rtx addr = (general_operand (XEXP (op0, 0), mode)
? force_reg (Pmode, XEXP (op0, 0))
return gen_rtx_raw_CONST_VECTOR (mode, v);
}
-\f
-/* Called to move the SAVE_EXPRs for parameter declarations in a
- nested function into the nested function. DATA is really the
- nested FUNCTION_DECL. */
-
-static tree
-set_save_expr_context (tree *tp,
- int *walk_subtrees,
- void *data)
-{
- if (TREE_CODE (*tp) == SAVE_EXPR && !SAVE_EXPR_CONTEXT (*tp))
- SAVE_EXPR_CONTEXT (*tp) = (tree) data;
- /* Do not walk back into the SAVE_EXPR_CONTEXT; that will cause
- circularity. */
- else if (DECL_P (*tp))
- *walk_subtrees = 0;
-
- return NULL;
-}
-
-
-static void
-execute_expand (void)
-{
- /* If the function has a variably modified type, there may be
- SAVE_EXPRs in the parameter types. Their context must be set to
- refer to this function; they cannot be expanded in the containing
- function. */
- if (decl_function_context (current_function_decl) == current_function_decl
- && variably_modified_type_p (TREE_TYPE (current_function_decl)))
- walk_tree (&TREE_TYPE (current_function_decl), set_save_expr_context,
- current_function_decl, NULL);
-
- /* Expand the variables recorded during gimple lowering. This must
- occur before the call to expand_function_start to ensure that
- all used variables are expanded before we expand anything on the
- PENDING_SIZES list. */
- expand_used_vars ();
-
- /* Set up parameters and prepare for return, for the function. */
- expand_function_start (current_function_decl, 0);
-
- /* If this function is `main', emit a call to `__main'
- to run global initializers, etc. */
- if (DECL_NAME (current_function_decl)
- && MAIN_NAME_P (DECL_NAME (current_function_decl))
- && DECL_FILE_SCOPE_P (current_function_decl))
- expand_main_function ();
-
- /* Generate the RTL for this function. */
- expand_expr_stmt_value (DECL_SAVED_TREE (current_function_decl), 0, 0);
-
- /* We hard-wired immediate_size_expand to zero above.
- expand_function_end will decrement this variable. So, we set the
- variable to one here, so that after the decrement it will remain
- zero. */
- immediate_size_expand = 1;
-
- /* Make sure the locus is set to the end of the function, so that
- epilogue line numbers and warnings are set properly. */
- if (cfun->function_end_locus.file)
- input_location = cfun->function_end_locus;
-
- /* The following insns belong to the top scope. */
- record_block_change (DECL_INITIAL (current_function_decl));
-
- /* Generate rtl for function exit. */
- expand_function_end ();
-}
-
-struct tree_opt_pass pass_expand =
-{
- "expand", /* name */
- NULL, /* gate */
- execute_expand, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_EXPAND, /* tv_id */
- /* ??? If TER is enabled, we actually receive GENERIC. */
- PROP_gimple_leh, /* properties_required */
- PROP_rtl, /* properties_provided */
- PROP_cfg | PROP_gimple_leh, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
-};
-
-
#include "gt-expr.h"