static rtx compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
static int is_zeros_p (tree);
-static int mostly_zeros_p (tree);
static void store_constructor_field (rtx, unsigned HOST_WIDE_INT,
HOST_WIDE_INT, enum machine_mode,
tree, tree, int, int);
void
init_expr (void)
{
- cfun->expr = (struct expr_status *) ggc_alloc (sizeof (struct expr_status));
+ cfun->expr = ggc_alloc (sizeof (struct expr_status));
pending_chain = 0;
pending_stack_adjust = 0;
return temp;
}
\f
-/* This macro is used to determine what the largest unit size that
- move_by_pieces can use is. */
-
-/* MOVE_MAX_PIECES is the number of bytes at a time which we can
- move efficiently, as opposed to MOVE_MAX which is the maximum
- number of bytes we can move with a single instruction. */
-
-#ifndef MOVE_MAX_PIECES
-#define MOVE_MAX_PIECES MOVE_MAX
-#endif
-
/* STORE_MAX_PIECES is the number of bytes at a time that we can
store efficiently. Due to internal GCC limitations, this is
MOVE_MAX_PIECES limited by the number of bytes GCC can represent
for an immediate constant. */
/* Set MEM_SIZE as appropriate for this block copy. The main place this
can be incorrect is coming from __builtin_memcpy. */
if (GET_CODE (size) == CONST_INT)
{
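+ /* A zero-byte copy needs no code; return 0, since there is no
+ memcpy return value to propagate. */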
+ if (INTVAL (size) == 0)
+ return 0;
+
x = shallow_copy_rtx (x);
y = shallow_copy_rtx (y);
set_mem_size (x, size);
abort ();
length = XVECLEN (orig, 0);
- tmps = (rtx *) alloca (sizeof (rtx) * length);
+ tmps = alloca (sizeof (rtx) * length);
/* Skip a NULL entry in first slot. */
i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;
return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
}
-/* Emit code to move a block SRC to a block DST, where DST is non-consecutive
- registers represented by a PARALLEL. SSIZE represents the total size of
- block SRC in bytes, or -1 if not known. */
-/* ??? If SSIZE % UNITS_PER_WORD != 0, we make the blatant assumption that
- the balance will be in what would be the low-order memory addresses, i.e.
- left justified for big endian, right justified for little endian. This
- happens to be true for the targets currently using this support. If this
- ever changes, a new target macro along the lines of FUNCTION_ARG_PADDING
- would be needed. */
+/* Emit code to move a block ORIG_SRC of type TYPE to a block DST,
+ where DST is non-consecutive registers represented by a PARALLEL.
+ SSIZE represents the total size of block ORIG_SRC in bytes, or -1
+ if not known. */
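+
+/* For illustration (register numbers chosen arbitrarily): a DST for
+ a 16-byte block loaded into two registers might be
+ (parallel [(expr_list (reg:DI 3) (const_int 0))
+ (expr_list (reg:DI 4) (const_int 8))])
+ where the second operand of each EXPR_LIST gives the byte offset of
+ that piece within the block. */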
void
-emit_group_load (rtx dst, rtx orig_src, int ssize)
+emit_group_load (rtx dst, rtx orig_src, tree type ATTRIBUTE_UNUSED, int ssize)
{
rtx *tmps, src;
int start, i;
else
start = 1;
- tmps = (rtx *) alloca (sizeof (rtx) * XVECLEN (dst, 0));
+ tmps = alloca (sizeof (rtx) * XVECLEN (dst, 0));
/* Process the pieces. */
for (i = start; i < XVECLEN (dst, 0); i++)
/* Handle trailing fragments that run over the size of the struct. */
if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
{
- shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
+ /* Arrange to shift the fragment to where it belongs.
+ extract_bit_field loads to the lsb of the reg. */
+ if (
+#ifdef BLOCK_REG_PADDING
+ BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
+ == (BYTES_BIG_ENDIAN ? upward : downward)
+#else
+ BYTES_BIG_ENDIAN
+#endif
+ )
+ shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
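+ /* For example, with SSIZE == 6, BYTEPOS == 4 and a 4-byte BYTELEN,
+ only two bytes are meaningful, so the shift is (4 - 2) * 8 == 16
+ bits. */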
bytelen = ssize - bytepos;
if (bytelen <= 0)
abort ();
/* Optimize the access just a bit. */
if (GET_CODE (src) == MEM
- && MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode)
+ && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (src))
+ || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
&& bytelen == GET_MODE_SIZE (mode))
{
bytepos * BITS_PER_UNIT, 1, NULL_RTX,
mode, mode, ssize);
- if (BYTES_BIG_ENDIAN && shift)
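+ /* SHIFT is nonzero only when the padding rules above call for it,
+ so no separate endianness check is needed here. */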
+ if (shift)
expand_binop (mode, ashl_optab, tmps[i], GEN_INT (shift),
tmps[i], 0, OPTAB_WIDEN);
}
XEXP (XVECEXP (src, 0, i), 0));
}
-/* Emit code to move a block SRC to a block DST, where SRC is non-consecutive
- registers represented by a PARALLEL. SSIZE represents the total size of
- block DST, or -1 if not known. */
+/* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
+ where SRC is non-consecutive registers represented by a PARALLEL.
+ SSIZE represents the total size of block ORIG_DST, or -1 if not
+ known. */
void
-emit_group_store (rtx orig_dst, rtx src, int ssize)
+emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
{
rtx *tmps, dst;
int start, i;
else
start = 1;
- tmps = (rtx *) alloca (sizeof (rtx) * XVECLEN (src, 0));
+ tmps = alloca (sizeof (rtx) * XVECLEN (src, 0));
/* Copy the (probable) hard regs into pseudos. */
for (i = start; i < XVECLEN (src, 0); i++)
the temporary. */
temp = assign_stack_temp (GET_MODE (dst), ssize, 0);
- emit_group_store (temp, src, ssize);
- emit_group_load (dst, temp, ssize);
+ emit_group_store (temp, src, type, ssize);
+ emit_group_load (dst, temp, type, ssize);
return;
}
else if (GET_CODE (dst) != MEM && GET_CODE (dst) != CONCAT)
/* Handle trailing fragments that run over the size of the struct. */
if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
{
- if (BYTES_BIG_ENDIAN)
+ /* store_bit_field always takes its value from the lsb.
+ Move the fragment to the lsb if it's not already there. */
+ if (
+#ifdef BLOCK_REG_PADDING
+ BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
+ == (BYTES_BIG_ENDIAN ? upward : downward)
+#else
+ BYTES_BIG_ENDIAN
+#endif
+ )
{
int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
expand_binop (mode, ashr_optab, tmps[i], GEN_INT (shift),
/* Optimize the access just a bit. */
if (GET_CODE (dest) == MEM
- && MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode)
+ && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest))
+ || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
&& bytelen == GET_MODE_SIZE (mode))
emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
int reverse;
rtx cst;
+ if (len == 0)
+ return 1;
+
if (! STORE_BY_PIECES_P (len, align))
return 0;
{
struct store_by_pieces data;
+ if (len == 0)
+ {
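+ /* ENDP == 2 requests the address of the last byte stored, which
+ does not exist for a zero-length store. */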
+ if (endp == 2)
+ abort ();
+ return to;
+ }
+
if (! STORE_BY_PIECES_P (len, align))
abort ();
to = protect_from_queue (to, 1);
{
struct store_by_pieces data;
+ if (len == 0)
+ return;
+
data.constfun = clear_by_pieces_1;
data.constfundata = NULL;
data.len = len;
object = protect_from_queue (object, 1);
size = protect_from_queue (size, 0);
- if (GET_CODE (size) == CONST_INT
+ if (size == const0_rtx)
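+ /* Nothing to clear. */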
+ ;
+ else if (GET_CODE (size) == CONST_INT
&& CLEAR_BY_PIECES_P (INTVAL (size), align))
clear_by_pieces (object, INTVAL (size), align);
else if (clear_storage_via_clrstr (object, size, align))
/* Handle calls that pass values in multiple non-contiguous locations.
The Irix 6 ABI has examples of this. */
if (GET_CODE (reg) == PARALLEL)
- emit_group_load (reg, x, -1); /* ??? size? */
+ emit_group_load (reg, x, type, -1);
else
move_block_to_reg (REGNO (reg), x, partial, mode);
}
If WANT_VALUE is nonzero, return an rtx for the value of TO.
(This may contain a QUEUED rtx;
if the value is constant, this rtx is a constant.)
- Otherwise, the returned value is NULL_RTX.
-
- SUGGEST_REG is no longer actually used.
- It used to mean, copy the value through a register
- and return that register, if that is possible.
- We now use WANT_VALUE to decide whether to do this. */
+ Otherwise, the returned value is NULL_RTX. */
rtx
-expand_assignment (tree to, tree from, int want_value,
- int suggest_reg ATTRIBUTE_UNUSED)
+expand_assignment (tree to, tree from, int want_value)
{
rtx to_rtx = 0;
rtx result;
/* Handle calls that return values in multiple non-contiguous locations.
The Irix 6 ABI has examples of this. */
if (GET_CODE (to_rtx) == PARALLEL)
- emit_group_load (to_rtx, value, int_size_in_bytes (TREE_TYPE (from)));
+ emit_group_load (to_rtx, value, TREE_TYPE (from),
+ int_size_in_bytes (TREE_TYPE (from)));
else if (GET_MODE (to_rtx) == BLKmode)
emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
else
temp = expand_expr (from, 0, GET_MODE (to_rtx), 0);
if (GET_CODE (to_rtx) == PARALLEL)
- emit_group_load (to_rtx, temp, int_size_in_bytes (TREE_TYPE (from)));
+ emit_group_load (to_rtx, temp, TREE_TYPE (from),
+ int_size_in_bytes (TREE_TYPE (from)));
else
emit_move_insn (to_rtx, temp);
{
/* C++ can generate ?: expressions with a throw expression in one
branch and an rvalue in the other. Here, we resolve attempts to
- store the throw expression's nonexistant result. */
+ store the throw expression's nonexistent result. */
if (want_value)
abort ();
expand_expr (exp, const0_rtx, VOIDmode, 0);
/* Handle calls that return values in multiple non-contiguous locations.
The Irix 6 ABI has examples of this. */
else if (GET_CODE (target) == PARALLEL)
- emit_group_load (target, temp, int_size_in_bytes (TREE_TYPE (exp)));
+ emit_group_load (target, temp, TREE_TYPE (exp),
+ int_size_in_bytes (TREE_TYPE (exp)));
else if (GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
(want_value & 2
return target;
}
\f
-/* Return 1 if EXP just contains zeros. */
+/* Return 1 if EXP just contains zeros. FIXME merge with initializer_zerop. */
static int
is_zeros_p (tree exp)
/* Return 1 if EXP contains mostly (3/4) zeros. */
-static int
+int
mostly_zeros_p (tree exp)
{
if (TREE_CODE (exp) == CONSTRUCTOR)
{
tree elt;
+ /* If size is zero or the target is already cleared, do nothing. */
+ if (size == 0 || cleared)
+ cleared = 1;
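+ /* Setting CLEARED makes the else-if chain below emit no clearing
+ code for an empty or already-cleared object. */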
/* We either clear the aggregate or indicate the value is dead. */
- if ((TREE_CODE (type) == UNION_TYPE
- || TREE_CODE (type) == QUAL_UNION_TYPE)
- && ! cleared
- && ! CONSTRUCTOR_ELTS (exp))
+ else if ((TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ && ! CONSTRUCTOR_ELTS (exp))
/* If the constructor is empty, clear the union. */
{
clear_storage (target, expr_size (exp));
set the initial value as zero so we can fold the value into
a constant. But if more than one register is involved,
this probably loses. */
- else if (! cleared && GET_CODE (target) == REG && TREE_STATIC (exp)
+ else if (GET_CODE (target) == REG && TREE_STATIC (exp)
&& GET_MODE_SIZE (GET_MODE (target)) <= UNITS_PER_WORD)
{
emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
clear the whole structure first. Don't do this if TARGET is a
register whose mode size isn't equal to SIZE since clear_storage
can't handle this case. */
- else if (! cleared && size > 0
- && ((list_length (CONSTRUCTOR_ELTS (exp))
- != fields_length (type))
- || mostly_zeros_p (exp))
+ else if (((list_length (CONSTRUCTOR_ELTS (exp)) != fields_length (type))
+ || mostly_zeros_p (exp))
&& (GET_CODE (target) != REG
|| ((HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (target))
== size)))
{
unsigned int set_word_size = TYPE_ALIGN (TREE_TYPE (exp));
enum machine_mode mode = mode_for_size (set_word_size, MODE_INT, 1);
- char *bit_buffer = (char *) alloca (nbits);
+ char *bit_buffer = alloca (nbits);
HOST_WIDE_INT word = 0;
unsigned int bit_pos = 0;
unsigned int ibit = 0;
part of the expression. */
return safe_from_p (x, TREE_OPERAND (exp, 1), 0);
- case METHOD_CALL_EXPR:
- /* This takes an rtx argument, but shouldn't appear here. */
- abort ();
-
default:
break;
}
/* We can find the lowest bit that's a one. If the low
HOST_BITS_PER_WIDE_INT bits are zero, return BIGGEST_ALIGNMENT.
We need to handle this case since we can find it in a COND_EXPR,
- a MIN_EXPR, or a MAX_EXPR. If the constant overlows, we have an
+ a MIN_EXPR, or a MAX_EXPR. If the constant overflows, we have an
erroneous program, so return BIGGEST_ALIGNMENT to avoid any
later ICE. */
if (TREE_CONSTANT_OVERFLOW (exp))
case PARM_DECL:
if (!DECL_RTL_SET_P (exp))
{
- error_with_decl (exp, "prior parameter's size depends on `%s'");
+ error ("%Hprior parameter's size depends on '%D'",
+ &DECL_SOURCE_LOCATION (exp), exp);
return CONST0_RTX (mode);
}
input_filename = EXPR_WFL_FILENAME (exp);
input_line = EXPR_WFL_LINENO (exp);
if (EXPR_WFL_EMIT_LINE_NOTE (exp))
- emit_line_note (input_filename, input_line);
+ emit_line_note (input_location);
/* Possibly avoid switching back and forth here. */
to_return = expand_expr (EXPR_WFL_NODE (exp), target, tmode, modifier);
input_location = saved_loc;
op0 = validize_mem (force_const_mem (mode, op0));
}
+ /* Otherwise, if this object is not in memory and we either have an
+ offset or a BLKmode result, put it there. This case can't occur in
+ C, but can in Ada if we have unchecked conversion of an expression
+ from a scalar type to an array or record type or for an
+ ARRAY_RANGE_REF whose type is BLKmode. */
+ else if (GET_CODE (op0) != MEM
+ && (offset != 0
+ || (code == ARRAY_RANGE_REF && mode == BLKmode)))
+ {
+ /* If the operand is a SAVE_EXPR, we can deal with this by
+ forcing the SAVE_EXPR into memory. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR)
+ {
+ put_var_into_stack (TREE_OPERAND (exp, 0),
+ /*rescan=*/true);
+ op0 = SAVE_EXPR_RTL (TREE_OPERAND (exp, 0));
+ }
+ else
+ {
+ tree nt
+ = build_qualified_type (TREE_TYPE (tem),
+ (TYPE_QUALS (TREE_TYPE (tem))
+ | TYPE_QUAL_CONST));
+ rtx memloc = assign_temp (nt, 1, 1, 1);
+
+ emit_move_insn (memloc, op0);
+ op0 = memloc;
+ }
+ }
+
if (offset != 0)
{
rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
EXPAND_SUM);
- /* If this object is in a register, put it into memory.
- This case can't occur in C, but can in Ada if we have
- unchecked conversion of an expression from a scalar type to
- an array or record type. */
- if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
- || GET_CODE (op0) == CONCAT || GET_CODE (op0) == ADDRESSOF)
- {
- /* If the operand is a SAVE_EXPR, we can deal with this by
- forcing the SAVE_EXPR into memory. */
- if (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR)
- {
- put_var_into_stack (TREE_OPERAND (exp, 0),
- /*rescan=*/true);
- op0 = SAVE_EXPR_RTL (TREE_OPERAND (exp, 0));
- }
- else
- {
- tree nt
- = build_qualified_type (TREE_TYPE (tem),
- (TYPE_QUALS (TREE_TYPE (tem))
- | TYPE_QUAL_CONST));
- rtx memloc = assign_temp (nt, 1, 1, 1);
-
- emit_move_insn (memloc, op0);
- op0 = memloc;
- }
- }
-
if (GET_CODE (op0) != MEM)
abort ();
|| (mode1 != BLKmode
&& (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
|| (bitpos % GET_MODE_ALIGNMENT (mode) != 0))
- && SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0)))
+ && ((modifier == EXPAND_CONST_ADDRESS
+ || modifier == EXPAND_INITIALIZER)
+ ? STRICT_ALIGNMENT
+ : SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0))))
|| (bitpos % BITS_PER_UNIT != 0)))
/* If the type and the field are a constant size and the
size of the type isn't the same size as the bitfield,
if (modifier == EXPAND_STACK_PARM)
target = 0;
- /* Handle complex values specially. */
+ /* ABS_EXPR is not valid for complex arguments. */
if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
|| GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
- return expand_complex_abs (mode, op0, target, unsignedp);
+ abort ();
/* Unsigned abs is simply the operand. Testing here means we don't
risk generating incorrect code below. */
tree lhs = TREE_OPERAND (exp, 0);
tree rhs = TREE_OPERAND (exp, 1);
- temp = expand_assignment (lhs, rhs, ! ignore, original_target != 0);
+ temp = expand_assignment (lhs, rhs, ! ignore);
return temp;
}
(TREE_CODE (rhs) == BIT_IOR_EXPR
? integer_one_node
: integer_zero_node)),
- 0, 0);
+ 0);
do_pending_stack_adjust ();
emit_label (label);
return const0_rtx;
}
- temp = expand_assignment (lhs, rhs, ! ignore, original_target != 0);
+ temp = expand_assignment (lhs, rhs, ! ignore);
return temp;
}
/* Handle calls that pass values in multiple
non-contiguous locations. The Irix 6 ABI has examples
of this. */
- emit_group_store (memloc, op0,
+ emit_group_store (memloc, op0, inner_type,
int_size_in_bytes (inner_type));
else
emit_move_insn (memloc, op0);
incremented = TREE_OPERAND (incremented, 0);
}
- temp = expand_assignment (incremented, newexp, ! post && ! ignore , 0);
+ temp = expand_assignment (incremented, newexp, ! post && ! ignore);
return post ? op0 : temp;
}
do this by shifting the bit being tested to the low-order bit and
masking the result with the constant 1. If the condition was EQ,
we xor it with 1. This does not require an scc insn and is faster
- than an scc insn even if we have it. */
+ than an scc insn even if we have it.
+
+ The code to make this transformation was moved into fold_single_bit_test,
+ so we just call into the folder and expand its result. */
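+
+ /* For example, (x & 8) != 0 folds to the equivalent of
+ (x >> 3) & 1, needing only a shift and an AND; for EQ the folded
+ form is additionally XORed with 1. */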
if ((code == NE || code == EQ)
&& TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1)
&& integer_pow2p (TREE_OPERAND (arg0, 1)))
{
- tree inner = TREE_OPERAND (arg0, 0);
- int bitnum = tree_log2 (TREE_OPERAND (arg0, 1));
- int ops_unsignedp;
-
- /* If INNER is a right shift of a constant and it plus BITNUM does
- not overflow, adjust BITNUM and INNER. */
-
- if (TREE_CODE (inner) == RSHIFT_EXPR
- && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
- && TREE_INT_CST_HIGH (TREE_OPERAND (inner, 1)) == 0
- && bitnum < TYPE_PRECISION (type)
- && 0 > compare_tree_int (TREE_OPERAND (inner, 1),
- bitnum - TYPE_PRECISION (type)))
- {
- bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1));
- inner = TREE_OPERAND (inner, 0);
- }
-
- /* If we are going to be able to omit the AND below, we must do our
- operations as unsigned. If we must use the AND, we have a choice.
- Normally unsigned is faster, but for some machines signed is. */
- ops_unsignedp = (bitnum == TYPE_PRECISION (type) - 1 ? 1
-#ifdef LOAD_EXTEND_OP
- : (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND ? 0 : 1)
-#else
- : 1
-#endif
- );
-
- if (! get_subtarget (subtarget)
- || GET_MODE (subtarget) != operand_mode
- || ! safe_from_p (subtarget, inner, 1))
- subtarget = 0;
-
- op0 = expand_expr (inner, subtarget, VOIDmode, 0);
-
- if (bitnum != 0)
- op0 = expand_shift (RSHIFT_EXPR, operand_mode, op0,
- size_int (bitnum), subtarget, ops_unsignedp);
-
- if (GET_MODE (op0) != mode)
- op0 = convert_to_mode (mode, op0, ops_unsignedp);
-
- if ((code == EQ && ! invert) || (code == NE && invert))
- op0 = expand_binop (mode, xor_optab, op0, const1_rtx, subtarget,
- ops_unsignedp, OPTAB_LIB_WIDEN);
-
- /* Put the AND last so it can combine with more things. */
- if (bitnum != TYPE_PRECISION (type) - 1)
- op0 = expand_and (mode, op0, const1_rtx, subtarget);
-
- return op0;
+ tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
+ return expand_expr (fold_single_bit_test (code == NE ? NE_EXPR : EQ_EXPR,
+ arg0, arg1, type),
+ target, VOIDmode, EXPAND_NORMAL);
}
/* Now see if we are likely to be able to do this. Return if not. */