adjust_address_nv. Instead of preparing fallback support for an
invalid address, we call adjust_address_nv directly. */
if (MEM_P (cplx))
- emit_move_insn (adjust_address_nv (cplx, imode,
- imag_p ? GET_MODE_SIZE (imode) : 0),
- val);
+ {
+ emit_move_insn (adjust_address_nv (cplx, imode,
+ imag_p ? GET_MODE_SIZE (imode) : 0),
+ val);
+ return;
+ }
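At the source level, the MEM early return above is exercised when only one half of a complex object in memory is assigned. A minimal sketch (hypothetical example, not a testcase from this patch):

/* Writing only the imaginary part of a complex value that lives in
   memory; write_complex_part can store it via adjust_address_nv and
   return immediately.  */
static _Complex double c;

void
set_imag (double v)
{
  __imag__ c = v;
}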
/* If the sub-object is at least word sized, then we know that subregging
will work. This special case is important, since store_bit_field
emitted, or NULL if such a move could not be generated. */
static rtx
-emit_move_via_integer (enum machine_mode mode, rtx x, rtx y)
+emit_move_via_integer (enum machine_mode mode, rtx x, rtx y, bool force)
{
enum machine_mode imode;
enum insn_code code;
if (code == CODE_FOR_nothing)
return NULL_RTX;
- x = emit_move_change_mode (imode, mode, x, false);
+ x = emit_move_change_mode (imode, mode, x, force);
if (x == NULL_RTX)
return NULL_RTX;
- y = emit_move_change_mode (imode, mode, y, false);
+ y = emit_move_change_mode (imode, mode, y, force);
if (y == NULL_RTX)
return NULL_RTX;
return emit_insn (GEN_FCN (code) (x, y));
return get_last_insn ();
}
- ret = emit_move_via_integer (mode, x, y);
+ ret = emit_move_via_integer (mode, x, y, true);
if (ret)
return ret;
}
}
/* Otherwise, find the MODE_INT mode of the same width. */
- ret = emit_move_via_integer (mode, x, y);
+ ret = emit_move_via_integer (mode, x, y, false);
gcc_assert (ret != NULL);
return ret;
}
fits within a HOST_WIDE_INT. */
if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
- rtx ret = emit_move_via_integer (mode, x, y);
+ rtx ret = emit_move_via_integer (mode, x, y, false);
if (ret)
return ret;
}
offset = 0;
/* Now NOT_STACK gets the number of words that we don't need to
- allocate on the stack. Convert OFFSET to words too. */
+ allocate on the stack. Convert OFFSET to words too. */
not_stack = (partial - offset) / UNITS_PER_WORD;
offset /= UNITS_PER_WORD;
if (offset != 0)
{
- rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
+ rtx offset_rtx;
- gcc_assert (MEM_P (to_rtx));
+ if (!MEM_P (to_rtx))
+ {
+ /* We can get constant negative offsets into arrays with broken
+ user code. Translate this to a trap instead of ICEing. */
+ gcc_assert (TREE_CODE (offset) == INTEGER_CST);
+ expand_builtin_trap ();
+ to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
+ }
+ offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
#ifdef POINTERS_EXTEND_UNSIGNED
if (GET_MODE (offset_rtx) != Pmode)
offset_rtx = convert_to_mode (Pmode, offset_rtx, 0);
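For context, the trap path added above is aimed at invalid source of roughly this shape, where a constant negative offset is applied to an object that need not live in memory (hypothetical sketch, not the original PR testcase):

/* Undefined behavior: a constant negative index into a member array.
   Rather than ICE when the destination is not a MEM, a trap is
   expanded and a dummy MEM is substituted.  */
struct S { char buf[4]; int x; };

void
broken (void)
{
  struct S s = { { 0 }, 0 };
  s.buf[-2] = 1;
}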
/* And now we have to find out if the element itself is fully
constructed. E.g. for union { struct { int a, b; } s; } u
= { .s = { .a = 1 } }. */
- if (elt_count == count_type_elements (init_sub_type))
+ if (elt_count == count_type_elements (init_sub_type, false))
clear_this = false;
}
}
}
/* Count the number of scalars in TYPE. Return -1 on overflow or
- variable-sized. */
+ variable-sized. If ALLOW_FLEXARR is true, don't count a flexible
+ array member at the end of the structure. */
HOST_WIDE_INT
-count_type_elements (tree type)
+count_type_elements (tree type, bool allow_flexarr)
{
const HOST_WIDE_INT max = ~((HOST_WIDE_INT)1 << (HOST_BITS_PER_WIDE_INT-1));
switch (TREE_CODE (type))
if (telts && host_integerp (telts, 1))
{
HOST_WIDE_INT n = tree_low_cst (telts, 1) + 1;
- HOST_WIDE_INT m = count_type_elements (TREE_TYPE (type));
+ HOST_WIDE_INT m = count_type_elements (TREE_TYPE (type), false);
if (n == 0)
return 0;
else if (max / n > m)
for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
if (TREE_CODE (f) == FIELD_DECL)
{
- t = count_type_elements (TREE_TYPE (f));
+ t = count_type_elements (TREE_TYPE (f), false);
if (t < 0)
- return -1;
+ {
+ /* Check for structures with flexible array member. */
+ tree tf = TREE_TYPE (f);
+ if (allow_flexarr
+ && TREE_CHAIN (f) == NULL
+ && TREE_CODE (tf) == ARRAY_TYPE
+ && TYPE_DOMAIN (tf)
+ && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
+ && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
+ && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
+ && int_size_in_bytes (type) >= 0)
+ break;
+
+ return -1;
+ }
n += t;
}
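The structure shape being recognized here is a trailing C99 flexible array member, e.g. (illustrative declaration, not taken from the patch):

/* With ALLOW_FLEXARR set, the open-ended payload[] member at the end
   no longer makes count_type_elements give up and return -1.  */
struct msg
{
  int len;
  char payload[];	/* flexible array member: zero lower bound, no upper bound */
};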
if (must_clear)
return 1;
- elts = count_type_elements (TREE_TYPE (exp));
+ elts = count_type_elements (TREE_TYPE (exp), false);
return nz_elts < elts / 4;
}
return initializer_zerop (exp);
}
+
+/* Return 1 if EXP contains all zeros. */
+
+static int
+all_zeros_p (tree exp)
+{
+ if (TREE_CODE (exp) == CONSTRUCTOR)
+ {
+ HOST_WIDE_INT nz_elts, nc_elts, count;
+ bool must_clear;
+
+ categorize_ctor_elements (exp, &nz_elts, &nc_elts, &count, &must_clear);
+ return nz_elts == 0;
+ }
+
+ return initializer_zerop (exp);
+}
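As a usage note, the new predicate is intended for constructors like the following, where every element that is actually present is zero (hedged illustration):

/* all_zeros_p would be nonzero for both of these initializers, so the
   object can simply be cleared rather than copied element by element.  */
struct big
{
  int id;
  double data[256];
};

struct big zeroed = { 0 };		/* everything zero */
struct big partly = { .id = 0 };	/* explicit zero, rest implicit */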
\f
/* Helper function for store_constructor.
TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
result = convert_memory_address (tmode, result);
tmp = convert_memory_address (tmode, tmp);
- if (modifier == EXPAND_SUM)
+ if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
result = gen_rtx_PLUS (tmode, result, tmp);
else
{
return const0_rtx;
}
+ /* Try to avoid creating a temporary at all. This is possible
+ if all of the initializer is zero.
+ FIXME: try to handle all [0..255] initializers we can handle
+ with memset. */
+ else if (TREE_STATIC (exp)
+ && !TREE_ADDRESSABLE (exp)
+ && target != 0 && mode == BLKmode
+ && all_zeros_p (exp))
+ {
+ clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
+ return target;
+ }
+
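The kind of source that benefits from the new branch looks roughly like this: a large all-zero aggregate initializer for a BLKmode target, which can now be expanded as a single clear_storage call instead of a copy from a constant-pool temporary (sketch under the conditions tested above):

struct state
{
  char name[64];
  int counters[64];
};

void
init (struct state *p)
{
  struct state s = { 0 };	/* all-zero, static-valid constructor */
  *p = s;
}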
/* All elts simple constants => refer to a constant in memory. But
if this is a non-BLKmode mode, let it store a field at a time
since that should make a CONST_INT or CONST_DOUBLE when we
int icode;
rtx reg, insn;
- gcc_assert (modifier == EXPAND_NORMAL);
+ gcc_assert (modifier == EXPAND_NORMAL
+ || modifier == EXPAND_STACK_PARM);
/* The vectorizer should have already checked the mode. */
icode = movmisalign_optab->handlers[mode].insn_code;
|| modifier == EXPAND_STACK_PARM)
? modifier : EXPAND_NORMAL);
- /* If this is a constant, put it into a register if it is a
- legitimate constant and OFFSET is 0 and memory if it isn't. */
+ /* If this is a constant, put it into a register if it is a legitimate
+ constant, OFFSET is 0, and we won't try to extract outside the
+ register (in case we were passed a partially uninitialized object
+ or a view_conversion to a larger size). Force the constant to
+ memory otherwise. */
if (CONSTANT_P (op0))
{
enum machine_mode mode = TYPE_MODE (TREE_TYPE (tem));
if (mode != BLKmode && LEGITIMATE_CONSTANT_P (op0)
- && offset == 0)
+ && offset == 0
+ && bitpos + bitsize <= GET_MODE_BITSIZE (mode))
op0 = force_reg (mode, op0);
else
op0 = validize_mem (force_const_mem (mode, op0));
}
- /* Otherwise, if this object not in memory and we either have an
- offset or a BLKmode result, put it there. This case can't occur in
- C, but can in Ada if we have unchecked conversion of an expression
- from a scalar type to an array or record type or for an
- ARRAY_RANGE_REF whose type is BLKmode. */
+ /* Otherwise, if this object is not in memory and we either have an
+ offset, a BLKmode result, or a reference outside the object, put it
+ there. Such cases can occur in Ada if we have unchecked conversion
+ of an expression from a scalar type to an array or record type or
+ for an ARRAY_RANGE_REF whose type is BLKmode. */
else if (!MEM_P (op0)
&& (offset != 0
+ || (bitpos + bitsize > GET_MODE_BITSIZE (GET_MODE (op0)))
|| (code == ARRAY_RANGE_REF && mode == BLKmode)))
{
tree nt = build_qualified_type (TREE_TYPE (tem),
case VIEW_CONVERT_EXPR:
op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier);
- /* If the input and output modes are both the same, we are done.
- Otherwise, if neither mode is BLKmode and both are integral and within
- a word, we can use gen_lowpart. If neither is true, make sure the
- operand is in memory and convert the MEM to the new mode. */
+ /* If the input and output modes are both the same, we are done. */
if (TYPE_MODE (type) == GET_MODE (op0))
;
+ /* If neither mode is BLKmode and both modes are the same size,
+ then we can use gen_lowpart. */
else if (TYPE_MODE (type) != BLKmode && GET_MODE (op0) != BLKmode
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
- && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
- && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD
- && GET_MODE_SIZE (GET_MODE (op0)) <= UNITS_PER_WORD)
- op0 = gen_lowpart (TYPE_MODE (type), op0);
+ && GET_MODE_SIZE (TYPE_MODE (type))
+ == GET_MODE_SIZE (GET_MODE (op0)))
+ {
+ if (GET_CODE (op0) == SUBREG)
+ op0 = force_reg (GET_MODE (op0), op0);
+ op0 = gen_lowpart (TYPE_MODE (type), op0);
+ }
+ /* If both modes are integral, then we can convert from one to the
+ other. */
+ else if (SCALAR_INT_MODE_P (GET_MODE (op0))
+ && SCALAR_INT_MODE_P (TYPE_MODE (type)))
+ op0 = convert_modes (TYPE_MODE (type), GET_MODE (op0), op0,
+ TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ /* As a last resort, spill op0 to memory, and reload it in a
+ different mode. */
else if (!MEM_P (op0))
{
/* If the operand is not a MEM, force it into memory. Since we
}
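A user-visible case of a same-size view conversion is an equal-size vector cast, which GCC typically represents as a VIEW_CONVERT_EXPR; with the changes above it can stay in a register via gen_lowpart instead of bouncing through memory (illustrative example, not from the patch):

typedef int v4si __attribute__ ((vector_size (16)));
typedef float v4sf __attribute__ ((vector_size (16)));

v4si
as_ints (v4sf x)
{
  return (v4si) x;	/* same-size reinterpretation of the bits */
}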
else if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& TREE_CONSTANT (TREE_OPERAND (exp, 0)))
{
rtx constant_part;
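For the HOST_BITS_PER_WIDE_INT change, the widened bound mainly matters on 64-bit targets, where the pointer mode is wider than a host int but still fits in a HOST_WIDE_INT; a trivial shape that reaches this PLUS_EXPR path (hedged illustration):

extern char buffer[];

char *
third_byte (void)
{
  return buffer + 3;	/* constant second operand, constant address as first */
}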