return temp;
}
\f
+/* Return the largest alignment we can use for doing a move (or store)
+ of MAX_PIECES bytes.  ALIGN is the largest alignment we could use. */
+
+static unsigned int
+alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
+{
+ enum machine_mode tmode;
+
+ tmode = mode_for_size (max_pieces * BITS_PER_UNIT, MODE_INT, 1);
+ if (align >= GET_MODE_ALIGNMENT (tmode))
+ align = GET_MODE_ALIGNMENT (tmode);
+ else
+ {
+ enum machine_mode tmode, xmode;
+
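+ /* Walk from the narrowest integer mode upward; XMODE trails TMODE
+ by one step, so after the loop it is the widest integer mode that
+ is no larger than MAX_PIECES and not slow to access at ALIGN (or
+ the narrowest integer mode, if even that fails the tests).  */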
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
+ tmode != VOIDmode;
+ xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) > max_pieces
+ || SLOW_UNALIGNED_ACCESS (tmode, align))
+ break;
+
+ align = MAX (align, GET_MODE_ALIGNMENT (xmode));
+ }
+
+ return align;
+}
+
+/* Return the widest integer mode that is narrower than SIZE bytes.
+ If no such mode can be found, return VOIDmode. */
+
+static enum machine_mode
+widest_int_mode_for_size (unsigned int size)
+{
+ enum machine_mode tmode, mode = VOIDmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < size)
+ mode = tmode;
+
+ return mode;
+}
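+
+/* For example, with the usual QImode/HImode/SImode/DImode integer
+ modes, widest_int_mode_for_size (5) is SImode, and
+ widest_int_mode_for_size (1) is VOIDmode.  */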
+
/* STORE_MAX_PIECES is the number of bytes at a time that we can
store efficiently. Due to internal GCC limitations, this is
MOVE_MAX_PIECES limited by the number of bytes GCC can represent
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (from));
rtx to_addr, from_addr = XEXP (from, 0);
unsigned int max_size = MOVE_MAX_PIECES + 1;
- enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
align = MIN (to ? MEM_ALIGN (to) : align, MEM_ALIGN (from));
if (!(data.autinc_from && data.autinc_to)
&& move_by_pieces_ninsns (len, align, max_size) > 2)
{
- /* Find the mode of the largest move... */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ /* Find the mode of the largest move...
+ MODE might not be used depending on the definitions of the
+ USE_* macros below. */
+ enum machine_mode mode ATTRIBUTE_UNUSED
+ = widest_int_mode_for_size (max_size);
if (USE_LOAD_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_from)
{
data.to_addr = copy_to_mode_reg (to_addr_mode, to_addr);
}
- tmode = mode_for_size (MOVE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > MOVE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
/* First move what we can in the largest integer mode, then go to
successively smaller modes. */
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ enum machine_mode mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
unsigned int max_size)
{
unsigned HOST_WIDE_INT n_insns = 0;
- enum machine_mode tmode;
-
- tmode = mode_for_size (MOVE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode tmode, xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > MOVE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
while (max_size > 1)
{
- enum machine_mode mode = VOIDmode;
+ enum machine_mode mode;
enum insn_code icode;
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
&& (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode))
tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
(bytepos % slen0) * BITS_PER_UNIT,
- 1, NULL_RTX, mode, mode);
+ 1, false, NULL_RTX, mode, mode);
}
else
{
mem = assign_stack_temp (GET_MODE (src), slen, 0);
emit_move_insn (mem, src);
tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
- 0, 1, NULL_RTX, mode, mode);
+ 0, 1, false, NULL_RTX, mode, mode);
}
}
/* FIXME: A SIMD parallel will eventually lead to a subreg of a
tmps[i] = src;
else
tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
- bytepos * BITS_PER_UNIT, 1, NULL_RTX,
+ bytepos * BITS_PER_UNIT, 1, false, NULL_RTX,
mode, mode);
if (shift)
bitpos for the destination store (left justified). */
store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, copy_mode,
extract_bit_field (src, bitsize,
- xbitpos % BITS_PER_WORD, 1,
+ xbitpos % BITS_PER_WORD, 1, false,
NULL_RTX, copy_mode, copy_mode));
}
unsigned HOST_WIDE_INT l;
unsigned int max_size;
HOST_WIDE_INT offset = 0;
- enum machine_mode mode, tmode;
+ enum machine_mode mode;
enum insn_code icode;
int reverse;
- rtx cst;
+ /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */
+ rtx cst ATTRIBUTE_UNUSED;
if (len == 0)
return 1;
: STORE_BY_PIECES_P (len, align)))
return 0;
- tmode = mode_for_size (STORE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > STORE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
/* We would first store what we can in the largest integer mode, then go to
successively smaller modes. */
reverse++)
{
l = len;
- mode = VOIDmode;
max_size = STORE_MAX_PIECES + 1;
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (data->to));
rtx to_addr = XEXP (data->to, 0);
unsigned int max_size = STORE_MAX_PIECES + 1;
- enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
data->offset = 0;
if (!data->autinc_to
&& move_by_pieces_ninsns (data->len, align, max_size) > 2)
{
- /* Determine the main mode we'll be using. */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ /* Determine the main mode we'll be using.
+ MODE might not be used depending on the definitions of the
+ USE_* macros below. */
+ enum machine_mode mode ATTRIBUTE_UNUSED
+ = widest_int_mode_for_size (max_size);
if (USE_STORE_PRE_DECREMENT (mode) && data->reverse && ! data->autinc_to)
{
data->to_addr = copy_to_mode_reg (to_addr_mode, to_addr);
}
- tmode = mode_for_size (STORE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > STORE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
/* First store what we can in the largest integer mode, then go to
successively smaller modes. */
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ enum machine_mode mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
}
return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
- true, NULL_RTX, imode, imode);
+ true, false, NULL_RTX, imode, imode);
}
\f
/* A subroutine of emit_move_insn_1. Yet another lowpart generator.
|| align >= BIGGEST_ALIGNMENT
|| (PUSH_ROUNDING (align / BITS_PER_UNIT)
== (align / BITS_PER_UNIT)))
- && PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
+ && (HOST_WIDE_INT) PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
{
/* Push padding now if padding above and stack grows down,
or if padding below and stack grows up.
{
rtx to_rtx = 0;
rtx result;
+ enum machine_mode mode;
+ int align, icode;
/* Don't crash if the lhs of the assignment was erroneous. */
if (TREE_CODE (to) == ERROR_MARK)
if (operand_equal_p (to, from, 0))
return;
+ mode = TYPE_MODE (TREE_TYPE (to));
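+ /* If TO is a misaligned MEM_REF or TARGET_MEM_REF destination of a
+ non-BLK mode and the target has a movmisalign pattern for that
+ mode, use it to perform the store.  */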
+ if ((TREE_CODE (to) == MEM_REF
+ || TREE_CODE (to) == TARGET_MEM_REF)
+ && mode != BLKmode
+ && ((align = MAX (TYPE_ALIGN (TREE_TYPE (to)),
+ get_object_alignment (to, BIGGEST_ALIGNMENT)))
+ < (signed) GET_MODE_ALIGNMENT (mode))
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
+ {
+ enum machine_mode address_mode, op_mode1;
+ rtx insn, reg, op0, mem;
+
+ reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ reg = force_not_mem (reg);
+
+ if (TREE_CODE (to) == MEM_REF)
+ {
+ addr_space_t as
+ = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (to, 1))));
+ tree base = TREE_OPERAND (to, 0);
+ address_mode = targetm.addr_space.address_mode (as);
+ op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ op0 = convert_memory_address_addr_space (address_mode, op0, as);
+ if (!integer_zerop (TREE_OPERAND (to, 1)))
+ {
+ rtx off
+ = immed_double_int_const (mem_ref_offset (to), address_mode);
+ op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
+ }
+ op0 = memory_address_addr_space (mode, op0, as);
+ mem = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (mem, to, 0);
+ set_mem_addr_space (mem, as);
+ }
+ else if (TREE_CODE (to) == TARGET_MEM_REF)
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (to));
+ struct mem_address addr;
+
+ get_address_description (to, &addr);
+ op0 = addr_for_mem_ref (&addr, as, true);
+ op0 = memory_address_addr_space (mode, op0, as);
+ mem = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (mem, to, 0);
+ set_mem_addr_space (mem, as);
+ }
+ else
+ gcc_unreachable ();
+ if (TREE_THIS_VOLATILE (to))
+ MEM_VOLATILE_P (mem) = 1;
+
+ op_mode1 = insn_data[icode].operand[1].mode;
+ if (! (*insn_data[icode].operand[1].predicate) (reg, op_mode1)
+ && op_mode1 != VOIDmode)
+ reg = copy_to_mode_reg (op_mode1, reg);
+
+ insn = GEN_FCN (icode) (mem, reg);
+ /* The movmisalign<mode> pattern cannot fail, else the assignment would
+ silently be omitted. */
+ gcc_assert (insn != NULL_RTX);
+ emit_insn (insn);
+ return;
+ }
+
/* Assignment of a structure component needs special treatment
if the structure component's rtx is not simply a MEM.
Assignment of an array element at a constant index, and assignment of
to_rtx = expand_normal (tem);
/* If the bitfield is volatile, we want to access it in the
- field's mode, not the computed mode. */
- if (volatilep
- && GET_CODE (to_rtx) == MEM
- && flag_strict_volatile_bitfields > 0)
- to_rtx = adjust_address (to_rtx, mode1, 0);
+ field's mode, not the computed mode.
+ If a MEM has VOIDmode (external with incomplete type),
+ use BLKmode for it instead. */
+ if (MEM_P (to_rtx))
+ {
+ if (volatilep && flag_strict_volatile_bitfields > 0)
+ to_rtx = adjust_address (to_rtx, mode1, 0);
+ else if (GET_MODE (to_rtx) == VOIDmode)
+ to_rtx = adjust_address (to_rtx, BLKmode, 0);
+ }
if (offset != 0)
{
return;
}
- else if (TREE_CODE (to) == MISALIGNED_INDIRECT_REF)
- {
- addr_space_t as = ADDR_SPACE_GENERIC;
- enum machine_mode mode, op_mode1;
- enum insn_code icode;
- rtx reg, addr, mem, insn;
-
- if (POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (to, 0))))
- as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (to, 0))));
-
- reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
- reg = force_not_mem (reg);
-
- mode = TYPE_MODE (TREE_TYPE (to));
- addr = expand_expr (TREE_OPERAND (to, 0), NULL_RTX, VOIDmode,
- EXPAND_SUM);
- addr = memory_address_addr_space (mode, addr, as);
- mem = gen_rtx_MEM (mode, addr);
-
- set_mem_attributes (mem, to, 0);
- set_mem_addr_space (mem, as);
-
- icode = optab_handler (movmisalign_optab, mode);
- gcc_assert (icode != CODE_FOR_nothing);
-
- op_mode1 = insn_data[icode].operand[1].mode;
- if (! (*insn_data[icode].operand[1].predicate) (reg, op_mode1)
- && op_mode1 != VOIDmode)
- reg = copy_to_mode_reg (op_mode1, reg);
-
- insn = GEN_FCN (icode) (mem, reg);
- emit_insn (insn);
- return;
- }
-
/* If the rhs is a function call and its value is not an aggregate,
call the function before we start to compute the lhs.
This is needed for correct code for cases such as
return NULL_RTX;
}
- else if (TREE_CODE (exp) == STRING_CST
+ else if ((TREE_CODE (exp) == STRING_CST
+ || (TREE_CODE (exp) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ == STRING_CST
+ && integer_zerop (TREE_OPERAND (exp, 1))))
&& !nontemporal && !call_param_p
- && TREE_STRING_LENGTH (exp) > 0
- && TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
+ && MEM_P (target))
{
/* Optimize initialization of an array with a STRING_CST. */
HOST_WIDE_INT exp_len, str_copy_len;
rtx dest_mem;
+ tree str = TREE_CODE (exp) == STRING_CST
+ ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
exp_len = int_expr_size (exp);
if (exp_len <= 0)
goto normal_expr;
- str_copy_len = strlen (TREE_STRING_POINTER (exp));
- if (str_copy_len < TREE_STRING_LENGTH (exp) - 1)
- goto normal_expr;
-
- str_copy_len = TREE_STRING_LENGTH (exp);
- if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
- {
- str_copy_len += STORE_MAX_PIECES - 1;
- str_copy_len &= ~(STORE_MAX_PIECES - 1);
- }
- str_copy_len = MIN (str_copy_len, exp_len);
- if (!can_store_by_pieces (str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (exp)),
- MEM_ALIGN (target), false))
- goto normal_expr;
-
- dest_mem = target;
-
- dest_mem = store_by_pieces (dest_mem,
- str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (exp)),
- MEM_ALIGN (target), false,
- exp_len > str_copy_len ? 1 : 0);
- if (exp_len > str_copy_len)
- clear_storage (adjust_address (dest_mem, BLKmode, 0),
- GEN_INT (exp_len - str_copy_len),
- BLOCK_OP_NORMAL);
- return NULL_RTX;
- }
- else if (TREE_CODE (exp) == MEM_REF
- && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) == STRING_CST
- && integer_zerop (TREE_OPERAND (exp, 1))
- && !nontemporal && !call_param_p
- && TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
- {
- /* Optimize initialization of an array with a STRING_CST. */
- HOST_WIDE_INT exp_len, str_copy_len;
- rtx dest_mem;
- tree str = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
-
- exp_len = int_expr_size (exp);
- if (exp_len <= 0)
+ if (TREE_STRING_LENGTH (str) <= 0)
goto normal_expr;
str_copy_len = strlen (TREE_STRING_POINTER (str));
goto normal_expr;
str_copy_len = TREE_STRING_LENGTH (str);
- if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
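+ /* Round the copy length up to a STORE_MAX_PIECES boundary only when
+ the constant ends in a NUL; the bytes the rounding adds beyond the
+ string are then the zero padding builtin_strncpy_read_str supplies.  */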
+ if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0
+ && TREE_STRING_POINTER (str)[TREE_STRING_LENGTH (str) - 1] == '\0')
{
str_copy_len += STORE_MAX_PIECES - 1;
str_copy_len &= ~(STORE_MAX_PIECES - 1);
}
str_copy_len = MIN (str_copy_len, exp_len);
if (!can_store_by_pieces (str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (str)),
+ CONST_CAST (char *, TREE_STRING_POINTER (str)),
MEM_ALIGN (target), false))
goto normal_expr;
dest_mem = store_by_pieces (dest_mem,
str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (str)),
+ CONST_CAST (char *,
+ TREE_STRING_POINTER (str)),
MEM_ALIGN (target), false,
exp_len > str_copy_len ? 1 : 0);
if (exp_len > str_copy_len)
{
int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
if (GET_MODE (target) == BLKmode
- || GET_MODE (temp) == BLKmode)
+ && GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
(call_param_p
? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL));
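+ /* If only the target is BLKmode, use a bit-field store to copy the
+ value of TEMP into it.  */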
+ else if (GET_MODE (target) == BLKmode)
+ store_bit_field (target, INTVAL (expr_size (exp)) * BITS_PER_UNIT,
+ 0, GET_MODE (temp), temp);
else
convert_move (target, temp, unsignedp);
}
constructor_elt *ce;
unsigned HOST_WIDE_INT idx;
- for (idx = 0;
- VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce);
- idx++)
+ FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce)
if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
|| !safe_from_p (x, ce->value, 0))
return 0;
}
break;
- case MISALIGNED_INDIRECT_REF:
- case INDIRECT_REF:
+ case MEM_REF:
if (MEM_P (x)
&& alias_sets_conflict_p (MEM_ALIAS_SET (x),
get_alias_set (exp)))
int ignore;
bool reduce_bit_field;
location_t loc = ops->location;
- tree treeop0, treeop1;
+ tree treeop0, treeop1, treeop2;
#define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
? reduce_to_bit_field_precision ((expr), \
target, \
treeop0 = ops->op0;
treeop1 = ops->op1;
+ treeop2 = ops->op2;
/* We should be called only on simple (binary or unary) expressions,
exactly those that are valid in gimple expressions that aren't
}
}
+ /* Use TER to expand pointer addition of a negated value
+ as pointer subtraction. */
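+ /* For example, with t_1 = -i_2; p_3 = q_4 + t_1; TER exposes the
+ negation and the addition can be expanded as q_4 - i_2 instead.  */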
+ if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
+ || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
+ && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
+ && TREE_CODE (treeop1) == SSA_NAME
+ && TYPE_MODE (TREE_TYPE (treeop0))
+ == TYPE_MODE (TREE_TYPE (treeop1)))
+ {
+ gimple def = get_def_for_expr (treeop1, NEGATE_EXPR);
+ if (def)
+ {
+ treeop1 = gimple_assign_rhs1 (def);
+ code = MINUS_EXPR;
+ goto do_minus;
+ }
+ }
+
/* No sense saving up arithmetic to be done
if it's all in the wrong mode to form part of an address.
And force_operand won't know whether to sign-extend or
return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
case MINUS_EXPR:
+ do_minus:
/* For initializers, we are allowed to return a MINUS of two
symbolic constants. Here we handle all cases when both operands
are constant. */
case WIDEN_MULT_PLUS_EXPR:
case WIDEN_MULT_MINUS_EXPR:
expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (ops->op2);
+ op2 = expand_normal (treeop2);
target = expand_widen_pattern_expr (ops, op0, op1, op2,
target, unsignedp);
return target;
expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
+ case FMA_EXPR:
+ {
+ optab opt = fma_optab;
+ gimple def0, def2;
+
+ def0 = get_def_for_expr (treeop0, NEGATE_EXPR);
+ def2 = get_def_for_expr (treeop2, NEGATE_EXPR);
+
+ op0 = op2 = NULL;
+
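+ /* FMA_EXPR computes a*b + c.  If an operand is fed by a negation,
+ prefer the combined patterns when the target has them: fnms for
+ -a*b - c, fnma for -a*b + c, and fms for a*b - c.  */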
+ if (def0 && def2
+ && optab_handler (fnms_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fnms_optab;
+ op0 = expand_normal (gimple_assign_rhs1 (def0));
+ op2 = expand_normal (gimple_assign_rhs1 (def2));
+ }
+ else if (def0
+ && optab_handler (fnma_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fnma_optab;
+ op0 = expand_normal (gimple_assign_rhs1 (def0));
+ }
+ else if (def2
+ && optab_handler (fms_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fms_optab;
+ op2 = expand_normal (gimple_assign_rhs1 (def2));
+ }
+
+ if (op0 == NULL)
+ op0 = expand_expr (treeop0, subtarget, VOIDmode, EXPAND_NORMAL);
+ if (op2 == NULL)
+ op2 = expand_normal (treeop2);
+ op1 = expand_normal (treeop1);
+
+ return expand_ternary_op (TYPE_MODE (type), opt,
+ op0, op1, op2, target, 0);
+ }
+
case MULT_EXPR:
/* If this is a fixed-point operation, then we cannot use the code
below because "expand_mult" doesn't support sat/no-sat fixed-point
gcc_assert (!context
|| context == current_function_decl
|| TREE_STATIC (exp)
+ || DECL_EXTERNAL (exp)
/* ??? C++ creates functions that are not TREE_STATIC. */
|| TREE_CODE (exp) == FUNCTION_DECL);
return expand_constructor (exp, target, modifier, false);
- case MISALIGNED_INDIRECT_REF:
- case INDIRECT_REF:
+ case TARGET_MEM_REF:
{
- tree exp1 = treeop0;
- addr_space_t as = ADDR_SPACE_GENERIC;
-
- if (modifier != EXPAND_WRITE)
- {
- tree t;
-
- t = fold_read_from_constant_string (exp);
- if (t)
- return expand_expr (t, target, tmode, modifier);
- }
-
- if (POINTER_TYPE_P (TREE_TYPE (exp1)))
- as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp1)));
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
+ struct mem_address addr;
+ int icode, align;
- op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
+ get_address_description (exp, &addr);
+ op0 = addr_for_mem_ref (&addr, as, true);
op0 = memory_address_addr_space (mode, op0, as);
-
temp = gen_rtx_MEM (mode, op0);
-
set_mem_attributes (temp, exp, 0);
set_mem_addr_space (temp, as);
-
- /* Resolve the misalignment now, so that we don't have to remember
- to resolve it later. Of course, this only works for reads. */
- if (code == MISALIGNED_INDIRECT_REF)
+ align = MAX (TYPE_ALIGN (TREE_TYPE (exp)),
+ get_object_alignment (exp, BIGGEST_ALIGNMENT));
+ if (mode != BLKmode
+ && (unsigned) align < GET_MODE_ALIGNMENT (mode)
+ /* If the target does not have special handling for unaligned
+ loads of this mode, it can use regular moves for them. */
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
{
- int icode;
rtx reg, insn;
- gcc_assert (modifier == EXPAND_NORMAL
- || modifier == EXPAND_STACK_PARM);
-
- /* The vectorizer should have already checked the mode. */
- icode = optab_handler (movmisalign_optab, mode);
- gcc_assert (icode != CODE_FOR_nothing);
-
/* We've already validated the memory, and we're creating a
new pseudo destination. The predicates really can't fail. */
reg = gen_reg_rtx (mode);
/* Nor can the insn generator. */
insn = GEN_FCN (icode) (reg, temp);
+ gcc_assert (insn != NULL_RTX);
emit_insn (insn);
return reg;
}
-
return temp;
}
- case TARGET_MEM_REF:
- {
- addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
- struct mem_address addr;
- tree base;
-
- get_address_description (exp, &addr);
- op0 = addr_for_mem_ref (&addr, as, true);
- op0 = memory_address_addr_space (mode, op0, as);
- temp = gen_rtx_MEM (mode, op0);
- set_mem_attributes (temp, TMR_ORIGINAL (exp), 0);
- set_mem_addr_space (temp, as);
- base = get_base_address (TMR_ORIGINAL (exp));
- if (INDIRECT_REF_P (base)
- && TMR_BASE (exp)
- && TREE_CODE (TMR_BASE (exp)) == SSA_NAME
- && POINTER_TYPE_P (TREE_TYPE (TMR_BASE (exp))))
- {
- set_mem_expr (temp, build1 (INDIRECT_REF,
- TREE_TYPE (exp), TMR_BASE (exp)));
- set_mem_offset (temp, NULL_RTX);
- }
- }
- return temp;
-
case MEM_REF:
{
addr_space_t as
enum machine_mode address_mode;
tree base = TREE_OPERAND (exp, 0);
gimple def_stmt;
+ int icode, align;
/* Handle expansion of non-aliased memory with non-BLKmode. That
might end up in a register. */
if (TREE_CODE (base) == ADDR_EXPR)
address_mode = targetm.addr_space.address_mode (as);
base = TREE_OPERAND (exp, 0);
if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
- base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
- gimple_assign_rhs1 (def_stmt),
- gimple_assign_rhs2 (def_stmt));
- op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ {
+ tree mask = gimple_assign_rhs2 (def_stmt);
+ base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
+ gimple_assign_rhs1 (def_stmt), mask);
+ TREE_OPERAND (exp, 0) = base;
+ }
+ align = MAX (TYPE_ALIGN (TREE_TYPE (exp)),
+ get_object_alignment (exp, BIGGEST_ALIGNMENT));
+ op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
op0 = convert_memory_address_addr_space (address_mode, op0, as);
if (!integer_zerop (TREE_OPERAND (exp, 1)))
{
set_mem_addr_space (temp, as);
if (TREE_THIS_VOLATILE (exp))
MEM_VOLATILE_P (temp) = 1;
+ if (mode != BLKmode
+ && (unsigned) align < GET_MODE_ALIGNMENT (mode)
+ /* If the target does not have special handling for unaligned
+ loads of this mode, it can use regular moves for them. */
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
+ {
+ rtx reg, insn;
+
+ /* We've already validated the memory, and we're creating a
+ new pseudo destination. The predicates really can't fail. */
+ reg = gen_reg_rtx (mode);
+
+ /* Nor can the insn generator. */
+ insn = GEN_FCN (icode) (reg, temp);
+ emit_insn (insn);
+
+ return reg;
+ }
return temp;
}
&& TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
&& TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array)
&& TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK
- && targetm.binds_local_p (array))
+ && const_value_known_p (array))
{
if (TREE_CODE (index) == INTEGER_CST)
{
HOST_WIDE_INT bitsize, bitpos;
tree offset;
int volatilep = 0, must_force_mem;
+ bool packedp = false;
tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
&mode1, &unsignedp, &volatilep, true);
rtx orig_op0, memloc;
infinitely recurse. */
gcc_assert (tem != exp);
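+
+ /* Note whether the reference is to a packed structure or a packed
+ field; extract_bit_field can use this to diagnose misaligned
+ accesses to packed members.  */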
+ if (TYPE_PACKED (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ || (TREE_CODE (TREE_OPERAND (exp, 1)) == FIELD_DECL
+ && DECL_PACKED (TREE_OPERAND (exp, 1))))
+ packedp = true;
+
/* If TEM's type is a union of variable size, pass TARGET to the inner
computation, since it will need a temporary and TARGET is known
to have to do. This occurs in unchecked conversion in Ada. */
/* If the bitfield is volatile, we want to access it in the
- field's mode, not the computed mode. */
- if (volatilep
- && GET_CODE (op0) == MEM
- && flag_strict_volatile_bitfields > 0)
- op0 = adjust_address (op0, mode1, 0);
+ field's mode, not the computed mode.
+ If a MEM has VOIDmode (external with incomplete type),
+ use BLKmode for it instead. */
+ if (MEM_P (op0))
+ {
+ if (volatilep && flag_strict_volatile_bitfields > 0)
+ op0 = adjust_address (op0, mode1, 0);
+ else if (GET_MODE (op0) == VOIDmode)
+ op0 = adjust_address (op0, BLKmode, 0);
+ }
mode2
= CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
- op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
+ op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp, packedp,
(modifier == EXPAND_STACK_PARM
? NULL_RTX : target),
ext_mode, ext_mode);
{
if (GET_CODE (op0) == SUBREG)
op0 = force_reg (GET_MODE (op0), op0);
- op0 = gen_lowpart (mode, op0);
+ temp = gen_lowpart_common (mode, op0);
+ if (temp)
+ op0 = temp;
+ else
+ {
+ if (!REG_P (op0) && !MEM_P (op0))
+ op0 = force_reg (GET_MODE (op0), op0);
+ op0 = gen_lowpart (mode, op0);
+ }
}
/* If both types are integral, convert from one mode to the other. */
else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
*ptr_offset = fold_convert (sizetype, offset);
return array;
}
- else if (TREE_CODE (array) == VAR_DECL)
+ else if (TREE_CODE (array) == VAR_DECL
+ || TREE_CODE (array) == CONST_DECL)
{
int length;
/* Variables initialized to string literals can be handled too. */
- if (DECL_INITIAL (array) == NULL_TREE
+ if (!const_value_known_p (array)
+ || !DECL_INITIAL (array)
|| TREE_CODE (DECL_INITIAL (array)) != STRING_CST)
return 0;
- /* If they are read-only, non-volatile and bind locally. */
- if (! TREE_READONLY (array)
- || TREE_SIDE_EFFECTS (array)
- || ! targetm.binds_local_p (array))
- return 0;
-
/* Avoid const char foo[4] = "abcde"; */
if (DECL_SIZE_UNIT (array) == NULL_TREE
|| TREE_CODE (DECL_SIZE_UNIT (array)) != INTEGER_CST
return gen_rtx_CONST_VECTOR (mode, v);
}
-
-/* Build a decl for a EH personality function named NAME. */
+/* Build a decl for a personality function given a language prefix. */
tree
-build_personality_function (const char *name)
+build_personality_function (const char *lang)
{
+ const char *unwind_and_version;
tree decl, type;
+ char *name;
+
+ switch (targetm.except_unwind_info (&global_options))
+ {
+ case UI_NONE:
+ return NULL;
+ case UI_SJLJ:
+ unwind_and_version = "_sj0";
+ break;
+ case UI_DWARF2:
+ case UI_TARGET:
+ unwind_and_version = "_v0";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
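+ /* For example, LANG "gxx" yields "__gxx_personality_v0" for DWARF2
+ unwinding and "__gxx_personality_sj0" for SJLJ.  */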
type = build_function_type_list (integer_type_node, integer_type_node,
long_long_unsigned_type_node,