#include "target.h"
#include "timevar.h"
#include "df.h"
+#include "diagnostic.h"
/* Decide whether a function's arguments should be processed
from first to last or from last to first.
void
init_expr (void)
{
- cfun->expr = ggc_alloc_cleared (sizeof (struct expr_status));
+ memset (&rtl.expr, 0, sizeof (rtl.expr));
}
\f
/* Copy data from FROM to TO, where the machine modes are not the same.
&& ((code = can_extend_p (to_mode, word_mode, unsignedp))
!= CODE_FOR_nothing))
{
+ rtx word_to = gen_reg_rtx (word_mode);
if (REG_P (to))
{
if (reg_overlap_mentioned_p (to, from))
from = force_reg (from_mode, from);
emit_insn (gen_rtx_CLOBBER (VOIDmode, to));
}
- convert_move (gen_lowpart (word_mode, to), from, unsignedp);
- emit_unop_insn (code, to,
- gen_lowpart (word_mode, to), equiv_code);
+ convert_move (word_to, from, unsignedp);
+ emit_unop_insn (code, to, word_to, equiv_code);
return;
}
else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
&& XVECLEN (dst, 0) > 1)
tmps[i] = simplify_gen_subreg (mode, src, GET_MODE(dst), bytepos);
- else if (CONSTANT_P (src)
- || (REG_P (src) && GET_MODE (src) == mode))
+ else if (CONSTANT_P (src))
+ {
+ HOST_WIDE_INT len = (HOST_WIDE_INT) bytelen;
+
+ if (len == ssize)
+ tmps[i] = src;
+ else
+ {
+ rtx first, second;
+
+ gcc_assert (2 * len == ssize);
+ split_double (src, &first, &second);
+ if (i)
+ tmps[i] = second;
+ else
+ tmps[i] = first;
+ }
+ }
+ else if (REG_P (src) && GET_MODE (src) == mode)
tmps[i] = src;
else
tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
rtx src = NULL, dst = NULL;
unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
+ enum machine_mode copy_mode;
if (tgtblk == 0)
{
padding_correction
= (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
- /* Copy the structure BITSIZE bites at a time.
+ /* Copy the structure BITSIZE bits at a time. If the target lives in
+ memory, take care of not reading/writing past its end by selecting
+ a copy mode suited to BITSIZE. This should always be possible given
+ how it is computed.
We could probably emit more efficient code for machines which do not use
strict alignment, but it doesn't seem worth the effort at the current
time. */
+
+ copy_mode = word_mode;
+ if (MEM_P (tgtblk))
+ {
+ enum machine_mode mem_mode = mode_for_size (bitsize, MODE_INT, 1);
+ if (mem_mode != BLKmode)
+ copy_mode = mem_mode;
+ }
+
for (bitpos = 0, xbitpos = padding_correction;
bitpos < bytes * BITS_PER_UNIT;
bitpos += bitsize, xbitpos += bitsize)
dst = operand_subword (tgtblk, bitpos / BITS_PER_WORD, 1, BLKmode);
/* Use xbitpos for the source extraction (right justified) and
- xbitpos for the destination store (left justified). */
- store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, word_mode,
+ bitpos for the destination store (left justified). */
+ store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, copy_mode,
extract_bit_field (src, bitsize,
xbitpos % BITS_PER_WORD, 1,
- NULL_RTX, word_mode, word_mode));
+ NULL_RTX, copy_mode, copy_mode));
}
return tgtblk;
/* If X or Y are memory references, verify that their addresses are valid
for the machine. */
if (MEM_P (x)
- && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
- && ! push_operand (x, GET_MODE (x)))
- || (flag_force_addr
- && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
+ && (! memory_address_p (GET_MODE (x), XEXP (x, 0))
+ && ! push_operand (x, GET_MODE (x))))
x = validize_mem (x);
if (MEM_P (y)
- && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
- || (flag_force_addr
- && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
+ && ! memory_address_p (GET_MODE (y), XEXP (y, 0)))
y = validize_mem (y);
gcc_assert (mode != BLKmode);
converting modes. */
if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
&& TREE_TYPE (TREE_TYPE (exp)) == 0
- && (!lang_hooks.reduce_bit_field_operations
- || (GET_MODE_PRECISION (GET_MODE (target))
- == TYPE_PRECISION (TREE_TYPE (exp)))))
+ && GET_MODE_PRECISION (GET_MODE (target))
+ == TYPE_PRECISION (TREE_TYPE (exp)))
{
if (TYPE_UNSIGNED (TREE_TYPE (exp))
!= SUBREG_PROMOTED_UNSIGNED_P (target))
temp = convert_to_mode (GET_MODE (target), temp, unsignedp);
emit_move_insn (target, temp);
}
- else if (GET_MODE (target) == BLKmode)
+ else if (GET_MODE (target) == BLKmode
+ || GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
(call_param_p
? BLOCK_OP_CALL_PARM
case UNION_TYPE:
case QUAL_UNION_TYPE:
- {
- /* Ho hum. How in the world do we guess here? Clearly it isn't
- right to count the fields. Guess based on the number of words. */
- HOST_WIDE_INT n = int_size_in_bytes (type);
- if (n < 0)
- return -1;
- return n / UNITS_PER_WORD;
- }
+ return -1;
case COMPLEX_TYPE:
return 2;
else if (TREE_CODE (exp) == BIT_FIELD_REF)
{
size_tree = TREE_OPERAND (exp, 1);
- *punsignedp = BIT_FIELD_REF_UNSIGNED (exp);
+ *punsignedp = (! INTEGRAL_TYPE_P (TREE_TYPE (exp))
+ || TYPE_UNSIGNED (TREE_TYPE (exp)));
/* For vector types, with the correct size of access, use the mode of
inner type. */
/* ??? This should be considered a front-end bug. We should not be
generating ADDR_EXPR of something that isn't an LVALUE. The only
exception here is STRING_CST. */
- if (TREE_CODE (exp) == CONSTRUCTOR
- || CONSTANT_CLASS_P (exp))
+ if (CONSTANT_CLASS_P (exp))
return XEXP (expand_expr_constant (exp, 0, modifier), 0);
/* Everything must be something allowed by is_gimple_addressable. */
default:
/* If the object is a DECL, then expand it for its rtl. Don't bypass
expand_expr, as that can have various side effects; LABEL_DECLs for
- example, may not have their DECL_RTL set yet. Assume language
- specific tree nodes can be expanded in some interesting way. */
+ example, may not have their DECL_RTL set yet. Expand the rtl of
+ CONSTRUCTORs too, which should yield a memory reference for the
+ constructor's contents. Assume language specific tree nodes can
+ be expanded in some interesting way. */
if (DECL_P (exp)
+ || TREE_CODE (exp) == CONSTRUCTOR
|| TREE_CODE (exp) >= LAST_AND_UNUSED_TREE_CODE)
{
result = expand_expr (exp, target, tmode,
/* Handle ERROR_MARK before anybody tries to access its type. */
if (TREE_CODE (exp) == ERROR_MARK
+ || TREE_CODE (exp) == PREDICT_EXPR
|| (!GIMPLE_TUPLE_P (exp) && TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
{
ret = CONST0_RTX (tmode);
rtx subtarget, original_target;
int ignore;
tree context, subexp0, subexp1;
- bool reduce_bit_field = false;
-#define REDUCE_BIT_FIELD(expr) (reduce_bit_field && !ignore \
+ bool reduce_bit_field;
+#define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
? reduce_to_bit_field_precision ((expr), \
target, \
type) \
mode = TYPE_MODE (type);
unsignedp = TYPE_UNSIGNED (type);
}
- if (lang_hooks.reduce_bit_field_operations
- && TREE_CODE (type) == INTEGER_TYPE
- && GET_MODE_PRECISION (mode) > TYPE_PRECISION (type))
- {
- /* An operation in what may be a bit-field type needs the
- result to be reduced to the precision of the bit-field type,
- which is narrower than that of the type's mode. */
- reduce_bit_field = true;
- if (modifier == EXPAND_STACK_PARM)
- target = 0;
- }
- /* Use subtarget as the target for operand 0 of a binary operation. */
- subtarget = get_subtarget (target);
- original_target = target;
ignore = (target == const0_rtx
|| ((code == NON_LVALUE_EXPR || code == NOP_EXPR
|| code == CONVERT_EXPR || code == COND_EXPR
|| code == VIEW_CONVERT_EXPR)
&& TREE_CODE (type) == VOID_TYPE));
+ /* An operation in what may be a bit-field type needs the
+ result to be reduced to the precision of the bit-field type,
+ which is narrower than that of the type's mode. */
+ reduce_bit_field = (!ignore
+ && TREE_CODE (type) == INTEGER_TYPE
+ && GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
+
/* If we are going to ignore this result, we need only do something
if there is a side-effect somewhere in the expression. If there
is, short-circuit the most common cases here. Note that we must
target = 0;
}
+ if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
+ target = 0;
+
+ /* Use subtarget as the target for operand 0 of a binary operation. */
+ subtarget = get_subtarget (target);
+ original_target = target;
switch (code)
{
if (MEM_P (decl_rtl) && REG_P (XEXP (decl_rtl, 0)))
temp = validize_mem (decl_rtl);
- /* If DECL_RTL is memory, we are in the normal case and either
- the address is not valid or it is not a register and -fforce-addr
- is specified, get the address into a register. */
+ /* If DECL_RTL is memory, we are in the normal case and the
+ address is not valid, get the address into a register. */
else if (MEM_P (decl_rtl) && modifier != EXPAND_INITIALIZER)
{
decl_rtl = use_anchored_address (decl_rtl);
if (modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_SUM
- && (!memory_address_p (DECL_MODE (exp), XEXP (decl_rtl, 0))
- || (flag_force_addr && !REG_P (XEXP (decl_rtl, 0)))))
+ && !memory_address_p (DECL_MODE (exp), XEXP (decl_rtl, 0)))
temp = replace_equiv_address (decl_rtl,
copy_rtx (XEXP (decl_rtl, 0)));
}
if (modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER
&& modifier != EXPAND_SUM
- && (! memory_address_p (mode, XEXP (temp, 0))
- || flag_force_addr))
+ && ! memory_address_p (mode, XEXP (temp, 0)))
return replace_equiv_address (temp,
copy_rtx (XEXP (temp, 0)));
return temp;
/* All valid uses of __builtin_va_arg_pack () are removed during
inlining. */
if (CALL_EXPR_VA_ARG_PACK (exp))
- error ("invalid use of %<__builtin_va_arg_pack ()%>");
+ error ("%Kinvalid use of %<__builtin_va_arg_pack ()%>", exp);
{
tree fndecl = get_callee_fndecl (exp), attr;
if (fndecl
&& (attr = lookup_attribute ("error",
DECL_ATTRIBUTES (fndecl))) != NULL)
- error ("call to %qs declared with attribute error: %s",
- lang_hooks.decl_printable_name (fndecl, 1),
+ error ("%Kcall to %qs declared with attribute error: %s",
+ exp, lang_hooks.decl_printable_name (fndecl, 1),
TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
if (fndecl
&& (attr = lookup_attribute ("warning",
DECL_ATTRIBUTES (fndecl))) != NULL)
- warning (0, "call to %qs declared with attribute warning: %s",
- lang_hooks.decl_printable_name (fndecl, 1),
+ warning (0, "%Kcall to %qs declared with attribute warning: %s",
+ exp, lang_hooks.decl_printable_name (fndecl, 1),
TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
/* Check for a built-in function. */
}
return expand_call (exp, target, ignore);
+ case PAREN_EXPR:
case NON_LVALUE_EXPR:
case NOP_EXPR:
case CONVERT_EXPR:
case BIT_XOR_EXPR:
goto binop;
- case LSHIFT_EXPR:
- case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
+ /* The expansion code only handles expansion of mode precision
+ rotates. */
+ gcc_assert (GET_MODE_PRECISION (TYPE_MODE (type))
+ == TYPE_PRECISION (type));
+
+      /* Fallthrough.  */
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
/* If this is a fixed-point operation, then we cannot use the code
below because "expand_shift" doesn't support sat/no-sat fixed-point
shifts. */
target = 0;
op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget,
VOIDmode, EXPAND_NORMAL);
- return expand_shift (code, mode, op0, TREE_OPERAND (exp, 1), target,
+ temp = expand_shift (code, mode, op0, TREE_OPERAND (exp, 1), target,
unsignedp);
+ if (code == LSHIFT_EXPR)
+ temp = REDUCE_BIT_FIELD (temp);
+ return temp;
/* Could determine the answer when only additive constants differ. Also,
the addition of one can be handled by changing the condition. */
goto binop;
}
+ case OMP_ATOMIC_LOAD:
+ case OMP_ATOMIC_STORE:
+ /* OMP expansion is not run when there were errors, so these codes
+ can get here. */
+ gcc_assert (errorcount != 0);
+ return NULL_RTX;
+
default:
return lang_hooks.expand_expr (exp, original_target, tmode,
modifier, alt_rtl);
index = copy_to_mode_reg (Pmode, index);
#endif
- /* If flag_force_addr were to affect this address
- it could interfere with the tricky assumptions made
- about addresses that contain label-refs,
- which may be valid only very near the tablejump itself. */
/* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
GET_MODE_SIZE, because this indicates how large insns are. The other
uses should all be Pmode, because they are addresses. This code
index = PIC_CASE_VECTOR_ADDRESS (index);
else
#endif
- index = memory_address_noforce (CASE_VECTOR_MODE, index);
+ index = memory_address (CASE_VECTOR_MODE, index);
temp = gen_reg_rtx (CASE_VECTOR_MODE);
vector = gen_const_mem (CASE_VECTOR_MODE, index);
convert_move (temp, vector, 0);