#define STORE_MAX_PIECES MIN (MOVE_MAX_PIECES, 2 * sizeof (HOST_WIDE_INT))
+/* Determine whether LEN bytes can be moved by using several move
+ instructions. Return nonzero if a call to move_by_pieces should
+ succeed. */
+
+int
+can_move_by_pieces (len, align)
+ unsigned HOST_WIDE_INT len;
+ unsigned int align;
+{
+ return MOVE_BY_PIECES_P (len, align);
+}
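
/* A sketch, not part of the patch, of how code outside expr.c can use
   the new wrapper: the exported function lets callers test
   MOVE_BY_PIECES_P without needing the target macro themselves.  The
   helper name expand_small_copy is hypothetical.  */

static rtx
expand_small_copy (dst, src, len, align)
     rtx dst, src;
     unsigned HOST_WIDE_INT len;
     unsigned int align;
{
  /* Inline the copy only when the target says piecewise moves win.  */
  if (can_move_by_pieces (len, align))
    return move_by_pieces (dst, src, len, align, 0);

  /* Otherwise use the generic block-move machinery.  */
  return emit_block_move (dst, src, GEN_INT (len), BLOCK_OP_NORMAL);
}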
+
/* Generate several move instructions to copy LEN bytes from block FROM to
block TO. (These are MEM rtx's with BLKmode). The caller must pass FROM
and TO through protect_from_queue before calling.
If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
used to push FROM to the stack.
- ALIGN is maximum alignment we can assume. */
+ ALIGN is maximum stack alignment we can assume.
-void
-move_by_pieces (to, from, len, align)
+ If ENDP is 0 return TO, if ENDP is 1 return memory at the end a la
+ mempcpy, and if ENDP is 2 return memory at the end minus one byte a la
+ stpcpy. */
+
+rtx
+move_by_pieces (to, from, len, align, endp)
rtx to, from;
unsigned HOST_WIDE_INT len;
unsigned int align;
+ int endp;
{
struct move_by_pieces data;
rtx to_addr, from_addr = XEXP (from, 0);
enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
+ align = MIN (to ? MEM_ALIGN (to) : align, MEM_ALIGN (from));
+
data.offset = 0;
data.from_addr = from_addr;
if (to)
/* The code above should have handled everything. */
if (data.len > 0)
abort ();
+
+ if (endp)
+ {
+ rtx to1;
+
+ if (data.reverse)
+ abort ();
+ if (data.autinc_to)
+ {
+ if (endp == 2)
+ {
+ if (HAVE_POST_INCREMENT && data.explicit_inc_to > 0)
+ emit_insn (gen_add2_insn (data.to_addr, constm1_rtx));
+ else
+ data.to_addr = copy_addr_to_reg (plus_constant (data.to_addr,
+ -1));
+ }
+ to1 = adjust_automodify_address (data.to, QImode, data.to_addr,
+ data.offset);
+ }
+ else
+ {
+ if (endp == 2)
+ --data.offset;
+ to1 = adjust_address (data.to, QImode, data.offset);
+ }
+ return to1;
+ }
+ else
+ return data.to;
}
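
/* A sketch of the new ENDP contract, assuming DST and SRC are BLKmode
   MEMs and LEN/ALIGN satisfy MOVE_BY_PIECES_P; the helper name is
   hypothetical.  With ENDP == 1 the returned MEM addresses the byte
   just past the copy, which is exactly what mempcpy must return.  */

static rtx
expand_mempcpy_sketch (dst, src, len, align)
     rtx dst, src;
     unsigned HOST_WIDE_INT len;
     unsigned int align;
{
  rtx end;

  if (! can_move_by_pieces (len, align))
    return NULL_RTX;

  /* ENDP == 1: copy, then get the QImode MEM one past the last byte.  */
  end = move_by_pieces (dst, src, len, align, 1);

  /* Its address is the mempcpy-style return value; ENDP == 2 would
     instead address the final byte, as stpcpy requires.  */
  return force_operand (XEXP (end, 0), NULL_RTX);
}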
/* Return number of insns required to move L bytes by pieces.
}
if (GET_CODE (size) == CONST_INT && MOVE_BY_PIECES_P (INTVAL (size), align))
- move_by_pieces (x, y, INTVAL (size), align);
+ move_by_pieces (x, y, INTVAL (size), align, 0);
else if (emit_block_move_via_movstr (x, y, size, align))
;
else if (may_use_call)
rtx x, y, size;
unsigned int align;
{
- /* Try the most limited insn first, because there's no point
- including more than one in the machine description unless
- the more limited one has some advantage. */
-
rtx opalign = GEN_INT (align / BITS_PER_UNIT);
enum machine_mode mode;
/* Since this is a move insn, we don't care about volatility. */
volatile_ok = 1;
+ /* Try the most limited insn first, because there's no point
+ including more than one in the machine description unless
+ the more limited one has some advantage. */
+
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
emit_block_move_via_libcall (dst, src, size)
rtx dst, src, size;
{
+ rtx dst_addr, src_addr;
tree call_expr, arg_list, fn, src_tree, dst_tree, size_tree;
enum machine_mode size_mode;
rtx retval;
/* DST, SRC, or SIZE may have been passed through protect_from_queue.
- It is unsafe to save the value generated by protect_from_queue
- and reuse it later. Consider what happens if emit_queue is
- called before the return value from protect_from_queue is used.
+ It is unsafe to save the value generated by protect_from_queue and reuse
+ it later. Consider what happens if emit_queue is called before the
+ return value from protect_from_queue is used.
- Expansion of the CALL_EXPR below will call emit_queue before
- we are finished emitting RTL for argument setup. So if we are
- not careful we could get the wrong value for an argument.
+ Expansion of the CALL_EXPR below will call emit_queue before we are
+ finished emitting RTL for argument setup. So if we are not careful we
+ could get the wrong value for an argument.
- To avoid this problem we go ahead and emit code to copy X, Y &
- SIZE into new pseudos. We can then place those new pseudos
- into an RTL_EXPR and use them later, even after a call to
+ To avoid this problem we go ahead and emit code to copy the addresses of
+ DST and SRC and SIZE into new pseudos. We can then place those new
+ pseudos into an RTL_EXPR and use them later, even after a call to
emit_queue.
- Note this is not strictly needed for library calls since they
- do not call emit_queue before loading their arguments. However,
- we may need to have library calls call emit_queue in the future
- since failing to do so could cause problems for targets which
- define SMALL_REGISTER_CLASSES and pass arguments in registers. */
+ Note this is not strictly needed for library calls since they do not call
+ emit_queue before loading their arguments. However, we may need to have
+ library calls call emit_queue in the future since failing to do so could
+ cause problems for targets which define SMALL_REGISTER_CLASSES and pass
+ arguments in registers. */
- dst = copy_to_mode_reg (Pmode, XEXP (dst, 0));
- src = copy_to_mode_reg (Pmode, XEXP (src, 0));
+ dst_addr = copy_to_mode_reg (Pmode, XEXP (dst, 0));
+ src_addr = copy_to_mode_reg (Pmode, XEXP (src, 0));
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ dst_addr = convert_memory_address (ptr_mode, dst_addr);
+ src_addr = convert_memory_address (ptr_mode, src_addr);
+#endif
+
+ dst_tree = make_tree (ptr_type_node, dst_addr);
+ src_tree = make_tree (ptr_type_node, src_addr);
if (TARGET_MEM_FUNCTIONS)
size_mode = TYPE_MODE (sizetype);
else
size_mode = TYPE_MODE (unsigned_type_node);
+
size = convert_to_mode (size_mode, size, 1);
size = copy_to_mode_reg (size_mode, size);
For convenience, we generate the call to bcopy this way as well. */
- dst_tree = make_tree (ptr_type_node, dst);
- src_tree = make_tree (ptr_type_node, src);
if (TARGET_MEM_FUNCTIONS)
size_tree = make_tree (sizetype, size);
else
retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0);
- /* If we are initializing a readonly value, show the above call
- clobbered it. Otherwise, a load from it may erroneously be
- hoisted from a loop. */
+ /* If we are initializing a readonly value, show the above call clobbered
+ it. Otherwise, a load from it may erroneously be hoisted from a loop, or
+ the delay slot scheduler might overlook conflicts and take nasty
+ decisions. */
if (RTX_UNCHANGING_P (dst))
- emit_insn (gen_rtx_CLOBBER (VOIDmode, dst));
+ add_function_usage_to
+ (last_call_insn (), gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_CLOBBER (VOIDmode, dst),
+ NULL_RTX));
- return (TARGET_MEM_FUNCTIONS ? retval : NULL_RTX);
+ return TARGET_MEM_FUNCTIONS ? retval : NULL_RTX;
}
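
/* The pseudo-copy idiom from the comment above, in isolation (the
   helper name is hypothetical): snapshot an address into a fresh
   pseudo before any emit_queue can run, then build the tree argument
   from the pseudo rather than from protect_from_queue's result.  */

static tree
addr_to_tree_sketch (mem)
     rtx mem;
{
  rtx addr = copy_to_mode_reg (Pmode, XEXP (mem, 0));

#ifdef POINTERS_EXTEND_UNSIGNED
  /* Library calls expect ptr_mode pointers even when Pmode is wider.  */
  addr = convert_memory_address (ptr_mode, addr);
#endif

  return make_tree (ptr_type_node, addr);
}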
/* A subroutine of emit_block_move_via_libcall. Create the tree node
static GTY(()) tree block_move_fn;
-static tree
-emit_block_move_libcall_fn (for_call)
- int for_call;
+void
+init_block_move_fn (asmspec)
+ const char *asmspec;
{
- static bool emitted_extern;
- tree fn = block_move_fn, args;
-
- if (!fn)
+ if (!block_move_fn)
{
+ tree args, fn;
+
if (TARGET_MEM_FUNCTIONS)
{
fn = get_identifier ("memcpy");
block_move_fn = fn;
}
+ if (asmspec)
+ {
+ SET_DECL_RTL (block_move_fn, NULL_RTX);
+ SET_DECL_ASSEMBLER_NAME (block_move_fn, get_identifier (asmspec));
+ }
+}
+
+static tree
+emit_block_move_libcall_fn (for_call)
+ int for_call;
+{
+ static bool emitted_extern;
+
+ if (!block_move_fn)
+ init_block_move_fn (NULL);
+
if (for_call && !emitted_extern)
{
emitted_extern = true;
- make_decl_rtl (fn, NULL);
- assemble_external (fn);
+ make_decl_rtl (block_move_fn, NULL);
+ assemble_external (block_move_fn);
}
- return fn;
+ return block_move_fn;
}
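
/* A hedged example of the new entry point: a port shipping a faster
   memcpy under a nonstandard assembler name (the name below is
   hypothetical) can redirect the libcall once, early in its
   option-override hook, before any block moves are expanded.  */

static void
example_override_block_move ()
{
  if (TARGET_MEM_FUNCTIONS)
    init_block_move_fn ("__fast_memcpy");
}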
/* A subroutine of emit_block_move. Copy the data via an explicit
}
/* Copy all or part of a BLKmode value X out of registers starting at REGNO.
- The number of registers to be filled is NREGS. SIZE indicates the number
- of bytes in the object X. */
+ The number of registers to be filled is NREGS. */
void
-move_block_from_reg (regno, x, nregs, size)
+move_block_from_reg (regno, x, nregs)
int regno;
rtx x;
int nregs;
- int size;
{
int i;
-#ifdef HAVE_store_multiple
- rtx pat;
- rtx last;
-#endif
- enum machine_mode mode;
if (nregs == 0)
return;
- /* If SIZE is that of a mode no bigger than a word, just use that
- mode's store operation. */
- if (size <= UNITS_PER_WORD
- && (mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0)) != BLKmode)
- {
- emit_move_insn (adjust_address (x, mode, 0), gen_rtx_REG (mode, regno));
- return;
- }
-
- /* Blocks smaller than a word on a BYTES_BIG_ENDIAN machine must be aligned
- to the left before storing to memory. Note that the previous test
- doesn't handle all cases (e.g. SIZE == 3). */
- if (size < UNITS_PER_WORD && BYTES_BIG_ENDIAN)
- {
- rtx tem = operand_subword (x, 0, 1, BLKmode);
- rtx shift;
-
- if (tem == 0)
- abort ();
-
- shift = expand_shift (LSHIFT_EXPR, word_mode,
- gen_rtx_REG (word_mode, regno),
- build_int_2 ((UNITS_PER_WORD - size)
- * BITS_PER_UNIT, 0), NULL_RTX, 0);
- emit_move_insn (tem, shift);
- return;
- }
-
/* See if the machine can do this with a store multiple insn. */
#ifdef HAVE_store_multiple
if (HAVE_store_multiple)
{
- last = get_last_insn ();
- pat = gen_store_multiple (x, gen_rtx_REG (word_mode, regno),
- GEN_INT (nregs));
+ rtx last = get_last_insn ();
+ rtx pat = gen_store_multiple (x, gen_rtx_REG (word_mode, regno),
+ GEN_INT (nregs));
if (pat)
{
emit_insn (pat);
else
abort ();
}
+ /* FIXME: A SIMD parallel will eventually lead to a subreg of a
+ SIMD register, which is currently broken. Until we get GCC
+ to emit proper RTL for these cases, let's dump to memory. */
+ else if (VECTOR_MODE_P (GET_MODE (dst))
+ && GET_CODE (src) == REG)
+ {
+ int slen = GET_MODE_SIZE (GET_MODE (src));
+ rtx mem;
+
+ mem = assign_stack_temp (GET_MODE (src), slen, 0);
+ emit_move_insn (mem, src);
+ tmps[i] = adjust_address (mem, mode, (int) bytepos);
+ }
else if (CONSTANT_P (src)
|| (GET_CODE (src) == REG && GET_MODE (src) == mode))
tmps[i] = src;
/* Generate several move instructions to store LEN bytes generated by
CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a
pointer which will be passed as argument in every CONSTFUN call.
- ALIGN is maximum alignment we can assume. */
+ ALIGN is maximum alignment we can assume.
+ If ENDP is 0 return TO, if ENDP is 1 return memory at the end a la
+ mempcpy, and if ENDP is 2 return memory at the end minus one byte a la
+ stpcpy. */
-void
-store_by_pieces (to, len, constfun, constfundata, align)
+rtx
+store_by_pieces (to, len, constfun, constfundata, align, endp)
rtx to;
unsigned HOST_WIDE_INT len;
rtx (*constfun) PARAMS ((PTR, HOST_WIDE_INT, enum machine_mode));
PTR constfundata;
unsigned int align;
+ int endp;
{
struct store_by_pieces data;
data.len = len;
data.to = to;
store_by_pieces_1 (&data, align);
+ if (endp)
+ {
+ rtx to1;
+
+ if (data.reverse)
+ abort ();
+ if (data.autinc_to)
+ {
+ if (endp == 2)
+ {
+ if (HAVE_POST_INCREMENT && data.explicit_inc_to > 0)
+ emit_insn (gen_add2_insn (data.to_addr, constm1_rtx));
+ else
+ data.to_addr = copy_addr_to_reg (plus_constant (data.to_addr,
+ -1));
+ }
+ to1 = adjust_automodify_address (data.to, QImode, data.to_addr,
+ data.offset);
+ }
+ else
+ {
+ if (endp == 2)
+ --data.offset;
+ to1 = adjust_address (data.to, QImode, data.offset);
+ }
+ return to1;
+ }
+ else
+ return data.to;
}
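
/* A sketch of a CONSTFUN callback in the style builtins.c uses, with a
   hypothetical name: DATA points at the source string, and the callback
   materializes the MODE-sized piece at OFFSET as a constant rtx.  With
   ENDP == 2, store_by_pieces then returns the MEM holding the final
   byte, whose address is what an stpcpy expansion must produce.  */

static rtx
read_str_piece (data, offset, mode)
     PTR data;
     HOST_WIDE_INT offset;
     enum machine_mode mode;
{
  /* c_readstr packs GET_MODE_SIZE (MODE) bytes from the string into an
     immediate, honoring target endianness.  */
  return c_readstr ((const char *) data + offset, mode);
}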
/* Generate several move instructions to clear LEN bytes of block TO. (A MEM
static GTY(()) tree block_clear_fn;
-static tree
-clear_storage_libcall_fn (for_call)
- int for_call;
+void
+init_block_clear_fn (asmspec)
+ const char *asmspec;
{
- static bool emitted_extern;
- tree fn = block_clear_fn, args;
-
- if (!fn)
+ if (!block_clear_fn)
{
+ tree fn, args;
+
if (TARGET_MEM_FUNCTIONS)
{
fn = get_identifier ("memset");
block_clear_fn = fn;
}
+ if (asmspec)
+ {
+ SET_DECL_RTL (block_clear_fn, NULL_RTX);
+ SET_DECL_ASSEMBLER_NAME (block_clear_fn, get_identifier (asmspec));
+ }
+}
+
+static tree
+clear_storage_libcall_fn (for_call)
+ int for_call;
+{
+ static bool emitted_extern;
+
+ if (!block_clear_fn)
+ init_block_clear_fn (NULL);
+
if (for_call && !emitted_extern)
{
emitted_extern = true;
- make_decl_rtl (fn, NULL);
- assemble_external (fn);
+ make_decl_rtl (block_clear_fn, NULL);
+ assemble_external (block_clear_fn);
}
- return fn;
+ return block_clear_fn;
}
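
/* Symmetric hedged example for the clear-storage entry point: redirect
   memset (or bzero when !TARGET_MEM_FUNCTIONS) to a hypothetical
   optimized alias, again before any block clears are expanded.  */

static void
example_override_block_clear ()
{
  init_block_clear_fn ("__fast_memset");
}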
\f
/* Generate code to copy Y into X.
{
enum machine_mode mode = GET_MODE (x);
rtx y_cst = NULL_RTX;
- rtx last_insn;
+ rtx last_insn, set;
x = protect_from_queue (x, 1);
y = protect_from_queue (y, 0);
&& (last_insn = compress_float_constant (x, y)))
return last_insn;
+ y_cst = y;
+
if (!LEGITIMATE_CONSTANT_P (y))
{
- y_cst = y;
y = force_const_mem (mode, y);
/* If the target's cannot_force_const_mem prevented the spill,
last_insn = emit_move_insn_1 (x, y);
- if (y_cst && GET_CODE (x) == REG)
+ if (y_cst && GET_CODE (x) == REG
+ && (set = single_set (last_insn)) != NULL_RTX
+ && SET_DEST (set) == x
+ && ! rtx_equal_p (y_cst, SET_SRC (set)))
set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
return last_insn;
/* Note that the real part always precedes the imag part in memory
regardless of machine's endianness. */
#ifdef STACK_GROWS_DOWNWARD
- emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
- (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_imagpart (submode, y)));
- emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
- (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_realpart (submode, y)));
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ gen_imagpart (submode, y));
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ gen_realpart (submode, y));
#else
- emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
- (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_realpart (submode, y)));
- emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
- (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_imagpart (submode, y)));
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ gen_realpart (submode, y));
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ gen_imagpart (submode, y));
#endif
}
else
|| GET_CODE (imagpart_x) == SUBREG))
emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
- emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
- (realpart_x, realpart_y));
- emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
- (imagpart_x, imagpart_y));
+ emit_move_insn (realpart_x, realpart_y);
+ emit_move_insn (imagpart_x, imagpart_y);
}
return get_last_insn ();
last_insn = get_last_insn ();
if (GET_CODE (x) == REG)
- REG_NOTES (last_insn)
- = gen_rtx_EXPR_LIST (REG_EQUAL, y, REG_NOTES (last_insn));
+ set_unique_reg_note (last_insn, REG_EQUAL, y);
return last_insn;
}
&& PUSH_ARGS
&& GET_CODE (size) == CONST_INT
&& skip == 0
+ && MEM_ALIGN (xinner) >= align
&& (MOVE_BY_PIECES_P ((unsigned) INTVAL (size) - used, align))
/* Here we avoid the case of a structure whose weak alignment
forces many pushes of a small amount of data,
&& where_pad != none && where_pad != stack_direction)
anti_adjust_stack (GEN_INT (extra));
- move_by_pieces (NULL, xinner, INTVAL (size) - used, align);
+ move_by_pieces (NULL, xinner, INTVAL (size) - used, align, 0);
}
else
#endif /* PUSH_ROUNDING */
{
rtx offset_rtx;
- if (contains_placeholder_p (offset))
+ if (CONTAINS_PLACEHOLDER_P (offset))
offset = build (WITH_RECORD_EXPR, sizetype,
offset, make_tree (TREE_TYPE (exp), target));
that object. Finally, load from the object into TARGET. This is not
very efficient in general, but should only be slightly more expensive
than the otherwise-required unaligned accesses. Perhaps this can be
- cleaned up later. */
+ cleaned up later. It's tempting to make OBJECT readonly, but it's set
+ twice, once with emit_move_insn and once via store_field. */
if (mode == BLKmode
&& (GET_CODE (target) == REG || GET_CODE (target) == SUBREG))
{
- rtx object
- = assign_temp
- (build_qualified_type (type, TYPE_QUALS (type) | TYPE_QUAL_CONST),
- 0, 1, 1);
+ rtx object = assign_temp (type, 0, 1, 1);
rtx blk_object = adjust_address (object, BLKmode, 0);
if (bitsize != (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (target)))
/* If the field isn't aligned enough to store as an ordinary memref,
store it as a bit field. */
|| (mode != BLKmode
- && ((SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target))
- && (MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode)))
- || bitpos % GET_MODE_ALIGNMENT (mode)))
+ && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
+ || bitpos % GET_MODE_ALIGNMENT (mode))
+ && SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target)))
+ || (bitpos % BITS_PER_UNIT != 0)))
/* If the RHS and field are a constant size and the size of the
RHS isn't the same size as the bitfield, we must use bitfield
operations. */
temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp,
size_int (GET_MODE_BITSIZE (GET_MODE (temp))
- bitsize),
- temp, 1);
+ NULL_RTX, 1);
/* Unless MODE is VOIDmode or BLKmode, convert TEMP to
MODE. */
made during type construction. */
if (this_offset == 0)
break;
- else if (! TREE_CONSTANT (this_offset)
- && contains_placeholder_p (this_offset))
+ else if (CONTAINS_PLACEHOLDER_P (this_offset))
this_offset = build (WITH_RECORD_EXPR, sizetype, this_offset, exp);
offset = size_binop (PLUS_EXPR, offset, this_offset);
/* If the index has a self-referential type, pass it to a
WITH_RECORD_EXPR; if the component size is, pass our
component to one. */
- if (! TREE_CONSTANT (index)
- && contains_placeholder_p (index))
+ if (CONTAINS_PLACEHOLDER_P (index))
index = build (WITH_RECORD_EXPR, TREE_TYPE (index), index, exp);
- if (! TREE_CONSTANT (unit_size)
- && contains_placeholder_p (unit_size))
+ if (CONTAINS_PLACEHOLDER_P (unit_size))
unit_size = build (WITH_RECORD_EXPR, sizetype, unit_size, array);
offset = size_binop (PLUS_EXPR, offset,
continue;
}
+
+ /* We can go inside most conversions: all NON_LVALUE_EXPRs, all normal
+ conversions that don't change the mode, and all view conversions
+ except those that need to "step up" the alignment. */
else if (TREE_CODE (exp) != NON_LVALUE_EXPR
- && TREE_CODE (exp) != VIEW_CONVERT_EXPR
+ && ! (TREE_CODE (exp) == VIEW_CONVERT_EXPR
+ && ! ((TYPE_ALIGN (TREE_TYPE (exp))
+ > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ && STRICT_ALIGNMENT
+ && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ < BIGGEST_ALIGNMENT)
+ && (TYPE_ALIGN_OK (TREE_TYPE (exp))
+ || TYPE_ALIGN_OK (TREE_TYPE
+ (TREE_OPERAND (exp, 0))))))
&& ! ((TREE_CODE (exp) == NOP_EXPR
|| TREE_CODE (exp) == CONVERT_EXPR)
&& (TYPE_MODE (TREE_TYPE (exp))
case LABEL_DECL:
{
tree function = decl_function_context (exp);
- /* Handle using a label in a containing function. */
- if (function != current_function_decl
- && function != inline_function_decl && function != 0)
- {
- struct function *p = find_function_data (function);
- p->expr->x_forced_labels
- = gen_rtx_EXPR_LIST (VOIDmode, label_rtx (exp),
- p->expr->x_forced_labels);
- }
+ /* Labels in containing functions, or labels used from initializers,
+ must be forced. */
+ if (modifier == EXPAND_INITIALIZER
+ || (function != current_function_decl
+ && function != inline_function_decl
+ && function != 0))
+ temp = force_label_rtx (exp);
else
- {
- if (modifier == EXPAND_INITIALIZER)
- forced_labels = gen_rtx_EXPR_LIST (VOIDmode,
- label_rtx (exp),
- forced_labels);
- }
+ temp = label_rtx (exp);
- temp = gen_rtx_MEM (FUNCTION_MODE,
- gen_rtx_LABEL_REF (Pmode, label_rtx (exp)));
+ temp = gen_rtx_MEM (FUNCTION_MODE, gen_rtx_LABEL_REF (Pmode, temp));
if (function != current_function_decl
&& function != inline_function_decl && function != 0)
LABEL_REF_NONLOCAL_P (XEXP (temp, 0)) = 1;
case COMPLEX_CST:
case STRING_CST:
- if (! TREE_CST_RTL (exp))
- output_constant_def (exp, 1);
+ temp = output_constant_def (exp, 1);
- /* TREE_CST_RTL probably contains a constant address.
+ /* TEMP contains a constant address.
On RISC machines where a constant address isn't valid,
make some insns to get that address into a register. */
- if (GET_CODE (TREE_CST_RTL (exp)) == MEM
- && modifier != EXPAND_CONST_ADDRESS
+ if (modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER
&& modifier != EXPAND_SUM
- && (! memory_address_p (mode, XEXP (TREE_CST_RTL (exp), 0))
- || (flag_force_addr
- && GET_CODE (XEXP (TREE_CST_RTL (exp), 0)) != REG)))
- return replace_equiv_address (TREE_CST_RTL (exp),
- copy_rtx (XEXP (TREE_CST_RTL (exp), 0)));
- return TREE_CST_RTL (exp);
+ && (! memory_address_p (mode, XEXP (temp, 0))
+ || flag_force_addr))
+ return replace_equiv_address (temp,
+ copy_rtx (XEXP (temp, 0)));
+ return temp;
case EXPR_WITH_FILE_LOCATION:
{
rtx to_return;
- const char *saved_input_filename = input_filename;
- int saved_lineno = lineno;
+ location_t saved_loc = input_location;
input_filename = EXPR_WFL_FILENAME (exp);
- lineno = EXPR_WFL_LINENO (exp);
+ input_line = EXPR_WFL_LINENO (exp);
if (EXPR_WFL_EMIT_LINE_NOTE (exp))
- emit_line_note (input_filename, lineno);
+ emit_line_note (input_filename, input_line);
/* Possibly avoid switching back and forth here. */
to_return = expand_expr (EXPR_WFL_NODE (exp), target, tmode, modifier);
- input_filename = saved_input_filename;
- lineno = saved_lineno;
+ input_location = saved_loc;
return to_return;
}
&& ((TREE_CODE (type) == VECTOR_TYPE
&& !is_zeros_p (exp))
|| ! mostly_zeros_p (exp)))))
- || (modifier == EXPAND_INITIALIZER && TREE_CONSTANT (exp)))
+ || ((modifier == EXPAND_INITIALIZER
+ || modifier == EXPAND_CONST_ADDRESS)
+ && TREE_CONSTANT (exp)))
{
rtx constructor = output_constant_def (exp, 1);
}
}
}
- /* Fall through. */
+ goto normal_inner_ref;
case COMPONENT_REF:
- case BIT_FIELD_REF:
- case ARRAY_RANGE_REF:
/* If the operand is a CONSTRUCTOR, we can just extract the
- appropriate field if it is present. Don't do this if we have
- already written the data since we want to refer to that copy
- and varasm.c assumes that's what we'll do. */
- if (code == COMPONENT_REF
- && TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR
- && TREE_CST_RTL (TREE_OPERAND (exp, 0)) == 0)
+ appropriate field if it is present. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR)
{
tree elt;
return op0;
}
}
+ goto normal_inner_ref;
+ case BIT_FIELD_REF:
+ case ARRAY_RANGE_REF:
+ normal_inner_ref:
{
enum machine_mode mode1;
HOST_WIDE_INT bitsize, bitpos;
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
|| (mode1 != BLKmode
- && ((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
+ && (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
+ || (bitpos % GET_MODE_ALIGNMENT (mode) != 0))
&& SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0)))
- || (bitpos % GET_MODE_ALIGNMENT (mode) != 0)))
+ || (bitpos % BITS_PER_UNIT != 0)))
/* If the type and the field are a constant size and the
size of the type isn't the same size as the bitfield,
we must use bitfield operations. */
op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier);
/* If the input and output modes are both the same, we are done.
- Otherwise, if neither mode is BLKmode and both are within a word, we
- can use gen_lowpart. If neither is true, make sure the operand is
- in memory and convert the MEM to the new mode. */
+ Otherwise, if neither mode is BLKmode and both are integral and within
+ a word, we can use gen_lowpart. If neither is true, make sure the
+ operand is in memory and convert the MEM to the new mode. */
if (TYPE_MODE (type) == GET_MODE (op0))
;
else if (TYPE_MODE (type) != BLKmode && GET_MODE (op0) != BLKmode
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
&& GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD
&& GET_MODE_SIZE (GET_MODE (op0)) <= UNITS_PER_WORD)
op0 = gen_lowpart (TYPE_MODE (type), op0);
op0);
else if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
|| GET_CODE (op0) == CONCAT || GET_CODE (op0) == ADDRESSOF
- || GET_CODE (op0) == PARALLEL)
+ || GET_CODE (op0) == PARALLEL || GET_CODE (op0) == LO_SUM)
{
/* If the operand is a SAVE_EXPR, we can deal with this by
forcing the SAVE_EXPR into memory. */
temp = gen_reg_rtx (CASE_VECTOR_MODE);
vector = gen_rtx_MEM (CASE_VECTOR_MODE, index);
RTX_UNCHANGING_P (vector) = 1;
+ MEM_NOTRAP_P (vector) = 1;
convert_move (temp, vector, 0);
emit_jump_insn (gen_tablejump (temp, table_label));