/* Convert tree expression to rtl instructions, for GNU compiler.
Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "tree-flow.h"
#include "target.h"
#include "timevar.h"
+#include "df.h"
/* Decide whether a function's arguments should be processed
from first to last or from last to first.
They should if the stack and args grow in opposite directions, but
only if we have push insns. */
static void move_by_pieces_1 (rtx (*) (rtx, ...), enum machine_mode,
struct move_by_pieces *);
static bool block_move_libcall_safe_for_call_parm (void);
-static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned);
-static rtx emit_block_move_via_libcall (rtx, rtx, rtx);
+static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT);
static tree emit_block_move_libcall_fn (int);
static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
static rtx clear_by_pieces_1 (void *, HOST_WIDE_INT, enum machine_mode);
static void store_by_pieces_1 (struct store_by_pieces *, unsigned int);
static void store_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode,
struct store_by_pieces *);
-static bool clear_storage_via_clrmem (rtx, rtx, unsigned);
-static rtx clear_storage_via_libcall (rtx, rtx);
static tree clear_storage_libcall_fn (int);
static rtx compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
tree, tree, int, int);
static void store_constructor (tree, rtx, int, HOST_WIDE_INT);
static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode,
- tree, enum machine_mode, int, tree, int);
+ tree, tree, int, bool);
-static unsigned HOST_WIDE_INT highest_pow2_factor (tree);
static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (tree, tree);
static int is_aligning_offset (tree, tree);
#endif
static void do_tablejump (rtx, enum machine_mode, rtx, rtx, rtx);
static rtx const_vector_from_tree (tree);
+static void write_complex_part (rtx, rtx, bool);
/* Record for each mode whether we can move a register directly to or
from an object of that mode in memory. If we can't, we won't try
/* This array records the insn_code of insns to perform block moves. */
enum insn_code movmem_optab[NUM_MACHINE_MODES];
-/* This array records the insn_code of insns to perform block clears. */
-enum insn_code clrmem_optab[NUM_MACHINE_MODES];
+/* This array records the insn_code of insns to perform block sets. */
+enum insn_code setmem_optab[NUM_MACHINE_MODES];
-/* These arrays record the insn_code of two different kinds of insns
+/* These arrays record the insn_code of three different kinds of insns
to perform block compares. */
enum insn_code cmpstr_optab[NUM_MACHINE_MODES];
+enum insn_code cmpstrn_optab[NUM_MACHINE_MODES];
enum insn_code cmpmem_optab[NUM_MACHINE_MODES];
+/* Synchronization primitives. */
+enum insn_code sync_add_optab[NUM_MACHINE_MODES];
+enum insn_code sync_sub_optab[NUM_MACHINE_MODES];
+enum insn_code sync_ior_optab[NUM_MACHINE_MODES];
+enum insn_code sync_and_optab[NUM_MACHINE_MODES];
+enum insn_code sync_xor_optab[NUM_MACHINE_MODES];
+enum insn_code sync_nand_optab[NUM_MACHINE_MODES];
+enum insn_code sync_old_add_optab[NUM_MACHINE_MODES];
+enum insn_code sync_old_sub_optab[NUM_MACHINE_MODES];
+enum insn_code sync_old_ior_optab[NUM_MACHINE_MODES];
+enum insn_code sync_old_and_optab[NUM_MACHINE_MODES];
+enum insn_code sync_old_xor_optab[NUM_MACHINE_MODES];
+enum insn_code sync_old_nand_optab[NUM_MACHINE_MODES];
+enum insn_code sync_new_add_optab[NUM_MACHINE_MODES];
+enum insn_code sync_new_sub_optab[NUM_MACHINE_MODES];
+enum insn_code sync_new_ior_optab[NUM_MACHINE_MODES];
+enum insn_code sync_new_and_optab[NUM_MACHINE_MODES];
+enum insn_code sync_new_xor_optab[NUM_MACHINE_MODES];
+enum insn_code sync_new_nand_optab[NUM_MACHINE_MODES];
+enum insn_code sync_compare_and_swap[NUM_MACHINE_MODES];
+enum insn_code sync_compare_and_swap_cc[NUM_MACHINE_MODES];
+enum insn_code sync_lock_test_and_set[NUM_MACHINE_MODES];
+enum insn_code sync_lock_release[NUM_MACHINE_MODES];
+
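/* A minimal sketch, not part of this patch: like the other optab tables
   above, each sync_* array is indexed by machine mode and checked against
   CODE_FOR_nothing before use, the same way the movmem/setmem tables are
   probed below.  The operand order shown here is an assumption for
   illustration only.  */

static rtx
sync_cas_sketch (rtx mem, rtx old_val, rtx new_val)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode = sync_compare_and_swap[(int) mode];
  rtx target;

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;	/* The caller would fall back to a libcall.  */

  target = gen_reg_rtx (mode);
  emit_insn (GEN_FCN ((int) icode) (target, mem, old_val, new_val));
  return target;
}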
/* SLOW_UNALIGNED_ACCESS is nonzero if unaligned accesses are very slow. */
#ifndef SLOW_UNALIGNED_ACCESS
if (! HARD_REGNO_MODE_OK (regno, mode))
continue;
- REGNO (reg) = regno;
+ SET_REGNO (reg, regno);
SET_SRC (pat) = mem;
SET_DEST (pat) = reg;
{
enum machine_mode to_mode = GET_MODE (to);
enum machine_mode from_mode = GET_MODE (from);
- int to_real = GET_MODE_CLASS (to_mode) == MODE_FLOAT;
- int from_real = GET_MODE_CLASS (from_mode) == MODE_FLOAT;
+ int to_real = SCALAR_FLOAT_MODE_P (to_mode);
+ int from_real = SCALAR_FLOAT_MODE_P (from_mode);
enum insn_code code;
rtx libcall;
gcc_assert (to_real == from_real);
+ gcc_assert (to_mode != BLKmode);
+ gcc_assert (from_mode != BLKmode);
/* If the source and destination are already the same, then there's
nothing to do. */
rtx value, insns;
convert_optab tab;
- gcc_assert (GET_MODE_PRECISION (from_mode)
- != GET_MODE_PRECISION (to_mode));
-
- if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
+ gcc_assert ((GET_MODE_PRECISION (from_mode)
+ != GET_MODE_PRECISION (to_mode))
+ || (DECIMAL_FLOAT_MODE_P (from_mode)
+ != DECIMAL_FLOAT_MODE_P (to_mode)));
+
+ if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
+ /* Conversion between decimal float and binary float, same size. */
+ tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
+ else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
tab = sext_optab;
else
tab = trunc_optab;
}
if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
{
+ rtx new_from;
enum machine_mode full_mode
= smallest_mode_for_size (GET_MODE_BITSIZE (from_mode), MODE_INT);
gcc_assert (sext_optab->handlers[full_mode][from_mode].insn_code
!= CODE_FOR_nothing);
- emit_unop_insn (sext_optab->handlers[full_mode][from_mode].insn_code,
- to, from, UNKNOWN);
if (to_mode == full_mode)
- return;
+ {
+ emit_unop_insn (sext_optab->handlers[full_mode][from_mode].insn_code,
+ to, from, UNKNOWN);
+ return;
+ }
+
+ new_from = gen_reg_rtx (full_mode);
+ emit_unop_insn (sext_optab->handlers[full_mode][from_mode].insn_code,
+ new_from, from, UNKNOWN);
/* else proceed to integer conversions below. */
from_mode = full_mode;
+ from = new_from;
}
/* Now both modes are integers. */
if ((code = can_extend_p (to_mode, from_mode, unsignedp))
!= CODE_FOR_nothing)
{
- if (flag_force_mem)
- from = force_not_mem (from);
-
emit_unop_insn (code, to, from, equiv_code);
return;
}
0 otherwise. */
rtx
-emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
+emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
+ unsigned int expected_align, HOST_WIDE_INT expected_size)
{
bool may_use_call;
rtx retval = 0;
switch (method)
{
case BLOCK_OP_NORMAL:
+ case BLOCK_OP_TAILCALL:
may_use_call = true;
break;
if (GET_CODE (size) == CONST_INT && MOVE_BY_PIECES_P (INTVAL (size), align))
move_by_pieces (x, y, INTVAL (size), align, 0);
- else if (emit_block_move_via_movmem (x, y, size, align))
+ else if (emit_block_move_via_movmem (x, y, size, align,
+ expected_align, expected_size))
;
else if (may_use_call)
- retval = emit_block_move_via_libcall (x, y, size);
+ retval = emit_block_move_via_libcall (x, y, size,
+ method == BLOCK_OP_TAILCALL);
else
emit_block_move_via_loop (x, y, size, align);
return retval;
}
+rtx
+emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
+{
+ return emit_block_move_hints (x, y, size, method, 0, -1);
+}
+
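/* A minimal usage sketch, not part of this patch: a caller with
   profile-derived knowledge can pass hints through the new entry point,
   while everything else keeps the old one.  The alignment (in bits) and
   size values here are made up.  */

static rtx
copy_block_sketch (rtx dst, rtx src, HOST_WIDE_INT nbytes)
{
  /* Plain copy: equivalent to the pre-patch emit_block_move.  */
  emit_block_move (dst, src, GEN_INT (nbytes), BLOCK_OP_NORMAL);

  /* Copy with hints: the block is expected to be 64-bit aligned and
     around 256 bytes long, letting a movmem pattern pick a better
     expansion strategy.  */
  return emit_block_move_hints (dst, src, GEN_INT (nbytes),
				BLOCK_OP_NORMAL, 64, 256);
}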
/* A subroutine of emit_block_move. Returns true if calling the
block move libcall will not clobber any parameters which may have
already been placed on the stack. */
/* If registers go on the stack anyway, any argument is sure to clobber
an outgoing argument. */
-#if defined (REG_PARM_STACK_SPACE) && defined (OUTGOING_REG_PARM_STACK_SPACE)
- {
- tree fn = emit_block_move_libcall_fn (false);
- (void) fn;
- if (REG_PARM_STACK_SPACE (fn) != 0)
- return false;
- }
+#if defined (REG_PARM_STACK_SPACE)
+ if (OUTGOING_REG_PARM_STACK_SPACE)
+ {
+ tree fn;
+ fn = emit_block_move_libcall_fn (false);
+ if (REG_PARM_STACK_SPACE (fn) != 0)
+ return false;
+ }
#endif
/* If any argument goes in memory, then it might clobber an outgoing
rtx tmp = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
if (!tmp || !REG_P (tmp))
return false;
- if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode,
- NULL_TREE, 1))
+ if (targetm.calls.arg_partial_bytes (&args_so_far, mode, NULL, 1))
return false;
FUNCTION_ARG_ADVANCE (args_so_far, mode, NULL_TREE, 1);
}
return true if successful. */
static bool
-emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align)
+emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
+ unsigned int expected_align, HOST_WIDE_INT expected_size)
{
rtx opalign = GEN_INT (align / BITS_PER_UNIT);
int save_volatile_ok = volatile_ok;
enum machine_mode mode;
+ if (expected_align < align)
+ expected_align = align;
+
/* Since this is a move insn, we don't care about volatility. */
volatile_ok = 1;
that it doesn't fail the expansion because it thinks
emitting the libcall would be more efficient. */
- pat = GEN_FCN ((int) code) (x, y, op2, opalign);
+ if (insn_data[(int) code].n_operands == 4)
+ pat = GEN_FCN ((int) code) (x, y, op2, opalign);
+ else
+ pat = GEN_FCN ((int) code) (x, y, op2, opalign,
+ GEN_INT (expected_align),
+ GEN_INT (expected_size));
if (pat)
{
emit_insn (pat);
/* A subroutine of emit_block_move. Expand a call to memcpy.
Return the return value from memcpy, 0 otherwise. */
-static rtx
-emit_block_move_via_libcall (rtx dst, rtx src, rtx size)
+rtx
+emit_block_move_via_libcall (rtx dst, rtx src, rtx size, bool tailcall)
{
rtx dst_addr, src_addr;
- tree call_expr, arg_list, fn, src_tree, dst_tree, size_tree;
+ tree call_expr, fn, src_tree, dst_tree, size_tree;
enum machine_mode size_mode;
rtx retval;
size_tree = make_tree (sizetype, size);
fn = emit_block_move_libcall_fn (true);
- arg_list = tree_cons (NULL_TREE, size_tree, NULL_TREE);
- arg_list = tree_cons (NULL_TREE, src_tree, arg_list);
- arg_list = tree_cons (NULL_TREE, dst_tree, arg_list);
-
- /* Now we have to build up the CALL_EXPR itself. */
- call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
- call_expr = build3 (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)),
- call_expr, arg_list, NULL_TREE);
+ call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree);
+ CALL_EXPR_TAILCALL (call_expr) = tailcall;
- retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0);
+ retval = expand_normal (call_expr);
return retval;
}
TREE_PUBLIC (fn) = 1;
DECL_ARTIFICIAL (fn) = 1;
TREE_NOTHROW (fn) = 1;
+ DECL_VISIBILITY (fn) = VISIBILITY_DEFAULT;
+ DECL_VISIBILITY_SPECIFIED (fn) = 1;
block_move_fn = fn;
}
return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
}
-/* Emit code to move a block ORIG_SRC of type TYPE to a block DST,
- where DST is non-consecutive registers represented by a PARALLEL.
- SSIZE represents the total size of block ORIG_SRC in bytes, or -1
- if not known. */
+/* A subroutine of emit_group_load. Arguments as for emit_group_load,
+ except that values are placed in TMPS[i], and must later be moved
+ into the corresponding XEXP (XVECEXP (DST, 0, i), 0) element. */
-void
-emit_group_load (rtx dst, rtx orig_src, tree type ATTRIBUTE_UNUSED, int ssize)
+static void
+emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
{
- rtx *tmps, src;
+ rtx src;
int start, i;
+ enum machine_mode m = GET_MODE (orig_src);
gcc_assert (GET_CODE (dst) == PARALLEL);
+ if (m != VOIDmode
+ && !SCALAR_INT_MODE_P (m)
+ && !MEM_P (orig_src)
+ && GET_CODE (orig_src) != CONCAT)
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (orig_src));
+ if (imode == BLKmode)
+ src = assign_stack_temp (GET_MODE (orig_src), ssize, 0);
+ else
+ src = gen_reg_rtx (imode);
+ if (imode != BLKmode)
+ src = gen_lowpart (GET_MODE (orig_src), src);
+ emit_move_insn (src, orig_src);
+ /* ...and back again. */
+ if (imode != BLKmode)
+ src = gen_lowpart (imode, src);
+ emit_group_load_1 (tmps, dst, src, type, ssize);
+ return;
+ }
+
/* Check for a NULL entry, used to indicate that the parameter goes
both on the stack and in registers. */
if (XEXP (XVECEXP (dst, 0, 0), 0))
else
start = 1;
- tmps = alloca (sizeof (rtx) * XVECLEN (dst, 0));
-
/* Process the pieces. */
for (i = start; i < XVECLEN (dst, 0); i++)
{
tmps[i] = gen_reg_rtx (mode);
emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
}
+ else if (COMPLEX_MODE_P (mode)
+ && GET_MODE (src) == mode
+ && bytelen == GET_MODE_SIZE (mode))
+ /* Let emit_move_complex do the bulk of the work. */
+ tmps[i] = src;
else if (GET_CODE (src) == CONCAT)
{
unsigned int slen = GET_MODE_SIZE (GET_MODE (src));
else
{
rtx mem;
-
+
gcc_assert (!bytepos);
mem = assign_stack_temp (GET_MODE (src), slen, 0);
emit_move_insn (mem, src);
- tmps[i] = adjust_address (mem, mode, 0);
+ tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
+ 0, 1, NULL_RTX, mode, mode);
}
}
/* FIXME: A SIMD parallel will eventually lead to a subreg of a
tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
build_int_cst (NULL_TREE, shift), tmps[i], 0);
}
+}
+
+/* Emit code to move a block SRC of type TYPE to a block DST,
+ where DST is non-consecutive registers represented by a PARALLEL.
+ SSIZE represents the total size of block SRC in bytes, or -1
+ if not known. */
+
+void
+emit_group_load (rtx dst, rtx src, tree type, int ssize)
+{
+ rtx *tmps;
+ int i;
+
+ tmps = alloca (sizeof (rtx) * XVECLEN (dst, 0));
+ emit_group_load_1 (tmps, dst, src, type, ssize);
/* Copy the extracted pieces into the proper (probable) hard regs. */
- for (i = start; i < XVECLEN (dst, 0); i++)
- emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0), tmps[i]);
+ for (i = 0; i < XVECLEN (dst, 0); i++)
+ {
+ rtx d = XEXP (XVECEXP (dst, 0, i), 0);
+ if (d == NULL)
+ continue;
+ emit_move_insn (d, tmps[i]);
+ }
+}
+
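/* A minimal sketch, not part of this patch, of how emit_group_load is
   typically driven: DST is a PARALLEL built by a target's argument or
   return-value hook, e.g. a 16-byte struct returned in two DImode
   registers.  The register numbers are hypothetical.  */

static void
group_load_sketch (rtx src_mem)
{
  rtx par = gen_rtx_PARALLEL
    (BLKmode,
     gen_rtvec (2,
		gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DImode, 0),
				   const0_rtx),
		gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DImode, 1),
				   GEN_INT (8))));
  emit_group_load (par, src_mem, NULL_TREE, 16);
}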
+/* Similar, but load SRC into new pseudos in a format that looks like
+ PARALLEL. This can later be fed to emit_group_move to get things
+ in the right place. */
+
+rtx
+emit_group_load_into_temps (rtx parallel, rtx src, tree type, int ssize)
+{
+ rtvec vec;
+ int i;
+
+ vec = rtvec_alloc (XVECLEN (parallel, 0));
+ emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);
+
+ /* Convert the vector to look just like the original PARALLEL, except
+ with the computed values. */
+ for (i = 0; i < XVECLEN (parallel, 0); i++)
+ {
+ rtx e = XVECEXP (parallel, 0, i);
+ rtx d = XEXP (e, 0);
+
+ if (d)
+ {
+ d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
+ e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
+ }
+ RTVEC_ELT (vec, i) = e;
+ }
+
+ return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
}
/* Emit code to move a block SRC to block DST, where SRC and DST are
XEXP (XVECEXP (src, 0, i), 0));
}
+/* Move a group of registers represented by a PARALLEL into pseudos. */
+
+rtx
+emit_group_move_into_temps (rtx src)
+{
+ rtvec vec = rtvec_alloc (XVECLEN (src, 0));
+ int i;
+
+ for (i = 0; i < XVECLEN (src, 0); i++)
+ {
+ rtx e = XVECEXP (src, 0, i);
+ rtx d = XEXP (e, 0);
+
+ if (d)
+ e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
+ RTVEC_ELT (vec, i) = e;
+ }
+
+ return gen_rtx_PARALLEL (GET_MODE (src), vec);
+}
+
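/* A minimal sketch, not part of this patch: the *_into_temps variants
   let a caller split the load from the final hard-register assignment,
   e.g. around code that might clobber those registers.  PAR is a
   PARALLEL as above; the intervening code is hypothetical.  */

static void
group_two_phase_sketch (rtx par, rtx src_mem, tree type, int ssize)
{
  /* Phase 1: pull the pieces into fresh pseudos.  */
  rtx tmp = emit_group_load_into_temps (par, src_mem, type, ssize);

  /* ... emit intervening code that may clobber hard registers ...  */

  /* Phase 2: move the pseudos into the hard registers of PAR.  */
  emit_group_move (par, tmp);
}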
/* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
where SRC is non-consecutive registers represented by a PARALLEL.
SSIZE represents the total size of block ORIG_DST, or -1 if not
emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
{
rtx *tmps, dst;
- int start, i;
+ int start, finish, i;
+ enum machine_mode m = GET_MODE (orig_dst);
gcc_assert (GET_CODE (src) == PARALLEL);
+ if (!SCALAR_INT_MODE_P (m)
+ && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (orig_dst));
+ if (imode == BLKmode)
+ dst = assign_stack_temp (GET_MODE (orig_dst), ssize, 0);
+ else
+ dst = gen_reg_rtx (imode);
+ emit_group_store (dst, src, type, ssize);
+ if (imode != BLKmode)
+ dst = gen_lowpart (GET_MODE (orig_dst), dst);
+ emit_move_insn (orig_dst, dst);
+ return;
+ }
+
/* Check for a NULL entry, used to indicate that the parameter goes
both on the stack and in registers. */
if (XEXP (XVECEXP (src, 0, 0), 0))
start = 0;
else
start = 1;
+ finish = XVECLEN (src, 0);
- tmps = alloca (sizeof (rtx) * XVECLEN (src, 0));
+ tmps = alloca (sizeof (rtx) * finish);
/* Copy the (probable) hard regs into pseudos. */
- for (i = start; i < XVECLEN (src, 0); i++)
+ for (i = start; i < finish; i++)
{
rtx reg = XEXP (XVECEXP (src, 0, i), 0);
- tmps[i] = gen_reg_rtx (GET_MODE (reg));
- emit_move_insn (tmps[i], reg);
+ if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
+ {
+ tmps[i] = gen_reg_rtx (GET_MODE (reg));
+ emit_move_insn (tmps[i], reg);
+ }
+ else
+ tmps[i] = reg;
}
/* If we won't be storing directly into memory, protect the real destination
}
else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
{
- dst = gen_reg_rtx (GET_MODE (orig_dst));
+ enum machine_mode outer = GET_MODE (dst);
+ enum machine_mode inner;
+ HOST_WIDE_INT bytepos;
+ bool done = false;
+ rtx temp;
+
+ if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER)
+ dst = gen_reg_rtx (outer);
+
/* Make life a bit easier for combine. */
- emit_move_insn (dst, CONST0_RTX (GET_MODE (orig_dst)));
+ /* If the first element of the vector is the low part
+ of the destination mode, use a paradoxical subreg to
+ initialize the destination. */
+ if (start < finish)
+ {
+ inner = GET_MODE (tmps[start]);
+ bytepos = subreg_lowpart_offset (inner, outer);
+ if (INTVAL (XEXP (XVECEXP (src, 0, start), 1)) == bytepos)
+ {
+ temp = simplify_gen_subreg (outer, tmps[start],
+ inner, 0);
+ if (temp)
+ {
+ emit_move_insn (dst, temp);
+ done = true;
+ start++;
+ }
+ }
+ }
+
+ /* If the first element wasn't the low part, try the last. */
+ if (!done
+ && start < finish - 1)
+ {
+ inner = GET_MODE (tmps[finish - 1]);
+ bytepos = subreg_lowpart_offset (inner, outer);
+ if (INTVAL (XEXP (XVECEXP (src, 0, finish - 1), 1)) == bytepos)
+ {
+ temp = simplify_gen_subreg (outer, tmps[finish - 1],
+ inner, 0);
+ if (temp)
+ {
+ emit_move_insn (dst, temp);
+ done = true;
+ finish--;
+ }
+ }
+ }
+
+ /* Otherwise, simply initialize the result to zero. */
+ if (!done)
+ emit_move_insn (dst, CONST0_RTX (outer));
}
/* Process the pieces. */
- for (i = start; i < XVECLEN (src, 0); i++)
+ for (i = start; i < finish; i++)
{
HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
enum machine_mode mode = GET_MODE (tmps[i]);
use_reg (rtx *call_fusage, rtx reg)
{
gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);
-
+
*call_fusage
= gen_rtx_EXPR_LIST (VOIDmode,
gen_rtx_USE (VOIDmode, reg), *call_fusage);
its length in bytes. */
rtx
-clear_storage (rtx object, rtx size)
+clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
+ unsigned int expected_align, HOST_WIDE_INT expected_size)
{
- rtx retval = 0;
- unsigned int align = (MEM_P (object) ? MEM_ALIGN (object)
- : GET_MODE_ALIGNMENT (GET_MODE (object)));
+ enum machine_mode mode = GET_MODE (object);
+ unsigned int align;
+
+ gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
just move a zero. Otherwise, do this a piece at a time. */
- if (GET_MODE (object) != BLKmode
+ if (mode != BLKmode
&& GET_CODE (size) == CONST_INT
- && INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (object)))
- emit_move_insn (object, CONST0_RTX (GET_MODE (object)));
- else
- {
- if (size == const0_rtx)
- ;
- else if (GET_CODE (size) == CONST_INT
- && CLEAR_BY_PIECES_P (INTVAL (size), align))
- clear_by_pieces (object, INTVAL (size), align);
- else if (clear_storage_via_clrmem (object, size, align))
- ;
- else
- retval = clear_storage_via_libcall (object, size);
- }
-
- return retval;
-}
-
-/* A subroutine of clear_storage. Expand a clrmem pattern;
- return true if successful. */
-
-static bool
-clear_storage_via_clrmem (rtx object, rtx size, unsigned int align)
-{
- /* Try the most limited insn first, because there's no point
- including more than one in the machine description unless
- the more limited one has some advantage. */
-
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
- enum machine_mode mode;
-
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ && INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (mode))
{
- enum insn_code code = clrmem_optab[(int) mode];
- insn_operand_predicate_fn pred;
-
- if (code != CODE_FOR_nothing
- /* We don't need MODE to be narrower than
- BITS_PER_HOST_WIDE_INT here because if SIZE is less than
- the mode mask, as it is returned by the macro, it will
- definitely be less than the actual mode mask. */
- && ((GET_CODE (size) == CONST_INT
- && ((unsigned HOST_WIDE_INT) INTVAL (size)
- <= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (object, BLKmode))
- && ((pred = insn_data[(int) code].operand[2].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
+ rtx zero = CONST0_RTX (mode);
+ if (zero != NULL)
{
- rtx op1;
- rtx last = get_last_insn ();
- rtx pat;
-
- op1 = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[1].predicate;
- if (pred != 0 && ! (*pred) (op1, mode))
- op1 = copy_to_mode_reg (mode, op1);
+ emit_move_insn (object, zero);
+ return NULL;
+ }
- pat = GEN_FCN ((int) code) (object, op1, opalign);
- if (pat)
+ if (COMPLEX_MODE_P (mode))
+ {
+ zero = CONST0_RTX (GET_MODE_INNER (mode));
+ if (zero != NULL)
{
- emit_insn (pat);
- return true;
+ write_complex_part (object, zero, 0);
+ write_complex_part (object, zero, 1);
+ return NULL;
}
- else
- delete_insns_since (last);
}
}
- return false;
+ if (size == const0_rtx)
+ return NULL;
+
+ align = MEM_ALIGN (object);
+
+ if (GET_CODE (size) == CONST_INT
+ && CLEAR_BY_PIECES_P (INTVAL (size), align))
+ clear_by_pieces (object, INTVAL (size), align);
+ else if (set_storage_via_setmem (object, size, const0_rtx, align,
+ expected_align, expected_size))
+ ;
+ else
+ return set_storage_via_libcall (object, size, const0_rtx,
+ method == BLOCK_OP_TAILCALL);
+
+ return NULL;
+}
+
+rtx
+clear_storage (rtx object, rtx size, enum block_op_methods method)
+{
+ return clear_storage_hints (object, size, method, 0, -1);
}
+
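/* A minimal usage sketch, not part of this patch: zeroing a 32-byte
   BLKmode stack temporary.  The method argument is new; existing
   callers are updated to pass BLOCK_OP_NORMAL explicitly.  */

static void
clear_block_sketch (void)
{
  rtx mem = assign_stack_temp (BLKmode, 32, 0);
  clear_storage (mem, GEN_INT (32), BLOCK_OP_NORMAL);
}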
/* A subroutine of clear_storage. Expand a call to memset.
Return the return value of memset, 0 otherwise. */
-static rtx
-clear_storage_via_libcall (rtx object, rtx size)
+rtx
+set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
{
- tree call_expr, arg_list, fn, object_tree, size_tree;
+ tree call_expr, fn, object_tree, size_tree, val_tree;
enum machine_mode size_mode;
rtx retval;
for returning pointers, we could end up generating incorrect code. */
object_tree = make_tree (ptr_type_node, object);
+ if (GET_CODE (val) != CONST_INT)
+ val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
size_tree = make_tree (sizetype, size);
+ val_tree = make_tree (integer_type_node, val);
fn = clear_storage_libcall_fn (true);
- arg_list = tree_cons (NULL_TREE, size_tree, NULL_TREE);
- arg_list = tree_cons (NULL_TREE, integer_zero_node, arg_list);
- arg_list = tree_cons (NULL_TREE, object_tree, arg_list);
+ call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
+ CALL_EXPR_TAILCALL (call_expr) = tailcall;
- /* Now we have to build up the CALL_EXPR itself. */
- call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
- call_expr = build3 (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)),
- call_expr, arg_list, NULL_TREE);
-
- retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0);
+ retval = expand_normal (call_expr);
return retval;
}
-/* A subroutine of clear_storage_via_libcall. Create the tree node
+/* A subroutine of set_storage_via_libcall. Create the tree node
for the function we use for block clears. The first time FOR_CALL
is true, we call assemble_external. */
TREE_PUBLIC (fn) = 1;
DECL_ARTIFICIAL (fn) = 1;
TREE_NOTHROW (fn) = 1;
+ DECL_VISIBILITY (fn) = VISIBILITY_DEFAULT;
+ DECL_VISIBILITY_SPECIFIED (fn) = 1;
block_clear_fn = fn;
}
return block_clear_fn;
}
\f
-/* Generate code to copy Y into X.
- Both Y and X must have the same mode, except that
- Y can be a constant with VOIDmode.
- This mode cannot be BLKmode; use emit_block_move for that.
-
- Return the last instruction emitted. */
+/* Expand a setmem pattern; return true if successful. */
-rtx
-emit_move_insn (rtx x, rtx y)
+bool
+set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
+ unsigned int expected_align, HOST_WIDE_INT expected_size)
{
- enum machine_mode mode = GET_MODE (x);
- rtx y_cst = NULL_RTX;
- rtx last_insn, set;
+ /* Try the most limited insn first, because there's no point
+ including more than one in the machine description unless
+ the more limited one has some advantage. */
- gcc_assert (mode != BLKmode
- && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
+ rtx opalign = GEN_INT (align / BITS_PER_UNIT);
+ enum machine_mode mode;
- if (CONSTANT_P (y))
- {
- if (optimize
- && SCALAR_FLOAT_MODE_P (GET_MODE (x))
- && (last_insn = compress_float_constant (x, y)))
- return last_insn;
+ if (expected_align < align)
+ expected_align = align;
- y_cst = y;
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ enum insn_code code = setmem_optab[(int) mode];
+ insn_operand_predicate_fn pred;
- if (!LEGITIMATE_CONSTANT_P (y))
+ if (code != CODE_FOR_nothing
+ /* We don't need MODE to be narrower than
+ BITS_PER_HOST_WIDE_INT here because if SIZE is less than
+ the mode mask, as it is returned by the macro, it will
+ definitely be less than the actual mode mask. */
+ && ((GET_CODE (size) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (size)
+ <= (GET_MODE_MASK (mode) >> 1)))
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
+ && ((pred = insn_data[(int) code].operand[0].predicate) == 0
+ || (*pred) (object, BLKmode))
+ && ((pred = insn_data[(int) code].operand[3].predicate) == 0
+ || (*pred) (opalign, VOIDmode)))
{
- y = force_const_mem (mode, y);
+ rtx opsize, opchar;
+ enum machine_mode char_mode;
+ rtx last = get_last_insn ();
+ rtx pat;
- /* If the target's cannot_force_const_mem prevented the spill,
- assume that the target's move expanders will also take care
- of the non-legitimate constant. */
- if (!y)
- y = y_cst;
+ opsize = convert_to_mode (mode, size, 1);
+ pred = insn_data[(int) code].operand[1].predicate;
+ if (pred != 0 && ! (*pred) (opsize, mode))
+ opsize = copy_to_mode_reg (mode, opsize);
+
+ opchar = val;
+ char_mode = insn_data[(int) code].operand[2].mode;
+ if (char_mode != VOIDmode)
+ {
+ opchar = convert_to_mode (char_mode, opchar, 1);
+ pred = insn_data[(int) code].operand[2].predicate;
+ if (pred != 0 && ! (*pred) (opchar, char_mode))
+ opchar = copy_to_mode_reg (char_mode, opchar);
+ }
+
+ if (insn_data[(int) code].n_operands == 4)
+ pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign);
+ else
+ pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign,
+ GEN_INT (expected_align),
+ GEN_INT (expected_size));
+ if (pat)
+ {
+ emit_insn (pat);
+ return true;
+ }
+ else
+ delete_insns_since (last);
}
}
- /* If X or Y are memory references, verify that their addresses are valid
- for the machine. */
- if (MEM_P (x)
- && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
- && ! push_operand (x, GET_MODE (x)))
- || (flag_force_addr
- && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
- x = validize_mem (x);
+ return false;
+}
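/* A minimal sketch, not part of this patch, of a memset-style expansion
   built from the pieces above: try the setmem pattern first and fall
   back to the libcall, mirroring what clear_storage_hints does with
   VAL == const0_rtx.  No hints are passed here.  */

static rtx
memset_sketch (rtx object, rtx size, rtx val)
{
  if (set_storage_via_setmem (object, size, val, MEM_ALIGN (object), 0, -1))
    return NULL_RTX;
  return set_storage_via_libcall (object, size, val, false);
}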
- if (MEM_P (y)
- && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
- || (flag_force_addr
- && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
- y = validize_mem (y);
+\f
+/* Write to one of the components of the complex value CPLX. Write VAL to
+ the real part if IMAG_P is false, and the imaginary part if it's true. */
- gcc_assert (mode != BLKmode);
+static void
+write_complex_part (rtx cplx, rtx val, bool imag_p)
+{
+ enum machine_mode cmode;
+ enum machine_mode imode;
+ unsigned ibitsize;
- last_insn = emit_move_insn_1 (x, y);
+ if (GET_CODE (cplx) == CONCAT)
+ {
+ emit_move_insn (XEXP (cplx, imag_p), val);
+ return;
+ }
- if (y_cst && REG_P (x)
- && (set = single_set (last_insn)) != NULL_RTX
- && SET_DEST (set) == x
- && ! rtx_equal_p (y_cst, SET_SRC (set)))
- set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
+ cmode = GET_MODE (cplx);
+ imode = GET_MODE_INNER (cmode);
+ ibitsize = GET_MODE_BITSIZE (imode);
- return last_insn;
+ /* For MEMs simplify_gen_subreg may generate an invalid new address
+ because, e.g., the original address is considered mode-dependent
+ by the target, which restricts simplify_subreg from invoking
+ adjust_address_nv. Instead of preparing fallback support for an
+ invalid address, we call adjust_address_nv directly. */
+ if (MEM_P (cplx))
+ {
+ emit_move_insn (adjust_address_nv (cplx, imode,
+ imag_p ? GET_MODE_SIZE (imode) : 0),
+ val);
+ return;
+ }
+
+ /* If the sub-object is at least word sized, then we know that subregging
+ will work. This special case is important, since store_bit_field
+ wants to operate on integer modes, and there's rarely an OImode to
+ correspond to TCmode. */
+ if (ibitsize >= BITS_PER_WORD
+ /* For hard regs we have exact predicates. Assume we can split
+ the original object if it spans an even number of hard regs.
+ This special case is important for SCmode on 64-bit platforms
+ where the natural size of floating-point regs is 32-bit. */
+ || (REG_P (cplx)
+ && REGNO (cplx) < FIRST_PSEUDO_REGISTER
+ && hard_regno_nregs[REGNO (cplx)][cmode] % 2 == 0))
+ {
+ rtx part = simplify_gen_subreg (imode, cplx, cmode,
+ imag_p ? GET_MODE_SIZE (imode) : 0);
+ if (part)
+ {
+ emit_move_insn (part, val);
+ return;
+ }
+ else
+ /* simplify_gen_subreg may fail for sub-word MEMs. */
+ gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
+ }
+
+ store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, imode, val);
}
-/* Low level part of emit_move_insn.
- Called just like emit_move_insn, but assumes X and Y
- are basically valid. */
+/* Extract one of the components of the complex value CPLX. Extract the
+ real part if IMAG_P is false, and the imaginary part if it's true. */
-rtx
-emit_move_insn_1 (rtx x, rtx y)
+static rtx
+read_complex_part (rtx cplx, bool imag_p)
{
- enum machine_mode mode = GET_MODE (x);
- enum machine_mode submode;
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum machine_mode cmode, imode;
+ unsigned ibitsize;
- gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
+ if (GET_CODE (cplx) == CONCAT)
+ return XEXP (cplx, imag_p);
- if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
- return
- emit_insn (GEN_FCN (mov_optab->handlers[(int) mode].insn_code) (x, y));
+ cmode = GET_MODE (cplx);
+ imode = GET_MODE_INNER (cmode);
+ ibitsize = GET_MODE_BITSIZE (imode);
- /* Expand complex moves by moving real part and imag part, if possible. */
- else if ((class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
- && BLKmode != (submode = GET_MODE_INNER (mode))
- && (mov_optab->handlers[(int) submode].insn_code
- != CODE_FOR_nothing))
+ /* Special case reads from complex constants that got spilled to memory. */
+ if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
{
- /* Don't split destination if it is a stack push. */
- int stack = push_operand (x, GET_MODE (x));
+ tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
+ if (decl && TREE_CODE (decl) == COMPLEX_CST)
+ {
+ tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
+ if (CONSTANT_CLASS_P (part))
+ return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
+ }
+ }
-#ifdef PUSH_ROUNDING
- /* In case we output to the stack, but the size is smaller than the
- machine can push exactly, we need to use move instructions. */
- if (stack
- && (PUSH_ROUNDING (GET_MODE_SIZE (submode))
- != GET_MODE_SIZE (submode)))
- {
- rtx temp;
- HOST_WIDE_INT offset1, offset2;
-
- /* Do not use anti_adjust_stack, since we don't want to update
- stack_pointer_delta. */
- temp = expand_binop (Pmode,
-#ifdef STACK_GROWS_DOWNWARD
- sub_optab,
-#else
- add_optab,
-#endif
- stack_pointer_rtx,
- GEN_INT
- (PUSH_ROUNDING
- (GET_MODE_SIZE (GET_MODE (x)))),
- stack_pointer_rtx, 0, OPTAB_LIB_WIDEN);
+ /* For MEMs simplify_gen_subreg may generate an invalid new address
+ because, e.g., the original address is considered mode-dependent
+ by the target, which restricts simplify_subreg from invoking
+ adjust_address_nv. Instead of preparing fallback support for an
+ invalid address, we call adjust_address_nv directly. */
+ if (MEM_P (cplx))
+ return adjust_address_nv (cplx, imode,
+ imag_p ? GET_MODE_SIZE (imode) : 0);
+
+ /* If the sub-object is at least word sized, then we know that subregging
+ will work. This special case is important, since extract_bit_field
+ wants to operate on integer modes, and there's rarely an OImode to
+ correspond to TCmode. */
+ if (ibitsize >= BITS_PER_WORD
+ /* For hard regs we have exact predicates. Assume we can split
+ the original object if it spans an even number of hard regs.
+ This special case is important for SCmode on 64-bit platforms
+ where the natural size of floating-point regs is 32-bit. */
+ || (REG_P (cplx)
+ && REGNO (cplx) < FIRST_PSEUDO_REGISTER
+ && hard_regno_nregs[REGNO (cplx)][cmode] % 2 == 0))
+ {
+ rtx ret = simplify_gen_subreg (imode, cplx, cmode,
+ imag_p ? GET_MODE_SIZE (imode) : 0);
+ if (ret)
+ return ret;
+ else
+ /* simplify_gen_subreg may fail for sub-word MEMs. */
+ gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
+ }
- if (temp != stack_pointer_rtx)
- emit_move_insn (stack_pointer_rtx, temp);
+ return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
+ true, NULL_RTX, imode, imode);
+}
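/* A minimal sketch, not part of this patch: using the two helpers above
   to form a complex conjugate by parts.  It works uniformly for CONCAT,
   MEM and REG representations; DST and SRC are assumed to share the same
   complex mode, and expand_unop/neg_optab are used purely for
   illustration.  */

static void
conjugate_sketch (rtx dst, rtx src)
{
  enum machine_mode imode = GET_MODE_INNER (GET_MODE (src));
  rtx re = read_complex_part (src, false);
  rtx im = read_complex_part (src, true);

  write_complex_part (dst, re, false);
  write_complex_part (dst, expand_unop (imode, neg_optab, im,
					NULL_RTX, 0), true);
}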
+\f
+/* A subroutine of emit_move_insn_1. Yet another lowpart generator.
+ NEW_MODE and OLD_MODE are the same size. Return NULL if X cannot be
+ represented in NEW_MODE. If FORCE is true, this will never happen, as
+ we'll force-create a SUBREG if needed. */
-#ifdef STACK_GROWS_DOWNWARD
- offset1 = 0;
- offset2 = GET_MODE_SIZE (submode);
-#else
- offset1 = -PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x)));
- offset2 = (-PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x)))
- + GET_MODE_SIZE (submode));
-#endif
+static rtx
+emit_move_change_mode (enum machine_mode new_mode,
+ enum machine_mode old_mode, rtx x, bool force)
+{
+ rtx ret;
- emit_move_insn (change_address (x, submode,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (offset1))),
- gen_realpart (submode, y));
- emit_move_insn (change_address (x, submode,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (offset2))),
- gen_imagpart (submode, y));
+ if (push_operand (x, GET_MODE (x)))
+ {
+ ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
+ MEM_COPY_ATTRIBUTES (ret, x);
+ }
+ else if (MEM_P (x))
+ {
+ /* We don't have to worry about changing the address since the
+ size in bytes is supposed to be the same. */
+ if (reload_in_progress)
+ {
+ /* Copy the MEM to change the mode and move any
+ substitutions from the old MEM to the new one. */
+ ret = adjust_address_nv (x, new_mode, 0);
+ copy_replacements (x, ret);
}
else
+ ret = adjust_address (x, new_mode, 0);
+ }
+ else
+ {
+ /* Note that we do want simplify_subreg's behavior of validating
+ that the new mode is ok for a hard register. If we were to use
+ simplify_gen_subreg, we would create the subreg, but would
+ probably run into the target not being able to implement it. */
+ /* Except, of course, when FORCE is true, when this is exactly what
+ we want. Which is needed for CCmodes on some targets. */
+ if (force)
+ ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
+ else
+ ret = simplify_subreg (new_mode, x, old_mode, 0);
+ }
+
+ return ret;
+}
+
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X using
+ an integer mode of the same size as MODE. Returns the instruction
+ emitted, or NULL if such a move could not be generated. */
+
+static rtx
+emit_move_via_integer (enum machine_mode mode, rtx x, rtx y, bool force)
+{
+ enum machine_mode imode;
+ enum insn_code code;
+
+ /* There must exist a mode of the exact size we require. */
+ imode = int_mode_for_mode (mode);
+ if (imode == BLKmode)
+ return NULL_RTX;
+
+ /* The target must support moves in this mode. */
+ code = mov_optab->handlers[imode].insn_code;
+ if (code == CODE_FOR_nothing)
+ return NULL_RTX;
+
+ x = emit_move_change_mode (imode, mode, x, force);
+ if (x == NULL_RTX)
+ return NULL_RTX;
+ y = emit_move_change_mode (imode, mode, y, force);
+ if (y == NULL_RTX)
+ return NULL_RTX;
+ return emit_insn (GEN_FCN (code) (x, y));
+}
+
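/* Illustrative note, not part of this patch: this is the helper that
   lets CC or decimal-float values travel through an integer move
   pattern when no native pattern exists; e.g. a DDmode (64-bit decimal
   float) move becomes a DImode move wherever the target provides
   movdi.  */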
+/* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
+ Return an equivalent MEM that does not use an auto-increment. */
+
+static rtx
+emit_move_resolve_push (enum machine_mode mode, rtx x)
+{
+ enum rtx_code code = GET_CODE (XEXP (x, 0));
+ HOST_WIDE_INT adjust;
+ rtx temp;
+
+ adjust = GET_MODE_SIZE (mode);
+#ifdef PUSH_ROUNDING
+ adjust = PUSH_ROUNDING (adjust);
#endif
- /* If this is a stack, push the highpart first, so it
- will be in the argument order.
+ if (code == PRE_DEC || code == POST_DEC)
+ adjust = -adjust;
+ else if (code == PRE_MODIFY || code == POST_MODIFY)
+ {
+ rtx expr = XEXP (XEXP (x, 0), 1);
+ HOST_WIDE_INT val;
+
+ gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
+ gcc_assert (GET_CODE (XEXP (expr, 1)) == CONST_INT);
+ val = INTVAL (XEXP (expr, 1));
+ if (GET_CODE (expr) == MINUS)
+ val = -val;
+ gcc_assert (adjust == val || adjust == -val);
+ adjust = val;
+ }
+
+ /* Do not use anti_adjust_stack, since we don't want to update
+ stack_pointer_delta. */
+ temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
+ GEN_INT (adjust), stack_pointer_rtx,
+ 0, OPTAB_LIB_WIDEN);
+ if (temp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, temp);
- In that case, change_address is used only to convert
- the mode, not to change the address. */
- if (stack)
- {
- /* Note that the real part always precedes the imag part in memory
- regardless of machine's endianness. */
-#ifdef STACK_GROWS_DOWNWARD
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_imagpart (submode, y));
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_realpart (submode, y));
-#else
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_realpart (submode, y));
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_imagpart (submode, y));
+ switch (code)
+ {
+ case PRE_INC:
+ case PRE_DEC:
+ case PRE_MODIFY:
+ temp = stack_pointer_rtx;
+ break;
+ case POST_INC:
+ case POST_DEC:
+ case POST_MODIFY:
+ temp = plus_constant (stack_pointer_rtx, -adjust);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ return replace_equiv_address (x, temp);
+}
+
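/* Worked example (illustrative, not part of this patch): for a PRE_DEC
   push of an SImode value on a target where PUSH_ROUNDING is the
   identity, ADJUST becomes -4, the stack pointer is decremented by
   hand, and the returned MEM is (mem:SI (reg sp)); for POST_INC the
   stack pointer has already been bumped by 4, so the returned MEM is
   (mem:SI (plus (reg sp) (const_int -4))), pointing back at the
   original slot.  */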
+/* A subroutine of emit_move_complex. Generate a move from Y into X.
+ X is known to satisfy push_operand, and MODE is known to be complex.
+ Returns the last instruction emitted. */
+
+rtx
+emit_move_complex_push (enum machine_mode mode, rtx x, rtx y)
+{
+ enum machine_mode submode = GET_MODE_INNER (mode);
+ bool imag_first;
+
+#ifdef PUSH_ROUNDING
+ unsigned int submodesize = GET_MODE_SIZE (submode);
+
+ /* In case we output to the stack, but the size is smaller than the
+ machine can push exactly, we need to use move instructions. */
+ if (PUSH_ROUNDING (submodesize) != submodesize)
+ {
+ x = emit_move_resolve_push (mode, x);
+ return emit_move_insn (x, y);
+ }
#endif
- }
- else
- {
- rtx realpart_x, realpart_y;
- rtx imagpart_x, imagpart_y;
-
- /* If this is a complex value with each part being smaller than a
- word, the usual calling sequence will likely pack the pieces into
- a single register. Unfortunately, SUBREG of hard registers only
- deals in terms of words, so we have a problem converting input
- arguments to the CONCAT of two registers that is used elsewhere
- for complex values. If this is before reload, we can copy it into
- memory and reload. FIXME, we should see about using extract and
- insert on integer registers, but complex short and complex char
- variables should be rarely used. */
- if (GET_MODE_BITSIZE (mode) < 2 * BITS_PER_WORD
- && (reload_in_progress | reload_completed) == 0)
- {
- int packed_dest_p
- = (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER);
- int packed_src_p
- = (REG_P (y) && REGNO (y) < FIRST_PSEUDO_REGISTER);
- if (packed_dest_p || packed_src_p)
- {
- enum mode_class reg_class = ((class == MODE_COMPLEX_FLOAT)
- ? MODE_FLOAT : MODE_INT);
+ /* Note that the real part always precedes the imag part in memory
+ regardless of machine's endianness. */
+ switch (GET_CODE (XEXP (x, 0)))
+ {
+ case PRE_DEC:
+ case POST_DEC:
+ imag_first = true;
+ break;
+ case PRE_INC:
+ case POST_INC:
+ imag_first = false;
+ break;
+ default:
+ gcc_unreachable ();
+ }
- enum machine_mode reg_mode
- = mode_for_size (GET_MODE_BITSIZE (mode), reg_class, 1);
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ read_complex_part (y, imag_first));
+ return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ read_complex_part (y, !imag_first));
+}
- if (reg_mode != BLKmode)
- {
- rtx mem = assign_stack_temp (reg_mode,
- GET_MODE_SIZE (mode), 0);
- rtx cmem = adjust_address (mem, mode, 0);
+/* A subroutine of emit_move_complex. Perform the move from Y to X
+ via two moves of the parts. Returns the last instruction emitted. */
- if (packed_dest_p)
- {
- rtx sreg = gen_rtx_SUBREG (reg_mode, x, 0);
+rtx
+emit_move_complex_parts (rtx x, rtx y)
+{
+ /* Show the output dies here. This is necessary for SUBREGs
+ of pseudos since we cannot track their lifetimes correctly;
+ hard regs shouldn't appear here except as return values. */
+ if (!reload_completed && !reload_in_progress
+ && REG_P (x) && !reg_overlap_mentioned_p (x, y))
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
- emit_move_insn_1 (cmem, y);
- return emit_move_insn_1 (sreg, mem);
- }
- else
- {
- rtx sreg = gen_rtx_SUBREG (reg_mode, y, 0);
+ write_complex_part (x, read_complex_part (y, false), false);
+ write_complex_part (x, read_complex_part (y, true), true);
- emit_move_insn_1 (mem, sreg);
- return emit_move_insn_1 (x, cmem);
- }
- }
- }
- }
+ return get_last_insn ();
+}
- realpart_x = gen_realpart (submode, x);
- realpart_y = gen_realpart (submode, y);
- imagpart_x = gen_imagpart (submode, x);
- imagpart_y = gen_imagpart (submode, y);
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
+ MODE is known to be complex. Returns the last instruction emitted. */
- /* Show the output dies here. This is necessary for SUBREGs
- of pseudos since we cannot track their lifetimes correctly;
- hard regs shouldn't appear here except as return values.
- We never want to emit such a clobber after reload. */
- if (x != y
- && ! (reload_in_progress || reload_completed)
- && (GET_CODE (realpart_x) == SUBREG
- || GET_CODE (imagpart_x) == SUBREG))
- emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+static rtx
+emit_move_complex (enum machine_mode mode, rtx x, rtx y)
+{
+ bool try_int;
+
+ /* Need to take special care for pushes, to maintain proper ordering
+ of the data, and possibly extra padding. */
+ if (push_operand (x, mode))
+ return emit_move_complex_push (mode, x, y);
+
+ /* See if we can coerce the target into moving both values at once. */
+
+ /* Move floating point as parts. */
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ && mov_optab->handlers[GET_MODE_INNER (mode)].insn_code != CODE_FOR_nothing)
+ try_int = false;
+ /* Not possible if the values are inherently not adjacent. */
+ else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
+ try_int = false;
+ /* Is possible if both are registers (or subregs of registers). */
+ else if (register_operand (x, mode) && register_operand (y, mode))
+ try_int = true;
+ /* If one of the operands is a memory, and alignment constraints
+ are friendly enough, we may be able to do combined memory operations.
+ We do not attempt this if Y is a constant because that combination is
+ usually better with the by-parts thing below. */
+ else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
+ && (!STRICT_ALIGNMENT
+ || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
+ try_int = true;
+ else
+ try_int = false;
- emit_move_insn (realpart_x, realpart_y);
- emit_move_insn (imagpart_x, imagpart_y);
+ if (try_int)
+ {
+ rtx ret;
+
+ /* For memory to memory moves, optimal behavior can be had with the
+ existing block move logic. */
+ if (MEM_P (x) && MEM_P (y))
+ {
+ emit_block_move (x, y, GEN_INT (GET_MODE_SIZE (mode)),
+ BLOCK_OP_NO_LIBCALL);
+ return get_last_insn ();
}
- return get_last_insn ();
+ ret = emit_move_via_integer (mode, x, y, true);
+ if (ret)
+ return ret;
}
- /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
- find a mode to do it in. If we have a movcc, use it. Otherwise,
- find the MODE_INT mode of the same width. */
- else if (GET_MODE_CLASS (mode) == MODE_CC
- && mov_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
+ return emit_move_complex_parts (x, y);
+}
+
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
+ MODE is known to be MODE_CC. Returns the last instruction emitted. */
+
+static rtx
+emit_move_ccmode (enum machine_mode mode, rtx x, rtx y)
+{
+ rtx ret;
+
+ /* Assume all MODE_CC modes are equivalent; if we have movcc, use it. */
+ if (mode != CCmode)
{
- enum insn_code insn_code;
- enum machine_mode tmode = VOIDmode;
- rtx x1 = x, y1 = y;
+ enum insn_code code = mov_optab->handlers[CCmode].insn_code;
+ if (code != CODE_FOR_nothing)
+ {
+ x = emit_move_change_mode (CCmode, mode, x, true);
+ y = emit_move_change_mode (CCmode, mode, y, true);
+ return emit_insn (GEN_FCN (code) (x, y));
+ }
+ }
- if (mode != CCmode
- && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing)
- tmode = CCmode;
- else
- for (tmode = QImode; tmode != VOIDmode;
- tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode))
- break;
+ /* Otherwise, find the MODE_INT mode of the same width. */
+ ret = emit_move_via_integer (mode, x, y, false);
+ gcc_assert (ret != NULL);
+ return ret;
+}
- gcc_assert (tmode != VOIDmode);
+/* Return true if word I of OP lies entirely in the
+ undefined bits of a paradoxical subreg. */
- /* Get X and Y in TMODE. We can't use gen_lowpart here because it
- may call change_address which is not appropriate if we were
- called when a reload was in progress. We don't have to worry
- about changing the address since the size in bytes is supposed to
- be the same. Copy the MEM to change the mode and move any
- substitutions from the old MEM to the new one. */
+static bool
+undefined_operand_subword_p (rtx op, int i)
+{
+ enum machine_mode innermode, innermostmode;
+ int offset;
+ if (GET_CODE (op) != SUBREG)
+ return false;
+ innermode = GET_MODE (op);
+ innermostmode = GET_MODE (SUBREG_REG (op));
+ offset = i * UNITS_PER_WORD + SUBREG_BYTE (op);
+ /* The SUBREG_BYTE represents the offset, as if the value were stored in
+ memory, except for a paradoxical subreg where we define
+ SUBREG_BYTE to be 0; undo this exception as in
+ simplify_subreg. */
+ if (SUBREG_BYTE (op) == 0
+ && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
+ {
+ int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
+ if (WORDS_BIG_ENDIAN)
+ offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset += difference % UNITS_PER_WORD;
+ }
+ if (offset >= GET_MODE_SIZE (innermostmode)
+ || offset <= -GET_MODE_SIZE (word_mode))
+ return true;
+ return false;
+}
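/* Worked example (illustrative, not part of this patch): on a
   little-endian target with 4-byte words, Y = (subreg:DI (reg:SI) 0) is
   a paradoxical subreg.  For word 0 the offset is 0, inside the SImode
   value, so this returns false; for word 1 the offset is 4, which is
   >= GET_MODE_SIZE (SImode), so it returns true and
   emit_move_multi_word below emits no move for that word.  */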
- if (reload_in_progress)
- {
- x = gen_lowpart_common (tmode, x1);
- if (x == 0 && MEM_P (x1))
- {
- x = adjust_address_nv (x1, tmode, 0);
- copy_replacements (x1, x);
- }
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
+ MODE is any multi-word or full-word mode that lacks a move_insn
+ pattern. Note that you will get better code if you define such
+ patterns, even if they must turn into multiple assembler instructions. */
- y = gen_lowpart_common (tmode, y1);
- if (y == 0 && MEM_P (y1))
- {
- y = adjust_address_nv (y1, tmode, 0);
- copy_replacements (y1, y);
- }
- }
- else
+static rtx
+emit_move_multi_word (enum machine_mode mode, rtx x, rtx y)
+{
+ rtx last_insn = 0;
+ rtx seq, inner;
+ bool need_clobber;
+ int i;
+
+ gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD);
+
+ /* If X is a push on the stack, do the push now and replace
+ X with a reference to the stack pointer. */
+ if (push_operand (x, mode))
+ x = emit_move_resolve_push (mode, x);
+
+ /* If we are in reload, see if either operand is a MEM whose address
+ is scheduled for replacement. */
+ if (reload_in_progress && MEM_P (x)
+ && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
+ x = replace_equiv_address_nv (x, inner);
+ if (reload_in_progress && MEM_P (y)
+ && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
+ y = replace_equiv_address_nv (y, inner);
+
+ start_sequence ();
+
+ need_clobber = false;
+ for (i = 0;
+ i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+ i++)
+ {
+ rtx xpart = operand_subword (x, i, 1, mode);
+ rtx ypart;
+
+ /* Do not generate code for a move if it would come entirely
+ from the undefined bits of a paradoxical subreg. */
+ if (undefined_operand_subword_p (y, i))
+ continue;
+
+ ypart = operand_subword (y, i, 1, mode);
+
+ /* If we can't get a part of Y, put Y into memory if it is a
+ constant. Otherwise, force it into a register. Then we must
+ be able to get a part of Y. */
+ if (ypart == 0 && CONSTANT_P (y))
{
- x = gen_lowpart (tmode, x);
- y = gen_lowpart (tmode, y);
+ y = use_anchored_address (force_const_mem (mode, y));
+ ypart = operand_subword (y, i, 1, mode);
}
+ else if (ypart == 0)
+ ypart = operand_subword_force (y, i, mode);
+
+ gcc_assert (xpart && ypart);
+
+ need_clobber |= (GET_CODE (xpart) == SUBREG);
+
+ last_insn = emit_move_insn (xpart, ypart);
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Show the output dies here. This is necessary for SUBREGs
+ of pseudos since we cannot track their lifetimes correctly;
+ hard regs shouldn't appear here except as return values.
+ We never want to emit such a clobber after reload. */
+ if (x != y
+ && ! (reload_in_progress || reload_completed)
+ && need_clobber != 0)
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+
+ emit_insn (seq);
+
+ return last_insn;
+}
+
+/* Low level part of emit_move_insn.
+ Called just like emit_move_insn, but assumes X and Y
+ are basically valid. */
+
+rtx
+emit_move_insn_1 (rtx x, rtx y)
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum insn_code code;
+
+ gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
+
+ code = mov_optab->handlers[mode].insn_code;
+ if (code != CODE_FOR_nothing)
+ return emit_insn (GEN_FCN (code) (x, y));
+
+ /* Expand complex moves by moving real part and imag part. */
+ if (COMPLEX_MODE_P (mode))
+ return emit_move_complex (mode, x, y);
- insn_code = mov_optab->handlers[(int) tmode].insn_code;
- return emit_insn (GEN_FCN (insn_code) (x, y));
+ if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT)
+ {
+ rtx result = emit_move_via_integer (mode, x, y, true);
+
+ /* If we can't find an integer mode, use multi words. */
+ if (result)
+ return result;
+ else
+ return emit_move_multi_word (mode, x, y);
+ }
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ return emit_move_ccmode (mode, x, y);
+
/* Try using a move pattern for the corresponding integer mode. This is
only safe when simplify_subreg can convert MODE constants into integer
constants. At present, it can only do this reliably if the value
fits within a HOST_WIDE_INT. */
- else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (submode = int_mode_for_mode (mode)) != BLKmode
- && mov_optab->handlers[submode].insn_code != CODE_FOR_nothing)
- return emit_insn (GEN_FCN (mov_optab->handlers[submode].insn_code)
- (simplify_gen_subreg (submode, x, mode, 0),
- simplify_gen_subreg (submode, y, mode, 0)));
-
- /* This will handle any multi-word or full-word mode that lacks a move_insn
- pattern. However, you will get better code if you define such patterns,
- even if they must turn into multiple assembler instructions. */
- else
+ if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
- rtx last_insn = 0;
- rtx seq, inner;
- int need_clobber;
- int i;
-
- gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD);
-
-#ifdef PUSH_ROUNDING
-
- /* If X is a push on the stack, do the push now and replace
- X with a reference to the stack pointer. */
- if (push_operand (x, GET_MODE (x)))
- {
- rtx temp;
- enum rtx_code code;
+ rtx ret = emit_move_via_integer (mode, x, y, false);
+ if (ret)
+ return ret;
+ }
- /* Do not use anti_adjust_stack, since we don't want to update
- stack_pointer_delta. */
- temp = expand_binop (Pmode,
-#ifdef STACK_GROWS_DOWNWARD
- sub_optab,
-#else
- add_optab,
-#endif
- stack_pointer_rtx,
- GEN_INT
- (PUSH_ROUNDING
- (GET_MODE_SIZE (GET_MODE (x)))),
- stack_pointer_rtx, 0, OPTAB_LIB_WIDEN);
-
- if (temp != stack_pointer_rtx)
- emit_move_insn (stack_pointer_rtx, temp);
-
- code = GET_CODE (XEXP (x, 0));
-
- /* Just hope that small offsets off SP are OK. */
- if (code == POST_INC)
- temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- GEN_INT (-((HOST_WIDE_INT)
- GET_MODE_SIZE (GET_MODE (x)))));
- else if (code == POST_DEC)
- temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- GEN_INT (GET_MODE_SIZE (GET_MODE (x))));
- else
- temp = stack_pointer_rtx;
+ return emit_move_multi_word (mode, x, y);
+}
- x = change_address (x, VOIDmode, temp);
- }
-#endif
+/* Generate code to copy Y into X.
+ Both Y and X must have the same mode, except that
+ Y can be a constant with VOIDmode.
+ This mode cannot be BLKmode; use emit_block_move for that.
- /* If we are in reload, see if either operand is a MEM whose address
- is scheduled for replacement. */
- if (reload_in_progress && MEM_P (x)
- && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
- x = replace_equiv_address_nv (x, inner);
- if (reload_in_progress && MEM_P (y)
- && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
- y = replace_equiv_address_nv (y, inner);
+ Return the last instruction emitted. */
- start_sequence ();
+rtx
+emit_move_insn (rtx x, rtx y)
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx y_cst = NULL_RTX;
+ rtx last_insn, set;
- need_clobber = 0;
- for (i = 0;
- i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
- i++)
- {
- rtx xpart = operand_subword (x, i, 1, mode);
- rtx ypart = operand_subword (y, i, 1, mode);
+ gcc_assert (mode != BLKmode
+ && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
- /* If we can't get a part of Y, put Y into memory if it is a
- constant. Otherwise, force it into a register. If we still
- can't get a part of Y, abort. */
- if (ypart == 0 && CONSTANT_P (y))
- {
- y = force_const_mem (mode, y);
- ypart = operand_subword (y, i, 1, mode);
- }
- else if (ypart == 0)
- ypart = operand_subword_force (y, i, mode);
+ if (CONSTANT_P (y))
+ {
+ if (optimize
+ && SCALAR_FLOAT_MODE_P (GET_MODE (x))
+ && (last_insn = compress_float_constant (x, y)))
+ return last_insn;
- gcc_assert (xpart && ypart);
+ y_cst = y;
- need_clobber |= (GET_CODE (xpart) == SUBREG);
+ if (!LEGITIMATE_CONSTANT_P (y))
+ {
+ y = force_const_mem (mode, y);
- last_insn = emit_move_insn (xpart, ypart);
+ /* If the target's cannot_force_const_mem prevented the spill,
+ assume that the target's move expanders will also take care
+ of the non-legitimate constant. */
+ if (!y)
+ y = y_cst;
+ else
+ y = use_anchored_address (y);
}
+ }
- seq = get_insns ();
- end_sequence ();
+ /* If X or Y are memory references, verify that their addresses are valid
+ for the machine. */
+ if (MEM_P (x)
+ && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
+ && ! push_operand (x, GET_MODE (x)))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
+ x = validize_mem (x);
- /* Show the output dies here. This is necessary for SUBREGs
- of pseudos since we cannot track their lifetimes correctly;
- hard regs shouldn't appear here except as return values.
- We never want to emit such a clobber after reload. */
- if (x != y
- && ! (reload_in_progress || reload_completed)
- && need_clobber != 0)
- emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+ if (MEM_P (y)
+ && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
+ y = validize_mem (y);
- emit_insn (seq);
+ gcc_assert (mode != BLKmode);
- return last_insn;
- }
+ last_insn = emit_move_insn_1 (x, y);
+
+ if (y_cst && REG_P (x)
+ && (set = single_set (last_insn)) != NULL_RTX
+ && SET_DEST (set) == x
+ && ! rtx_equal_p (y_cst, SET_SRC (set)))
+ set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
+
+ return last_insn;
}
/* If Y is representable exactly in a narrower mode, and the target can
enum machine_mode orig_srcmode = GET_MODE (y);
enum machine_mode srcmode;
REAL_VALUE_TYPE r;
+ int oldcost, newcost;
REAL_VALUE_FROM_CONST_DOUBLE (r, y);
+ if (LEGITIMATE_CONSTANT_P (y))
+ oldcost = rtx_cost (y, SET);
+ else
+ oldcost = rtx_cost (force_const_mem (dstmode, y), SET);
+
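+  /* An illustrative example (not from this change): on a target where
+     (float_extend:DF (mem:SF ...)) is a single load instruction, the
+     DFmode constant 1.0 can be materialized by loading the exactly
+     representable SFmode 1.0 and extending it; the cost tests below
+     accept that only when it is no more expensive than OLDCOST.  */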
for (srcmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (orig_srcmode));
srcmode != orig_srcmode;
srcmode = GET_MODE_WIDER_MODE (srcmode))
the extension. */
if (! (*insn_data[ic].operand[1].predicate) (trunc_y, srcmode))
continue;
+ /* This is valid, but may not be cheaper than the original. */
+ newcost = rtx_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y), SET);
+ if (oldcost < newcost)
+ continue;
}
else if (float_extend_from_mem[dstmode][srcmode])
- trunc_y = validize_mem (force_const_mem (srcmode, trunc_y));
+ {
+ trunc_y = force_const_mem (srcmode, trunc_y);
+ /* This is valid, but may not be cheaper than the original. */
+ newcost = rtx_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y), SET);
+ if (oldcost < newcost)
+ continue;
+ trunc_y = validize_mem (trunc_y);
+ }
else
continue;
+  /* For CSE's benefit, force the compressed constant pool entry
+     into a new pseudo.  This constant may be used in different modes;
+     if it is not, combine will put things back together for us.  */
+ trunc_y = force_reg (srcmode, trunc_y);
emit_unop_insn (ic, x, trunc_y, UNKNOWN);
last_insn = get_last_insn ();
ALIGN (in bits) is maximum alignment we can assume.
If PARTIAL and REG are both nonzero, then copy that many of the first
- words of X into registers starting with REG, and push the rest of X.
- The amount of space pushed is decreased by PARTIAL words,
- rounded *down* to a multiple of PARM_BOUNDARY.
+ bytes of X into registers starting with REG, and push the rest of X.
+ The amount of space pushed is decreased by PARTIAL bytes.
REG must be a hard register in this case.
If REG is zero but PARTIAL is not, take all other actions for an
argument partially in registers, but do not actually load any
xinner = x;
- if (mode == BLKmode)
+ if (mode == BLKmode
+ || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)))
{
/* Copy a block into the stack, entirely or partially. */
rtx temp;
- int used = partial * UNITS_PER_WORD;
+ int used;
int offset;
int skip;
- if (reg && GET_CODE (reg) == PARALLEL)
+ offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
+ used = partial - offset;
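+      /* Worked example (illustrative): with PARTIAL == 6 bytes and
+	 PARM_BOUNDARY == 32 bits, OFFSET becomes 6 % 4 == 2 and USED
+	 becomes 4, i.e. only the first four bytes need not be copied
+	 to the stack because registers take care of them.  */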
+
+ if (mode != BLKmode)
{
- /* Use the size of the elt to compute offset. */
- rtx elt = XEXP (XVECEXP (reg, 0, 0), 0);
- used = partial * GET_MODE_SIZE (GET_MODE (elt));
- offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
+ /* A value is to be stored in an insufficiently aligned
+ stack slot; copy via a suitably aligned slot if
+ necessary. */
+ size = GEN_INT (GET_MODE_SIZE (mode));
+ if (!MEM_P (xinner))
+ {
+ temp = assign_temp (type, 0, 1, 1);
+ emit_move_insn (temp, xinner);
+ xinner = temp;
+ }
}
- else
- offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
gcc_assert (size);
- used -= offset;
-
/* USED is now the # of bytes we need not copy to the stack
because registers will take care of them. */
int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
int i;
int not_stack;
- /* # words of start of argument
+ /* # bytes of start of argument
that we must make space for but need not store. */
- int offset = partial % (PARM_BOUNDARY / BITS_PER_WORD);
+ int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
int args_offset = INTVAL (args_so_far);
int skip;
offset = 0;
/* Now NOT_STACK gets the number of words that we don't need to
- allocate on the stack. */
- not_stack = partial - offset;
+ allocate on the stack. Convert OFFSET to words too. */
+ not_stack = (partial - offset) / UNITS_PER_WORD;
+ offset /= UNITS_PER_WORD;
/* If the partial register-part of the arg counts in its stack size,
skip the part of stack space corresponding to the registers.
if (GET_CODE (reg) == PARALLEL)
emit_group_load (reg, x, type, -1);
else
- move_block_to_reg (REGNO (reg), x, partial, mode);
+ {
+ gcc_assert (partial % UNITS_PER_WORD == 0);
+ move_block_to_reg (REGNO (reg), x, partial / UNITS_PER_WORD, mode);
+ }
}
if (extra && args_addr == 0 && where_pad == stack_direction)
? 0 : x);
}
-/* Expand an assignment that stores the value of FROM into TO.
- If WANT_VALUE is nonzero, return an rtx for the value of TO.
- (If the value is constant, this rtx is a constant.)
- Otherwise, the returned value is NULL_RTX. */
+/* A subroutine of expand_assignment. Optimize FIELD op= VAL, where
+ FIELD is a bitfield. Returns true if the optimization was successful,
+ and there's nothing else to do. */
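+
+/* An illustrative example (a sketch, not part of this change): given
+
+     struct S { unsigned a : 1; unsigned rest : 31; } s;
+     void f (void) { s.a ^= 1; }
+
+   the store can be done as a single XOR on the word that holds S,
+   instead of an extract/modify/insert sequence.  */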
-rtx
-expand_assignment (tree to, tree from, int want_value)
+static bool
+optimize_bitfield_assignment_op (unsigned HOST_WIDE_INT bitsize,
+ unsigned HOST_WIDE_INT bitpos,
+ enum machine_mode mode1, rtx str_rtx,
+ tree to, tree src)
+{
+ enum machine_mode str_mode = GET_MODE (str_rtx);
+ unsigned int str_bitsize = GET_MODE_BITSIZE (str_mode);
+ tree op0, op1;
+ rtx value, result;
+ optab binop;
+
+ if (mode1 != VOIDmode
+ || bitsize >= BITS_PER_WORD
+ || str_bitsize > BITS_PER_WORD
+ || TREE_SIDE_EFFECTS (to)
+ || TREE_THIS_VOLATILE (to))
+ return false;
+
+ STRIP_NOPS (src);
+ if (!BINARY_CLASS_P (src)
+ || TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
+ return false;
+
+ op0 = TREE_OPERAND (src, 0);
+ op1 = TREE_OPERAND (src, 1);
+ STRIP_NOPS (op0);
+
+ if (!operand_equal_p (to, op0, 0))
+ return false;
+
+ if (MEM_P (str_rtx))
+ {
+ unsigned HOST_WIDE_INT offset1;
+
+ if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
+ str_mode = word_mode;
+ str_mode = get_best_mode (bitsize, bitpos,
+ MEM_ALIGN (str_rtx), str_mode, 0);
+ if (str_mode == VOIDmode)
+ return false;
+ str_bitsize = GET_MODE_BITSIZE (str_mode);
+
+ offset1 = bitpos;
+ bitpos %= str_bitsize;
+ offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
+ str_rtx = adjust_address (str_rtx, str_mode, offset1);
+ }
+ else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
+ return false;
+
+ /* If the bit field covers the whole REG/MEM, store_field
+ will likely generate better code. */
+ if (bitsize >= str_bitsize)
+ return false;
+
+ /* We can't handle fields split across multiple entities. */
+ if (bitpos + bitsize > str_bitsize)
+ return false;
+
+ if (BYTES_BIG_ENDIAN)
+ bitpos = str_bitsize - bitpos - bitsize;
+
+ switch (TREE_CODE (src))
+ {
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+	/* For now, just optimize the case of the topmost bitfield,
+	   where no masking is needed, and the case of 1-bit
+	   bitfields, where xor can be used.  We might also win by one
+	   instruction for the other bitfields if insv/extv
+	   instructions aren't used, so that can be added later.  */
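+	/* E.g. (illustrative, little-endian layout): with
+	   struct S { unsigned lo : 28; unsigned top : 4; } s,
+	   s.top += 1 needs no masking because bits carrying out of
+	   the topmost field fall off the end of the word, while for
+	   a 1-bit field f, s.f += 3 becomes an XOR with (3 & 1).  */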
+ if (bitpos + bitsize != str_bitsize
+ && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
+ break;
+
+ value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
+ value = convert_modes (str_mode,
+ TYPE_MODE (TREE_TYPE (op1)), value,
+ TYPE_UNSIGNED (TREE_TYPE (op1)));
+
+ /* We may be accessing data outside the field, which means
+ we can alias adjacent data. */
+ if (MEM_P (str_rtx))
+ {
+ str_rtx = shallow_copy_rtx (str_rtx);
+ set_mem_alias_set (str_rtx, 0);
+ set_mem_expr (str_rtx, 0);
+ }
+
+ binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
+ if (bitsize == 1 && bitpos + bitsize != str_bitsize)
+ {
+ value = expand_and (str_mode, value, const1_rtx, NULL);
+ binop = xor_optab;
+ }
+ value = expand_shift (LSHIFT_EXPR, str_mode, value,
+ build_int_cst (NULL_TREE, bitpos),
+ NULL_RTX, 1);
+ result = expand_binop (str_mode, binop, str_rtx,
+ value, str_rtx, 1, OPTAB_WIDEN);
+ if (result != str_rtx)
+ emit_move_insn (str_rtx, result);
+ return true;
+
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ if (TREE_CODE (op1) != INTEGER_CST)
+ break;
+ value = expand_expr (op1, NULL_RTX, GET_MODE (str_rtx), EXPAND_NORMAL);
+ value = convert_modes (GET_MODE (str_rtx),
+ TYPE_MODE (TREE_TYPE (op1)), value,
+ TYPE_UNSIGNED (TREE_TYPE (op1)));
+
+ /* We may be accessing data outside the field, which means
+ we can alias adjacent data. */
+ if (MEM_P (str_rtx))
+ {
+ str_rtx = shallow_copy_rtx (str_rtx);
+ set_mem_alias_set (str_rtx, 0);
+ set_mem_expr (str_rtx, 0);
+ }
+
+ binop = TREE_CODE (src) == BIT_IOR_EXPR ? ior_optab : xor_optab;
+ if (bitpos + bitsize != GET_MODE_BITSIZE (GET_MODE (str_rtx)))
+ {
+ rtx mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << bitsize)
+ - 1);
+ value = expand_and (GET_MODE (str_rtx), value, mask,
+ NULL_RTX);
+ }
+ value = expand_shift (LSHIFT_EXPR, GET_MODE (str_rtx), value,
+ build_int_cst (NULL_TREE, bitpos),
+ NULL_RTX, 1);
+ result = expand_binop (GET_MODE (str_rtx), binop, str_rtx,
+ value, str_rtx, 1, OPTAB_WIDEN);
+ if (result != str_rtx)
+ emit_move_insn (str_rtx, result);
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+
+/* Expand an assignment that stores the value of FROM into TO. If NONTEMPORAL
+ is true, try generating a nontemporal store. */
+
+void
+expand_assignment (tree to, tree from, bool nontemporal)
{
rtx to_rtx = 0;
rtx result;
/* Don't crash if the lhs of the assignment was erroneous. */
-
if (TREE_CODE (to) == ERROR_MARK)
{
- result = expand_expr (from, NULL_RTX, VOIDmode, 0);
- return want_value ? result : NULL_RTX;
+ result = expand_normal (from);
+ return;
}
+ /* Optimize away no-op moves without side-effects. */
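+  /* (Illustrative: a source-level self-assignment such as "x = x"
+     reaches this point with TO and FROM equal and expands to no code
+     at all.)  */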
+ if (operand_equal_p (to, from, 0))
+ return;
+
/* Assignment of a structure component needs special treatment
if the structure component's rtx is not simply a MEM.
Assignment of an array element at a constant index, and assignment of
an array element in an unaligned packed structure field, have the same
problem. */
-
- if (TREE_CODE (to) == COMPONENT_REF || TREE_CODE (to) == BIT_FIELD_REF
- || TREE_CODE (to) == ARRAY_REF || TREE_CODE (to) == ARRAY_RANGE_REF
+ if (handled_component_p (to)
|| TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
{
enum machine_mode mode1;
HOST_WIDE_INT bitsize, bitpos;
- rtx orig_to_rtx;
tree offset;
int unsignedp;
int volatilep = 0;
push_temp_slots ();
tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
- &unsignedp, &volatilep);
+ &unsignedp, &volatilep, true);
/* If we are going to use store_bit_field and extract_bit_field,
make sure to_rtx will be safe for multiple use. */
- if (mode1 == VOIDmode && want_value)
- tem = stabilize_reference (tem);
-
- orig_to_rtx = to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, 0);
+ to_rtx = expand_normal (tem);
if (offset != 0)
{
- rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
+ rtx offset_rtx;
- gcc_assert (MEM_P (to_rtx));
+ if (!MEM_P (to_rtx))
+ {
+ /* We can get constant negative offsets into arrays with broken
+ user code. Translate this to a trap instead of ICEing. */
+ gcc_assert (TREE_CODE (offset) == INTEGER_CST);
+ expand_builtin_trap ();
+ to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
+ }
+ offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
#ifdef POINTERS_EXTEND_UNSIGNED
if (GET_MODE (offset_rtx) != Pmode)
offset_rtx = convert_to_mode (Pmode, offset_rtx, 0);
offset));
}
- if (MEM_P (to_rtx))
+ /* Handle expand_expr of a complex value returning a CONCAT. */
+ if (GET_CODE (to_rtx) == CONCAT)
{
- /* If the field is at offset zero, we could have been given the
- DECL_RTX of the parent struct. Don't munge it. */
- to_rtx = shallow_copy_rtx (to_rtx);
-
- set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
- }
-
- /* Deal with volatile and readonly fields. The former is only done
- for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
- if (volatilep && MEM_P (to_rtx))
- {
- if (to_rtx == orig_to_rtx)
- to_rtx = copy_rtx (to_rtx);
- MEM_VOLATILE_P (to_rtx) = 1;
- }
-
- if (MEM_P (to_rtx) && ! can_address_p (to))
- {
- if (to_rtx == orig_to_rtx)
- to_rtx = copy_rtx (to_rtx);
- MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
+ if (TREE_CODE (TREE_TYPE (from)) == COMPLEX_TYPE)
+ {
+ gcc_assert (bitpos == 0);
+ result = store_expr (from, to_rtx, false, nontemporal);
+ }
+ else
+ {
+ gcc_assert (bitpos == 0 || bitpos == GET_MODE_BITSIZE (mode1));
+ result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
+ nontemporal);
+ }
}
-
- /* Optimize bitfld op= val in certain cases. */
- while (mode1 == VOIDmode && !want_value
- && bitsize > 0 && bitsize < BITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (to_rtx)) <= BITS_PER_WORD
- && !TREE_SIDE_EFFECTS (to)
- && !TREE_THIS_VOLATILE (to))
+ else
{
- tree src, op0, op1;
- rtx value, str_rtx = to_rtx;
- HOST_WIDE_INT bitpos1 = bitpos;
- optab binop;
-
- src = from;
- STRIP_NOPS (src);
- if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE
- || TREE_CODE_CLASS (TREE_CODE (src)) != '2')
- break;
-
- op0 = TREE_OPERAND (src, 0);
- op1 = TREE_OPERAND (src, 1);
- STRIP_NOPS (op0);
-
- if (! operand_equal_p (to, op0, 0))
- break;
-
- if (MEM_P (str_rtx))
+ if (MEM_P (to_rtx))
{
- enum machine_mode mode = GET_MODE (str_rtx);
- HOST_WIDE_INT offset1;
-
- if (GET_MODE_BITSIZE (mode) == 0
- || GET_MODE_BITSIZE (mode) > BITS_PER_WORD)
- mode = word_mode;
- mode = get_best_mode (bitsize, bitpos1, MEM_ALIGN (str_rtx),
- mode, 0);
- if (mode == VOIDmode)
- break;
-
- offset1 = bitpos1;
- bitpos1 %= GET_MODE_BITSIZE (mode);
- offset1 = (offset1 - bitpos1) / BITS_PER_UNIT;
- str_rtx = adjust_address (str_rtx, mode, offset1);
- }
- else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
- break;
-
- /* If the bit field covers the whole REG/MEM, store_field
- will likely generate better code. */
- if (bitsize >= GET_MODE_BITSIZE (GET_MODE (str_rtx)))
- break;
+ /* If the field is at offset zero, we could have been given the
+ DECL_RTX of the parent struct. Don't munge it. */
+ to_rtx = shallow_copy_rtx (to_rtx);
- /* We can't handle fields split across multiple entities. */
- if (bitpos1 + bitsize > GET_MODE_BITSIZE (GET_MODE (str_rtx)))
- break;
+ set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
- if (BYTES_BIG_ENDIAN)
- bitpos1 = GET_MODE_BITSIZE (GET_MODE (str_rtx)) - bitpos1
- - bitsize;
-
- /* Special case some bitfield op= exp. */
- switch (TREE_CODE (src))
- {
- case PLUS_EXPR:
- case MINUS_EXPR:
- /* For now, just optimize the case of the topmost bitfield
- where we don't need to do any masking and also
- 1 bit bitfields where xor can be used.
- We might win by one instruction for the other bitfields
- too if insv/extv instructions aren't used, so that
- can be added later. */
- if (bitpos1 + bitsize != GET_MODE_BITSIZE (GET_MODE (str_rtx))
- && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
- break;
- value = expand_expr (op1, NULL_RTX, GET_MODE (str_rtx), 0);
- value = convert_modes (GET_MODE (str_rtx),
- TYPE_MODE (TREE_TYPE (op1)), value,
- TYPE_UNSIGNED (TREE_TYPE (op1)));
-
- /* We may be accessing data outside the field, which means
- we can alias adjacent data. */
- if (MEM_P (str_rtx))
- {
- str_rtx = shallow_copy_rtx (str_rtx);
- set_mem_alias_set (str_rtx, 0);
- set_mem_expr (str_rtx, 0);
- }
-
- binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
- if (bitsize == 1
- && bitpos1 + bitsize != GET_MODE_BITSIZE (GET_MODE (str_rtx)))
- {
- value = expand_and (GET_MODE (str_rtx), value, const1_rtx,
- NULL_RTX);
- binop = xor_optab;
- }
- value = expand_shift (LSHIFT_EXPR, GET_MODE (str_rtx), value,
- build_int_cst (NULL_TREE, bitpos1),
- NULL_RTX, 1);
- result = expand_binop (GET_MODE (str_rtx), binop, str_rtx,
- value, str_rtx, 1, OPTAB_WIDEN);
- if (result != str_rtx)
- emit_move_insn (str_rtx, result);
- free_temp_slots ();
- pop_temp_slots ();
- return NULL_RTX;
-
- default:
- break;
+ /* Deal with volatile and readonly fields. The former is only
+ done for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
+ if (volatilep)
+ MEM_VOLATILE_P (to_rtx) = 1;
+ if (component_uses_parent_alias_set (to))
+ MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
}
- break;
+ if (optimize_bitfield_assignment_op (bitsize, bitpos, mode1,
+ to_rtx, to, from))
+ result = NULL;
+ else
+ result = store_field (to_rtx, bitsize, bitpos, mode1, from,
+ TREE_TYPE (tem), get_alias_set (to),
+ nontemporal);
}
- result = store_field (to_rtx, bitsize, bitpos, mode1, from,
- (want_value
- /* Spurious cast for HPUX compiler. */
- ? ((enum machine_mode)
- TYPE_MODE (TREE_TYPE (to)))
- : VOIDmode),
- unsignedp, TREE_TYPE (tem), get_alias_set (to));
-
- preserve_temp_slots (result);
+ if (result)
+ preserve_temp_slots (result);
free_temp_slots ();
pop_temp_slots ();
-
- /* If the value is meaningful, convert RESULT to the proper mode.
- Otherwise, return nothing. */
- return (want_value ? convert_modes (TYPE_MODE (TREE_TYPE (to)),
- TYPE_MODE (TREE_TYPE (from)),
- result,
- TYPE_UNSIGNED (TREE_TYPE (to)))
- : NULL_RTX);
+ return;
}
/* If the rhs is a function call and its value is not an aggregate,
rtx value;
push_temp_slots ();
- value = expand_expr (from, NULL_RTX, VOIDmode, 0);
+ value = expand_normal (from);
if (to_rtx == 0)
to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
preserve_temp_slots (to_rtx);
free_temp_slots ();
pop_temp_slots ();
- return want_value ? to_rtx : NULL_RTX;
+ return;
}
/* Ordinary treatment. Expand TO to get a REG or MEM rtx.
rtx temp;
push_temp_slots ();
- temp = expand_expr (from, 0, GET_MODE (to_rtx), 0);
+ temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);
if (GET_CODE (to_rtx) == PARALLEL)
emit_group_load (to_rtx, temp, TREE_TYPE (from),
preserve_temp_slots (to_rtx);
free_temp_slots ();
pop_temp_slots ();
- return want_value ? to_rtx : NULL_RTX;
+ return;
}
/* In case we are returning the contents of an object which overlaps
push_temp_slots ();
size = expr_size (from);
- from_rtx = expand_expr (from, NULL_RTX, VOIDmode, 0);
+ from_rtx = expand_normal (from);
emit_library_call (memmove_libfunc, LCT_NORMAL,
VOIDmode, 3, XEXP (to_rtx, 0), Pmode,
preserve_temp_slots (to_rtx);
free_temp_slots ();
pop_temp_slots ();
- return want_value ? to_rtx : NULL_RTX;
+ return;
}
/* Compute FROM and store the value in the rtx we got. */
push_temp_slots ();
- result = store_expr (from, to_rtx, want_value);
+ result = store_expr (from, to_rtx, 0, nontemporal);
preserve_temp_slots (result);
free_temp_slots ();
pop_temp_slots ();
- return want_value ? result : NULL_RTX;
+ return;
+}
+
+/* Emit a nontemporal store insn that moves FROM to TO.  Return true
+   if this succeeded, false otherwise.  */
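+/* (A nontemporal store hints that the stored data will not be reused
+   soon, so the target may bypass the cache; on x86-64 with SSE2 this
+   maps to the movnti family -- an illustrative example of a backend
+   pattern, not something hard-coded here.)  */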
+
+static bool
+emit_storent_insn (rtx to, rtx from)
+{
+ enum machine_mode mode = GET_MODE (to), imode;
+ enum insn_code code = storent_optab->handlers[mode].insn_code;
+ rtx pattern;
+
+ if (code == CODE_FOR_nothing)
+ return false;
+
+ imode = insn_data[code].operand[0].mode;
+ if (!insn_data[code].operand[0].predicate (to, imode))
+ return false;
+
+ imode = insn_data[code].operand[1].mode;
+ if (!insn_data[code].operand[1].predicate (from, imode))
+ {
+ from = copy_to_mode_reg (imode, from);
+ if (!insn_data[code].operand[1].predicate (from, imode))
+ return false;
+ }
+
+ pattern = GEN_FCN (code) (to, from);
+ if (pattern == NULL_RTX)
+ return false;
+
+ emit_insn (pattern);
+ return true;
}
/* Generate code for computing expression EXP,
and storing the value into TARGET.
- If WANT_VALUE & 1 is nonzero, return a copy of the value
- not in TARGET, so that we can be sure to use the proper
- value in a containing expression even if TARGET has something
- else stored in it. If possible, we copy the value through a pseudo
- and return that pseudo. Or, if the value is constant, we try to
- return the constant. In some cases, we return a pseudo
- copied *from* TARGET.
-
If the mode is BLKmode then we may return TARGET itself.
It turns out that in BLKmode it doesn't cause a problem,
because C has no operators that could combine two different
assignments into the same BLKmode object with different values
with no sequence point. Will other languages need this to
be more thorough?
- If WANT_VALUE & 1 is 0, we return NULL, to make sure
- to catch quickly any cases where the caller uses the value
- and fails to set WANT_VALUE.
-
- If WANT_VALUE & 2 is set, this is a store into a call param on the
- stack, and block moves may need to be treated specially. */
+ If CALL_PARAM_P is nonzero, this is a store into a call param on the
+ stack, and block moves may need to be treated specially.
+
+ If NONTEMPORAL is true, try using a nontemporal store instruction. */
rtx
-store_expr (tree exp, rtx target, int want_value)
+store_expr (tree exp, rtx target, int call_param_p, bool nontemporal)
{
rtx temp;
rtx alt_rtl = NULL_RTX;
int dont_return_target = 0;
- int dont_store_target = 0;
if (VOID_TYPE_P (TREE_TYPE (exp)))
{
/* C++ can generate ?: expressions with a throw expression in one
branch and an rvalue in the other. Here, we resolve attempts to
store the throw expression's nonexistent result. */
- gcc_assert (!want_value);
- expand_expr (exp, const0_rtx, VOIDmode, 0);
+ gcc_assert (!call_param_p);
+ expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
return NULL_RTX;
}
if (TREE_CODE (exp) == COMPOUND_EXPR)
/* Perform first part of compound expression, then assign from second
part. */
expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
- want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL);
- return store_expr (TREE_OPERAND (exp, 1), target, want_value);
+ call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
+ return store_expr (TREE_OPERAND (exp, 1), target, call_param_p,
+ nontemporal);
}
else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
{
do_pending_stack_adjust ();
NO_DEFER_POP;
jumpifnot (TREE_OPERAND (exp, 0), lab1);
- store_expr (TREE_OPERAND (exp, 1), target, want_value & 2);
+ store_expr (TREE_OPERAND (exp, 1), target, call_param_p,
+ nontemporal);
emit_jump_insn (gen_jump (lab2));
emit_barrier ();
emit_label (lab1);
- store_expr (TREE_OPERAND (exp, 2), target, want_value & 2);
+ store_expr (TREE_OPERAND (exp, 2), target, call_param_p,
+ nontemporal);
emit_label (lab2);
OK_DEFER_POP;
- return want_value & 1 ? target : NULL_RTX;
- }
- else if ((want_value & 1) != 0
- && MEM_P (target)
- && ! MEM_VOLATILE_P (target)
- && GET_MODE (target) != BLKmode)
- /* If target is in memory and caller wants value in a register instead,
- arrange that. Pass TARGET as target for expand_expr so that,
- if EXP is another assignment, WANT_VALUE will be nonzero for it.
- We know expand_expr will not use the target in that case.
- Don't do this if TARGET is volatile because we are supposed
- to write it and then read it. */
- {
- temp = expand_expr (exp, target, GET_MODE (target),
- want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL);
- if (GET_MODE (temp) != BLKmode && GET_MODE (temp) != VOIDmode)
- {
- /* If TEMP is already in the desired TARGET, only copy it from
- memory and don't store it there again. */
- if (temp == target
- || (rtx_equal_p (temp, target)
- && ! side_effects_p (temp) && ! side_effects_p (target)))
- dont_store_target = 1;
- temp = copy_to_reg (temp);
- }
- dont_return_target = 1;
+ return NULL_RTX;
}
else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
/* If this is a scalar in a register that is stored in a wider mode
{
rtx inner_target = 0;
- /* If we don't want a value, we can do the conversion inside EXP,
- which will often result in some optimizations. Do the conversion
- in two steps: first change the signedness, if needed, then
- the extend. But don't do this if the type of EXP is a subtype
- of something else since then the conversion might involve
- more than just converting modes. */
- if ((want_value & 1) == 0
- && INTEGRAL_TYPE_P (TREE_TYPE (exp))
+ /* We can do the conversion inside EXP, which will often result
+ in some optimizations. Do the conversion in two steps: first
+ change the signedness, if needed, then the extend. But don't
+ do this if the type of EXP is a subtype of something else
+ since then the conversion might involve more than just
+ converting modes. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
&& TREE_TYPE (TREE_TYPE (exp)) == 0
&& (!lang_hooks.reduce_bit_field_operations
|| (GET_MODE_PRECISION (GET_MODE (target))
{
if (TYPE_UNSIGNED (TREE_TYPE (exp))
!= SUBREG_PROMOTED_UNSIGNED_P (target))
- exp = convert
- (lang_hooks.types.signed_or_unsigned_type
- (SUBREG_PROMOTED_UNSIGNED_P (target), TREE_TYPE (exp)), exp);
+ {
+ /* Some types, e.g. Fortran's logical*4, won't have a signed
+ version, so use the mode instead. */
+ tree ntype
+ = (signed_or_unsigned_type_for
+ (SUBREG_PROMOTED_UNSIGNED_P (target), TREE_TYPE (exp)));
+ if (ntype == NULL)
+ ntype = lang_hooks.types.type_for_mode
+ (TYPE_MODE (TREE_TYPE (exp)),
+ SUBREG_PROMOTED_UNSIGNED_P (target));
+
+ exp = fold_convert (ntype, exp);
+ }
- exp = convert (lang_hooks.types.type_for_mode
- (GET_MODE (SUBREG_REG (target)),
- SUBREG_PROMOTED_UNSIGNED_P (target)),
- exp);
+ exp = fold_convert (lang_hooks.types.type_for_mode
+ (GET_MODE (SUBREG_REG (target)),
+ SUBREG_PROMOTED_UNSIGNED_P (target)),
+ exp);
inner_target = SUBREG_REG (target);
}
temp = expand_expr (exp, inner_target, VOIDmode,
- want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL);
-
- /* If TEMP is a MEM and we want a result value, make the access
- now so it gets done only once. Strictly speaking, this is
- only necessary if the MEM is volatile, or if the address
- overlaps TARGET. But not performing the load twice also
- reduces the amount of rtl we generate and then have to CSE. */
- if (MEM_P (temp) && (want_value & 1) != 0)
- temp = copy_to_reg (temp);
+ call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
/* If TEMP is a VOIDmode constant, use convert_modes to make
sure that we properly convert it. */
convert_move (SUBREG_REG (target), temp,
SUBREG_PROMOTED_UNSIGNED_P (target));
- /* If we promoted a constant, change the mode back down to match
- target. Otherwise, the caller might get confused by a result whose
- mode is larger than expected. */
-
- if ((want_value & 1) != 0 && GET_MODE (temp) != GET_MODE (target))
- {
- if (GET_MODE (temp) != VOIDmode)
- {
- temp = gen_lowpart_SUBREG (GET_MODE (target), temp);
- SUBREG_PROMOTED_VAR_P (temp) = 1;
- SUBREG_PROMOTED_UNSIGNED_SET (temp,
- SUBREG_PROMOTED_UNSIGNED_P (target));
- }
- else
- temp = convert_modes (GET_MODE (target),
- GET_MODE (SUBREG_REG (target)),
- temp, SUBREG_PROMOTED_UNSIGNED_P (target));
- }
-
- return want_value & 1 ? temp : NULL_RTX;
+ return NULL_RTX;
}
else
{
- temp = expand_expr_real (exp, target, GET_MODE (target),
- (want_value & 2
+ rtx tmp_target;
+
+      /* If we want to use a nontemporal store, force the value into a
+	 register first.  */
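+      /* (Design note, not from the patch: nontemporal store patterns
+	 want a register source, and expanding EXP directly into the
+	 TARGET MEM would leave emit_storent_insn below with nothing
+	 to do.)  */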
+ tmp_target = nontemporal ? NULL_RTX : target;
+ temp = expand_expr_real (exp, tmp_target, GET_MODE (target),
+ (call_param_p
? EXPAND_STACK_PARM : EXPAND_NORMAL),
&alt_rtl);
/* Return TARGET if it's a specified hardware register.
&& REGNO (target) < FIRST_PSEUDO_REGISTER)
&& !(MEM_P (target) && MEM_VOLATILE_P (target))
&& ! rtx_equal_p (temp, target)
- && (CONSTANT_P (temp) || (want_value & 1) != 0))
+ && CONSTANT_P (temp))
dont_return_target = 1;
}
|| (temp != target && (side_effects_p (temp)
|| side_effects_p (target))))
&& TREE_CODE (exp) != ERROR_MARK
- && ! dont_store_target
/* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
but TARGET is not valid memory reference, TEMP will differ
from TARGET although it is really the same location. */
&& !(alt_rtl && rtx_equal_p (alt_rtl, target))
- /* If there's nothing to copy, don't bother. Don't call expr_size
- unless necessary, because some front-ends (C++) expr_size-hook
- aborts on objects that are not supposed to be bit-copied or
- bit-initialized. */
+	   /* If there's nothing to copy, don't bother.  Don't call
+	      expr_size unless necessary, because some front ends'
+	      (e.g. C++) expr_size hook must not be given objects that
+	      are not supposed to be bit-copied or bit-initialized.  */
&& expr_size (exp) != const0_rtx)
{
if (GET_MODE (temp) != GET_MODE (target)
temp = convert_to_mode (GET_MODE (target), temp, unsignedp);
emit_move_insn (target, temp);
}
+ else if (GET_MODE (target) == BLKmode)
+ emit_block_move (target, temp, expr_size (exp),
+ (call_param_p
+ ? BLOCK_OP_CALL_PARM
+ : BLOCK_OP_NORMAL));
else
convert_move (target, temp, unsignedp);
}
if (GET_CODE (size) == CONST_INT
&& INTVAL (size) < TREE_STRING_LENGTH (exp))
emit_block_move (target, temp, size,
- (want_value & 2
+ (call_param_p
? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
else
{
size_int (TREE_STRING_LENGTH (exp)));
rtx copy_size_rtx
= expand_expr (copy_size, NULL_RTX, VOIDmode,
- (want_value & 2
+ (call_param_p
? EXPAND_STACK_PARM : EXPAND_NORMAL));
rtx label = 0;
copy_size_rtx = convert_to_mode (ptr_mode, copy_size_rtx,
TYPE_UNSIGNED (sizetype));
emit_block_move (target, temp, copy_size_rtx,
- (want_value & 2
+ (call_param_p
? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
/* Figure out how much is left in TARGET that we have to clear.
}
if (size != const0_rtx)
- clear_storage (target, size);
+ clear_storage (target, size, BLOCK_OP_NORMAL);
if (label)
emit_label (label);
int_size_in_bytes (TREE_TYPE (exp)));
else if (GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
- (want_value & 2
+ (call_param_p
? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
+ else if (nontemporal
+ && emit_storent_insn (target, temp))
+ /* If we managed to emit a nontemporal store, there is nothing else to
+ do. */
+ ;
else
{
temp = force_operand (temp, target);
}
}
- /* If we don't want a value, return NULL_RTX. */
- if ((want_value & 1) == 0)
- return NULL_RTX;
-
- /* If we are supposed to return TEMP, do so as long as it isn't a MEM.
- ??? The latter test doesn't seem to make sense. */
- else if (dont_return_target && !MEM_P (temp))
- return temp;
-
- /* Return TARGET itself if it is a hard register. */
- else if ((want_value & 1) != 0
- && GET_MODE (target) != BLKmode
- && ! (REG_P (target)
- && REGNO (target) < FIRST_PSEUDO_REGISTER))
- return copy_to_reg (target);
-
- else
- return target;
+ return NULL_RTX;
}
\f
-/* Examine CTOR. Discover how many scalar fields are set to nonzero
- values and place it in *P_NZ_ELTS. Discover how many scalar fields
- are set to non-constant values and place it in *P_NC_ELTS. */
+/* Helper for categorize_ctor_elements. Identical interface. */
-static void
+static bool
categorize_ctor_elements_1 (tree ctor, HOST_WIDE_INT *p_nz_elts,
- HOST_WIDE_INT *p_nc_elts)
+ HOST_WIDE_INT *p_elt_count,
+ bool *p_must_clear)
{
- HOST_WIDE_INT nz_elts, nc_elts;
- tree list;
+ unsigned HOST_WIDE_INT idx;
+ HOST_WIDE_INT nz_elts, elt_count;
+ tree value, purpose;
+
+ /* Whether CTOR is a valid constant initializer, in accordance with what
+ initializer_constant_valid_p does. If inferred from the constructor
+ elements, true until proven otherwise. */
+ bool const_from_elts_p = constructor_static_from_elts_p (ctor);
+ bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
nz_elts = 0;
- nc_elts = 0;
+ elt_count = 0;
- for (list = CONSTRUCTOR_ELTS (ctor); list; list = TREE_CHAIN (list))
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
{
- tree value = TREE_VALUE (list);
- tree purpose = TREE_PURPOSE (list);
HOST_WIDE_INT mult;
mult = 1;
{
case CONSTRUCTOR:
{
- HOST_WIDE_INT nz = 0, nc = 0;
- categorize_ctor_elements_1 (value, &nz, &nc);
+ HOST_WIDE_INT nz = 0, ic = 0;
+
+ bool const_elt_p
+ = categorize_ctor_elements_1 (value, &nz, &ic, p_must_clear);
+
nz_elts += mult * nz;
- nc_elts += mult * nc;
+ elt_count += mult * ic;
+
+ if (const_from_elts_p && const_p)
+ const_p = const_elt_p;
}
break;
case REAL_CST:
if (!initializer_zerop (value))
nz_elts += mult;
+ elt_count += mult;
+ break;
+
+ case STRING_CST:
+ nz_elts += mult * TREE_STRING_LENGTH (value);
+ elt_count += mult * TREE_STRING_LENGTH (value);
break;
+
case COMPLEX_CST:
if (!initializer_zerop (TREE_REALPART (value)))
nz_elts += mult;
if (!initializer_zerop (TREE_IMAGPART (value)))
nz_elts += mult;
+ elt_count += mult;
break;
+
case VECTOR_CST:
{
tree v;
for (v = TREE_VECTOR_CST_ELTS (value); v; v = TREE_CHAIN (v))
- if (!initializer_zerop (TREE_VALUE (v)))
- nz_elts += mult;
+ {
+ if (!initializer_zerop (TREE_VALUE (v)))
+ nz_elts += mult;
+ elt_count += mult;
+ }
}
break;
default:
nz_elts += mult;
- if (!initializer_constant_valid_p (value, TREE_TYPE (value)))
- nc_elts += mult;
+ elt_count += mult;
+
+ if (const_from_elts_p && const_p)
+ const_p = initializer_constant_valid_p (value, TREE_TYPE (value))
+ != NULL_TREE;
break;
}
}
+ if (!*p_must_clear
+ && (TREE_CODE (TREE_TYPE (ctor)) == UNION_TYPE
+ || TREE_CODE (TREE_TYPE (ctor)) == QUAL_UNION_TYPE))
+ {
+ tree init_sub_type;
+ bool clear_this = true;
+
+ if (!VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (ctor)))
+ {
+ /* We don't expect more than one element of the union to be
+ initialized. Not sure what we should do otherwise... */
+ gcc_assert (VEC_length (constructor_elt, CONSTRUCTOR_ELTS (ctor))
+ == 1);
+
+ init_sub_type = TREE_TYPE (VEC_index (constructor_elt,
+ CONSTRUCTOR_ELTS (ctor),
+ 0)->value);
+
+	  /* ??? We could look at each element of the union, and find
+	     the largest element, which would avoid comparing the size
+	     of the initialized element against any tail padding in
+	     the union.  Doesn't seem worth the effort...  */
+ if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (ctor)),
+ TYPE_SIZE (init_sub_type)) == 1)
+ {
+ /* And now we have to find out if the element itself is fully
+ constructed. E.g. for union { struct { int a, b; } s; } u
+ = { .s = { .a = 1 } }. */
+ if (elt_count == count_type_elements (init_sub_type, false))
+ clear_this = false;
+ }
+ }
+
+ *p_must_clear = clear_this;
+ }
+
*p_nz_elts += nz_elts;
- *p_nc_elts += nc_elts;
+ *p_elt_count += elt_count;
+
+ return const_p;
}
-void
+/* Examine CTOR to discover:
+   * how many scalar fields are set to nonzero values,
+     and place it in *P_NZ_ELTS;
+   * how many scalar fields in total are in CTOR,
+     and place it in *P_ELT_COUNT;
+   * whether the type is a union whose initializer is not the largest
+     element in the union, in which case set *P_MUST_CLEAR.
+
+   Return whether or not CTOR is a valid static constant initializer,
+   the same as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR))
+   != 0".  */
+
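+/* An illustrative example for *P_MUST_CLEAR (not from the patch):
+   for
+
+     union U { char c; double d; } u = { .c = 1 };
+
+   the initialized element is smaller than the union, so the union
+   must be cleared before C is stored.  */
+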
+bool
categorize_ctor_elements (tree ctor, HOST_WIDE_INT *p_nz_elts,
- HOST_WIDE_INT *p_nc_elts)
+ HOST_WIDE_INT *p_elt_count,
+ bool *p_must_clear)
{
*p_nz_elts = 0;
- *p_nc_elts = 0;
- categorize_ctor_elements_1 (ctor, p_nz_elts, p_nc_elts);
+ *p_elt_count = 0;
+ *p_must_clear = false;
+
+ return
+ categorize_ctor_elements_1 (ctor, p_nz_elts, p_elt_count, p_must_clear);
}
/* Count the number of scalars in TYPE. Return -1 on overflow or
- variable-sized. */
+   variable-sized.  If ALLOW_FLEXARR is true, don't count a flexible
+   array member at the end of the structure.  */
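+/* E.g. (illustrative): for struct fa { int n; char data[]; },
+   count_type_elements with ALLOW_FLEXARR true counts just N and
+   ignores the flexible trailing array, whereas with ALLOW_FLEXARR
+   false the whole type counts as variable-sized (-1).  */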
HOST_WIDE_INT
-count_type_elements (tree type)
+count_type_elements (tree type, bool allow_flexarr)
{
const HOST_WIDE_INT max = ~((HOST_WIDE_INT)1 << (HOST_BITS_PER_WIDE_INT-1));
switch (TREE_CODE (type))
if (telts && host_integerp (telts, 1))
{
HOST_WIDE_INT n = tree_low_cst (telts, 1) + 1;
- HOST_WIDE_INT m = count_type_elements (TREE_TYPE (type));
+ HOST_WIDE_INT m = count_type_elements (TREE_TYPE (type), false);
if (n == 0)
return 0;
else if (max / n > m)
for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
if (TREE_CODE (f) == FIELD_DECL)
{
- t = count_type_elements (TREE_TYPE (f));
+ t = count_type_elements (TREE_TYPE (f), false);
if (t < 0)
- return -1;
+ {
+ /* Check for structures with flexible array member. */
+ tree tf = TREE_TYPE (f);
+ if (allow_flexarr
+ && TREE_CHAIN (f) == NULL
+ && TREE_CODE (tf) == ARRAY_TYPE
+ && TYPE_DOMAIN (tf)
+ && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
+ && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
+ && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
+ && int_size_in_bytes (type) >= 0)
+ break;
+
+ return -1;
+ }
n += t;
}
case REAL_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
- case CHAR_TYPE:
case POINTER_TYPE:
case OFFSET_TYPE:
case REFERENCE_TYPE:
case VOID_TYPE:
case METHOD_TYPE:
- case FILE_TYPE:
- case SET_TYPE:
case FUNCTION_TYPE:
case LANG_TYPE:
default:
/* Return 1 if EXP contains mostly (3/4) zeros. */
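+/* (Illustrative: an int a[16] initializer with three nonzero elements
+   has nz_elts == 3 < 16/4 == 4, so callers clear the whole object
+   first and then store only the nonzero elements.)  */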
-int
+static int
mostly_zeros_p (tree exp)
{
if (TREE_CODE (exp) == CONSTRUCTOR)
{
- HOST_WIDE_INT nz_elts, nc_elts, elts;
+ HOST_WIDE_INT nz_elts, count, elts;
+ bool must_clear;
- /* If there are no ranges of true bits, it is all zero. */
- if (TREE_TYPE (exp) && TREE_CODE (TREE_TYPE (exp)) == SET_TYPE)
- return CONSTRUCTOR_ELTS (exp) == NULL_TREE;
+ categorize_ctor_elements (exp, &nz_elts, &count, &must_clear);
+ if (must_clear)
+ return 1;
- categorize_ctor_elements (exp, &nz_elts, &nc_elts);
- elts = count_type_elements (TREE_TYPE (exp));
+ elts = count_type_elements (TREE_TYPE (exp), false);
return nz_elts < elts / 4;
}
return initializer_zerop (exp);
}
+
+/* Return 1 if EXP contains all zeros. */
+
+static int
+all_zeros_p (tree exp)
+{
+  if (TREE_CODE (exp) == CONSTRUCTOR)
+    {
+ HOST_WIDE_INT nz_elts, count;
+ bool must_clear;
+
+ categorize_ctor_elements (exp, &nz_elts, &count, &must_clear);
+ return nz_elts == 0;
+ }
+
+ return initializer_zerop (exp);
+}
\f
/* Helper function for store_constructor.
TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
store_constructor (exp, target, cleared, bitsize / BITS_PER_UNIT);
}
else
- store_field (target, bitsize, bitpos, mode, exp, VOIDmode, 0, type,
- alias_set);
+ store_field (target, bitsize, bitpos, mode, exp, type, alias_set, false);
}
/* Store the value of constructor EXP into the rtx TARGET.
case UNION_TYPE:
case QUAL_UNION_TYPE:
{
- tree elt;
+ unsigned HOST_WIDE_INT idx;
+ tree field, value;
/* If size is zero or the target is already cleared, do nothing. */
if (size == 0 || cleared)
&& ! CONSTRUCTOR_ELTS (exp))
/* If the constructor is empty, clear the union. */
{
- clear_storage (target, expr_size (exp));
+ clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
cleared = 1;
}
register whose mode size isn't equal to SIZE since
clear_storage can't handle this case. */
else if (size > 0
- && ((list_length (CONSTRUCTOR_ELTS (exp))
+ && (((int)VEC_length (constructor_elt, CONSTRUCTOR_ELTS (exp))
!= fields_length (type))
|| mostly_zeros_p (exp))
&& (!REG_P (target)
|| ((HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (target))
== size)))
{
- clear_storage (target, GEN_INT (size));
+ clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
cleared = 1;
}
- if (! cleared)
+ if (REG_P (target) && !cleared)
emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
/* Store each element of the constructor into the
corresponding field of TARGET. */
-
- for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt))
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, field, value)
{
- tree field = TREE_PURPOSE (elt);
- tree value = TREE_VALUE (elt);
enum machine_mode mode;
HOST_WIDE_INT bitsize;
HOST_WIDE_INT bitpos = 0;
tree offset;
rtx to_rtx = target;
-
+
/* Just ignore missing fields. We cleared the whole
structure, above, if any fields are missing. */
if (field == 0)
continue;
-
+
if (cleared && initializer_zerop (value))
continue;
-
+
if (host_integerp (DECL_SIZE (field), 1))
bitsize = tree_low_cst (DECL_SIZE (field), 1);
else
bitsize = -1;
-
+
mode = DECL_MODE (field);
if (DECL_BIT_FIELD (field))
mode = VOIDmode;
-
+
offset = DECL_FIELD_OFFSET (field);
if (host_integerp (offset, 0)
&& host_integerp (bit_position (field), 0))
}
else
bitpos = tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 0);
-
+
if (offset)
{
rtx offset_rtx;
-
+
offset
= SUBSTITUTE_PLACEHOLDER_IN_EXPR (offset,
make_tree (TREE_TYPE (exp),
target));
- offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
+ offset_rtx = expand_normal (offset);
gcc_assert (MEM_P (to_rtx));
-
+
#ifdef POINTERS_EXTEND_UNSIGNED
if (GET_MODE (offset_rtx) != Pmode)
offset_rtx = convert_to_mode (Pmode, offset_rtx, 0);
&& bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
{
tree type = TREE_TYPE (value);
-
+
if (TYPE_PRECISION (type) < BITS_PER_WORD)
{
type = lang_hooks.types.type_for_size
(BITS_PER_WORD, TYPE_UNSIGNED (type));
- value = convert (type, value);
+ value = fold_convert (type, value);
}
-
+
if (BYTES_BIG_ENDIAN)
value
- = fold (build2 (LSHIFT_EXPR, type, value,
- build_int_cst (NULL_TREE,
- BITS_PER_WORD - bitsize)));
+ = fold_build2 (LSHIFT_EXPR, type, value,
+ build_int_cst (type,
+ BITS_PER_WORD - bitsize));
bitsize = BITS_PER_WORD;
mode = word_mode;
}
to_rtx = copy_rtx (to_rtx);
MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
}
-
+
store_constructor_field (to_rtx, bitsize, bitpos, mode,
value, type, cleared,
get_alias_set (TREE_TYPE (field)));
}
case ARRAY_TYPE:
{
- tree elt;
- int i;
+ tree value, index;
+ unsigned HOST_WIDE_INT i;
int need_to_clear;
tree domain;
tree elttype = TREE_TYPE (type);
need_to_clear = 1;
else
{
+ unsigned HOST_WIDE_INT idx;
+ tree index, value;
HOST_WIDE_INT count = 0, zero_count = 0;
need_to_clear = ! const_bounds_p;
-
+
/* This loop is a more accurate version of the loop in
mostly_zeros_p (it handles RANGE_EXPR in an index). It
is also needed to check for missing elements. */
- for (elt = CONSTRUCTOR_ELTS (exp);
- elt != NULL_TREE && ! need_to_clear;
- elt = TREE_CHAIN (elt))
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, index, value)
{
- tree index = TREE_PURPOSE (elt);
HOST_WIDE_INT this_node_count;
-
+
+ if (need_to_clear)
+ break;
+
if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
{
tree lo_index = TREE_OPERAND (index, 0);
tree hi_index = TREE_OPERAND (index, 1);
-
+
if (! host_integerp (lo_index, 1)
|| ! host_integerp (hi_index, 1))
{
need_to_clear = 1;
break;
}
-
+
this_node_count = (tree_low_cst (hi_index, 1)
- tree_low_cst (lo_index, 1) + 1);
}
else
this_node_count = 1;
-
+
count += this_node_count;
- if (mostly_zeros_p (TREE_VALUE (elt)))
+ if (mostly_zeros_p (value))
zero_count += this_node_count;
}
-
+
/* Clear the entire array first if there are any missing
elements, or if the incidence of zero elements is >=
75%. */
|| 4 * zero_count >= 3 * count))
need_to_clear = 1;
}
-
+
if (need_to_clear && size > 0)
{
if (REG_P (target))
emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
else
- clear_storage (target, GEN_INT (size));
+ clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
cleared = 1;
}
/* Store each element of the constructor into the
corresponding element of TARGET, determined by counting the
elements. */
- for (elt = CONSTRUCTOR_ELTS (exp), i = 0;
- elt;
- elt = TREE_CHAIN (elt), i++)
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
{
enum machine_mode mode;
HOST_WIDE_INT bitsize;
HOST_WIDE_INT bitpos;
int unsignedp;
- tree value = TREE_VALUE (elt);
- tree index = TREE_PURPOSE (elt);
rtx xtarget = target;
-
+
if (cleared && initializer_zerop (value))
continue;
-
+
unsignedp = TYPE_UNSIGNED (elttype);
mode = TYPE_MODE (elttype);
if (mode == BLKmode)
: -1);
else
bitsize = GET_MODE_BITSIZE (mode);
-
+
if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
{
tree lo_index = TREE_OPERAND (index, 0);
rtx index_r, pos_rtx;
HOST_WIDE_INT lo, hi, count;
tree position;
-
+
/* If the range is constant and "small", unroll the loop. */
if (const_bounds_p
&& host_integerp (lo_index, 0)
for (; lo <= hi; lo++)
{
bitpos = lo * tree_low_cst (TYPE_SIZE (elttype), 0);
-
+
if (MEM_P (target)
&& !MEM_KEEP_ALIAS_SET_P (target)
&& TREE_CODE (type) == ARRAY_TYPE
target = copy_rtx (target);
MEM_KEEP_ALIAS_SET_P (target) = 1;
}
-
+
store_constructor_field
(target, bitsize, bitpos, mode, value, type, cleared,
get_alias_set (elttype));
rtx loop_start = gen_label_rtx ();
rtx loop_end = gen_label_rtx ();
tree exit_cond;
-
- expand_expr (hi_index, NULL_RTX, VOIDmode, 0);
+
+ expand_normal (hi_index);
unsignedp = TYPE_UNSIGNED (domain);
-
+
index = build_decl (VAR_DECL, NULL_TREE, domain);
-
+
index_r
= gen_reg_rtx (promote_mode (domain, DECL_MODE (index),
&unsignedp, 0));
SET_DECL_RTL (index, index_r);
- store_expr (lo_index, index_r, 0);
-
+ store_expr (lo_index, index_r, 0, false);
+
/* Build the head of the loop. */
do_pending_stack_adjust ();
emit_label (loop_start);
/* Assign value to element index. */
- position
- = convert (ssizetype,
- fold (build2 (MINUS_EXPR, TREE_TYPE (index),
- index, TYPE_MIN_VALUE (domain))));
- position = size_binop (MULT_EXPR, position,
- convert (ssizetype,
- TYPE_SIZE_UNIT (elttype)));
-
- pos_rtx = expand_expr (position, 0, VOIDmode, 0);
+ position =
+ fold_convert (ssizetype,
+ fold_build2 (MINUS_EXPR,
+ TREE_TYPE (index),
+ index,
+ TYPE_MIN_VALUE (domain)));
+
+ position =
+ size_binop (MULT_EXPR, position,
+ fold_convert (ssizetype,
+ TYPE_SIZE_UNIT (elttype)));
+
+ pos_rtx = expand_normal (position);
xtarget = offset_address (target, pos_rtx,
highest_pow2_factor (position));
xtarget = adjust_address (xtarget, mode, 0);
store_constructor (value, xtarget, cleared,
bitsize / BITS_PER_UNIT);
else
- store_expr (value, xtarget, 0);
+ store_expr (value, xtarget, 0, false);
/* Generate a conditional jump to exit the loop. */
exit_cond = build2 (LT_EXPR, integer_type_node,
index, hi_index);
jumpif (exit_cond, loop_end);
-
+
/* Update the loop counter, and jump to the head of
the loop. */
expand_assignment (index,
build2 (PLUS_EXPR, TREE_TYPE (index),
- index, integer_one_node), 0);
-
+ index, integer_one_node),
+ false);
+
emit_jump (loop_start);
-
+
/* Build the end of the loop. */
emit_label (loop_end);
}
|| ! host_integerp (TYPE_SIZE (elttype), 1))
{
tree position;
-
+
if (index == 0)
index = ssize_int (1);
-
+
if (minelt)
index = fold_convert (ssizetype,
- fold (build2 (MINUS_EXPR,
- TREE_TYPE (index),
- index,
- TYPE_MIN_VALUE (domain))));
-
- position = size_binop (MULT_EXPR, index,
- convert (ssizetype,
- TYPE_SIZE_UNIT (elttype)));
+ fold_build2 (MINUS_EXPR,
+ TREE_TYPE (index),
+ index,
+ TYPE_MIN_VALUE (domain)));
+
+ position =
+ size_binop (MULT_EXPR, index,
+ fold_convert (ssizetype,
+ TYPE_SIZE_UNIT (elttype)));
xtarget = offset_address (target,
- expand_expr (position, 0, VOIDmode, 0),
+ expand_normal (position),
highest_pow2_factor (position));
xtarget = adjust_address (xtarget, mode, 0);
- store_expr (value, xtarget, 0);
+ store_expr (value, xtarget, 0, false);
}
else
{
* tree_low_cst (TYPE_SIZE (elttype), 1));
else
bitpos = (i * tree_low_cst (TYPE_SIZE (elttype), 1));
-
+
if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (type))
case VECTOR_TYPE:
{
- tree elt;
+ unsigned HOST_WIDE_INT idx;
+ constructor_elt *ce;
int i;
int need_to_clear;
int icode = 0;
enum machine_mode eltmode = TYPE_MODE (elttype);
HOST_WIDE_INT bitsize;
HOST_WIDE_INT bitpos;
- rtx *vector = NULL;
+ rtvec vector = NULL;
unsigned n_elts;
-
+
gcc_assert (eltmode != BLKmode);
-
+
n_elts = TYPE_VECTOR_SUBPARTS (type);
if (REG_P (target) && VECTOR_MODE_P (GET_MODE (target)))
{
enum machine_mode mode = GET_MODE (target);
-
+
icode = (int) vec_init_optab->handlers[mode].insn_code;
if (icode != CODE_FOR_nothing)
{
unsigned int i;
-
- vector = alloca (n_elts);
+
+ vector = rtvec_alloc (n_elts);
for (i = 0; i < n_elts; i++)
- vector [i] = CONST0_RTX (GET_MODE_INNER (mode));
+ RTVEC_ELT (vector, i) = CONST0_RTX (GET_MODE_INNER (mode));
}
}
-
+
/* If the constructor has fewer elements than the vector,
clear the whole array first. Similarly if this is static
constructor of a non-BLKmode object. */
else
{
unsigned HOST_WIDE_INT count = 0, zero_count = 0;
-
- for (elt = CONSTRUCTOR_ELTS (exp);
- elt != NULL_TREE;
- elt = TREE_CHAIN (elt))
+ tree value;
+
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
{
int n_elts_here = tree_low_cst
(int_const_binop (TRUNC_DIV_EXPR,
- TYPE_SIZE (TREE_TYPE (TREE_VALUE (elt))),
+ TYPE_SIZE (TREE_TYPE (value)),
TYPE_SIZE (elttype), 0), 1);
-
+
count += n_elts_here;
- if (mostly_zeros_p (TREE_VALUE (elt)))
+ if (mostly_zeros_p (value))
zero_count += n_elts_here;
}
or if the incidence of zero elements is >= 75%. */
need_to_clear = (count < n_elts || 4 * zero_count >= 3 * count);
}
-
+
if (need_to_clear && size > 0 && !vector)
{
if (REG_P (target))
emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
else
- clear_storage (target, GEN_INT (size));
+ clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
cleared = 1;
}
-
- if (!cleared && REG_P (target))
- /* Inform later passes that the old value is dead. */
- emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+
+ /* Inform later passes that the old value is dead. */
+ if (!cleared && !vector && REG_P (target))
+ emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
/* Store each element of the constructor into the corresponding
element of TARGET, determined by counting the elements. */
- for (elt = CONSTRUCTOR_ELTS (exp), i = 0;
- elt;
- elt = TREE_CHAIN (elt), i += bitsize / elt_size)
+ for (idx = 0, i = 0;
+ VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce);
+ idx++, i += bitsize / elt_size)
{
- tree value = TREE_VALUE (elt);
- tree index = TREE_PURPOSE (elt);
HOST_WIDE_INT eltpos;
-
+ tree value = ce->value;
+
bitsize = tree_low_cst (TYPE_SIZE (TREE_TYPE (value)), 1);
if (cleared && initializer_zerop (value))
continue;
-
- if (index != 0)
- eltpos = tree_low_cst (index, 1);
+
+ if (ce->index)
+ eltpos = tree_low_cst (ce->index, 1);
else
eltpos = i;
-
+
if (vector)
{
/* Vector CONSTRUCTORs should only be built from smaller
vectors in the case of BLKmode vectors. */
gcc_assert (TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE);
- vector[eltpos] = expand_expr (value, NULL_RTX, VOIDmode, 0);
+ RTVEC_ELT (vector, eltpos)
+ = expand_normal (value);
}
else
{
cleared, get_alias_set (elttype));
}
}
-
+
if (vector)
emit_insn (GEN_FCN (icode)
(target,
- gen_rtx_PARALLEL (GET_MODE (target),
- gen_rtvec_v (n_elts, vector))));
+ gen_rtx_PARALLEL (GET_MODE (target), vector)));
break;
}
- /* Set constructor assignments. */
- case SET_TYPE:
- {
- tree elt = CONSTRUCTOR_ELTS (exp);
- unsigned HOST_WIDE_INT nbytes = int_size_in_bytes (type), nbits;
- tree domain = TYPE_DOMAIN (type);
- tree domain_min, domain_max, bitlength;
-
- /* The default implementation strategy is to extract the
- constant parts of the constructor, use that to initialize
- the target, and then "or" in whatever non-constant ranges
- we need in addition.
-
- If a large set is all zero or all ones, it is probably
- better to set it using memset. Also, if a large set has
- just a single range, it may also be better to first clear
- all the first clear the set (using memset), and set the
- bits we want. */
-
- /* Check for all zeros. */
- if (elt == NULL_TREE && size > 0)
- {
- if (!cleared)
- clear_storage (target, GEN_INT (size));
- return;
- }
-
- domain_min = convert (sizetype, TYPE_MIN_VALUE (domain));
- domain_max = convert (sizetype, TYPE_MAX_VALUE (domain));
- bitlength = size_binop (PLUS_EXPR,
- size_diffop (domain_max, domain_min),
- ssize_int (1));
-
- nbits = tree_low_cst (bitlength, 1);
-
- /* For "small" sets, or "medium-sized" (up to 32 bytes) sets
- that are "complicated" (more than one range), initialize
- (the constant parts) by copying from a constant. */
- if (GET_MODE (target) != BLKmode || nbits <= 2 * BITS_PER_WORD
- || (nbytes <= 32 && TREE_CHAIN (elt) != NULL_TREE))
- {
- unsigned int set_word_size = TYPE_ALIGN (TREE_TYPE (exp));
- enum machine_mode mode = mode_for_size (set_word_size, MODE_INT, 1);
- char *bit_buffer = alloca (nbits);
- HOST_WIDE_INT word = 0;
- unsigned int bit_pos = 0;
- unsigned int ibit = 0;
- unsigned int offset = 0; /* In bytes from beginning of set. */
-
- elt = get_set_constructor_bits (exp, bit_buffer, nbits);
- for (;;)
- {
- if (bit_buffer[ibit])
- {
- if (BYTES_BIG_ENDIAN)
- word |= (1 << (set_word_size - 1 - bit_pos));
- else
- word |= 1 << bit_pos;
- }
-
- bit_pos++; ibit++;
- if (bit_pos >= set_word_size || ibit == nbits)
- {
- if (word != 0 || ! cleared)
- {
- rtx datum = gen_int_mode (word, mode);
- rtx to_rtx;
-
- /* The assumption here is that it is safe to
- use XEXP if the set is multi-word, but not
- if it's single-word. */
- if (MEM_P (target))
- to_rtx = adjust_address (target, mode, offset);
- else
- {
- gcc_assert (!offset);
- to_rtx = target;
- }
- emit_move_insn (to_rtx, datum);
- }
-
- if (ibit == nbits)
- break;
- word = 0;
- bit_pos = 0;
- offset += set_word_size / BITS_PER_UNIT;
- }
- }
- }
- else if (!cleared)
- /* Don't bother clearing storage if the set is all ones. */
- if (TREE_CHAIN (elt) != NULL_TREE
- || (TREE_PURPOSE (elt) == NULL_TREE
- ? nbits != 1
- : ( ! host_integerp (TREE_VALUE (elt), 0)
- || ! host_integerp (TREE_PURPOSE (elt), 0)
- || (tree_low_cst (TREE_VALUE (elt), 0)
- - tree_low_cst (TREE_PURPOSE (elt), 0) + 1
- != (HOST_WIDE_INT) nbits))))
- clear_storage (target, expr_size (exp));
-
- for (; elt != NULL_TREE; elt = TREE_CHAIN (elt))
- {
- /* Start of range of element or NULL. */
- tree startbit = TREE_PURPOSE (elt);
- /* End of range of element, or element value. */
- tree endbit = TREE_VALUE (elt);
- HOST_WIDE_INT startb, endb;
- rtx bitlength_rtx, startbit_rtx, endbit_rtx, targetx;
-
- bitlength_rtx = expand_expr (bitlength,
- NULL_RTX, MEM, EXPAND_CONST_ADDRESS);
-
- /* Handle non-range tuple element like [ expr ]. */
- if (startbit == NULL_TREE)
- {
- startbit = save_expr (endbit);
- endbit = startbit;
- }
-
- startbit = convert (sizetype, startbit);
- endbit = convert (sizetype, endbit);
- if (! integer_zerop (domain_min))
- {
- startbit = size_binop (MINUS_EXPR, startbit, domain_min);
- endbit = size_binop (MINUS_EXPR, endbit, domain_min);
- }
- startbit_rtx = expand_expr (startbit, NULL_RTX, MEM,
- EXPAND_CONST_ADDRESS);
- endbit_rtx = expand_expr (endbit, NULL_RTX, MEM,
- EXPAND_CONST_ADDRESS);
-
- if (REG_P (target))
- {
- targetx
- = assign_temp
- ((build_qualified_type (lang_hooks.types.type_for_mode
- (GET_MODE (target), 0),
- TYPE_QUAL_CONST)),
- 0, 1, 1);
- emit_move_insn (targetx, target);
- }
-
- else
- {
- gcc_assert (MEM_P (target));
- targetx = target;
- }
-
- /* Optimization: If startbit and endbit are constants divisible
- by BITS_PER_UNIT, call memset instead. */
- if (TREE_CODE (startbit) == INTEGER_CST
- && TREE_CODE (endbit) == INTEGER_CST
- && (startb = TREE_INT_CST_LOW (startbit)) % BITS_PER_UNIT == 0
- && (endb = TREE_INT_CST_LOW (endbit) + 1) % BITS_PER_UNIT == 0)
- {
- emit_library_call (memset_libfunc, LCT_NORMAL,
- VOIDmode, 3,
- plus_constant (XEXP (targetx, 0),
- startb / BITS_PER_UNIT),
- Pmode,
- constm1_rtx, TYPE_MODE (integer_type_node),
- GEN_INT ((endb - startb) / BITS_PER_UNIT),
- TYPE_MODE (sizetype));
- }
- else
- emit_library_call (setbits_libfunc, LCT_NORMAL,
- VOIDmode, 4, XEXP (targetx, 0),
- Pmode, bitlength_rtx, TYPE_MODE (sizetype),
- startbit_rtx, TYPE_MODE (sizetype),
- endbit_rtx, TYPE_MODE (sizetype));
-
- if (REG_P (target))
- emit_move_insn (target, targetx);
- }
- break;
- }
default:
gcc_unreachable ();
}
BITSIZE bits, starting BITPOS bits from the start of TARGET.
If MODE is VOIDmode, it means that we are storing into a bit-field.
- If VALUE_MODE is VOIDmode, return nothing in particular.
- UNSIGNEDP is not used in this case.
-
- Otherwise, return an rtx for the value stored. This rtx
- has mode VALUE_MODE if that is convenient to do.
- In this case, UNSIGNEDP must be nonzero if the value is an unsigned type.
+ Always return const0_rtx unless we have something particular to
+ return.
TYPE is the type of the underlying object,
ALIAS_SET is the alias set for the destination. This value will
(in general) be different from that for TARGET, since TARGET is a
- reference to the containing structure. */
+ reference to the containing structure.
+
+ If NONTEMPORAL is true, try generating a nontemporal store. */
static rtx
store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
- enum machine_mode mode, tree exp, enum machine_mode value_mode,
- int unsignedp, tree type, int alias_set)
+ enum machine_mode mode, tree exp, tree type, int alias_set,
+ bool nontemporal)
{
HOST_WIDE_INT width_mask = 0;
/* If we have nothing to store, do nothing unless the expression has
side-effects. */
if (bitsize == 0)
- return expand_expr (exp, const0_rtx, VOIDmode, 0);
+ return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
else if (bitsize >= 0 && bitsize < HOST_BITS_PER_WIDE_INT)
width_mask = ((HOST_WIDE_INT) 1 << bitsize) - 1;
if (bitsize != (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (target)))
emit_move_insn (object, target);
- store_field (blk_object, bitsize, bitpos, mode, exp, VOIDmode, 0, type,
- alias_set);
+ store_field (blk_object, bitsize, bitpos, mode, exp, type, alias_set,
+ nontemporal);
emit_move_insn (target, object);
/* We're storing into a struct containing a single __complex. */
gcc_assert (!bitpos);
- return store_expr (exp, target, value_mode != VOIDmode);
+ return store_expr (exp, target, 0, nontemporal);
}
/* If the structure is in a register or if the component
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST
&& compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)), bitsize) != 0))
{
- rtx temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+ rtx temp;
+
+ /* If EXP is a NOP_EXPR of precision less than its mode, then that
+ implies a mask operation. If the precision is the same size as
+ the field we're storing into, that mask is redundant. This is
+ particularly common with bit field assignments generated by the
+ C front end. */
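+ /* E.g. for "struct { unsigned f : 8; } s;", the front end emits the
+ store "s.f = x" with a narrowing conversion of X to an 8-bit type;
+ exactly 8 bits are stored, so the mask implied by that NOP_EXPR is
+ redundant and X itself can be expanded. */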
+ if (TREE_CODE (exp) == NOP_EXPR)
+ {
+ tree type = TREE_TYPE (exp);
+ if (INTEGRAL_TYPE_P (type)
+ && TYPE_PRECISION (type) < GET_MODE_BITSIZE (TYPE_MODE (type))
+ && bitsize == TYPE_PRECISION (type))
+ {
+ type = TREE_TYPE (TREE_OPERAND (exp, 0));
+ if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) >= bitsize)
+ exp = TREE_OPERAND (exp, 0);
+ }
+ }
+
+ temp = expand_normal (exp);
/* If BITSIZE is narrower than the size of the type of EXP
we will be narrowing TEMP. Normally, what's wanted are the
/ BITS_PER_UNIT),
BLOCK_OP_NORMAL);
- return value_mode == VOIDmode ? const0_rtx : target;
+ return const0_rtx;
}
/* Store the value in the bitfield. */
store_bit_field (target, bitsize, bitpos, mode, temp);
- if (value_mode != VOIDmode)
- {
- /* The caller wants an rtx for the value.
- If possible, avoid refetching from the bitfield itself. */
- if (width_mask != 0
- && ! (MEM_P (target) && MEM_VOLATILE_P (target)))
- {
- tree count;
- enum machine_mode tmode;
-
- tmode = GET_MODE (temp);
- if (tmode == VOIDmode)
- tmode = value_mode;
-
- if (unsignedp)
- return expand_and (tmode, temp,
- gen_int_mode (width_mask, tmode),
- NULL_RTX);
-
- count = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (tmode) - bitsize);
- temp = expand_shift (LSHIFT_EXPR, tmode, temp, count, 0, 0);
- return expand_shift (RSHIFT_EXPR, tmode, temp, count, 0, 0);
- }
-
- return extract_bit_field (target, bitsize, bitpos, unsignedp,
- NULL_RTX, value_mode, VOIDmode);
- }
return const0_rtx;
}
else
{
- rtx addr = XEXP (target, 0);
- rtx to_rtx = target;
-
- /* If a value is wanted, it must be the lhs;
- so make the address stable for multiple use. */
-
- if (value_mode != VOIDmode && !REG_P (addr)
- && ! CONSTANT_ADDRESS_P (addr)
- /* A frame-pointer reference is already stable. */
- && ! (GET_CODE (addr) == PLUS
- && GET_CODE (XEXP (addr, 1)) == CONST_INT
- && (XEXP (addr, 0) == virtual_incoming_args_rtx
- || XEXP (addr, 0) == virtual_stack_vars_rtx)))
- to_rtx = replace_equiv_address (to_rtx, copy_to_reg (addr));
-
/* Now build a reference to just the desired component. */
-
- to_rtx = adjust_address (target, mode, bitpos / BITS_PER_UNIT);
+ rtx to_rtx = adjust_address (target, mode, bitpos / BITS_PER_UNIT);
if (to_rtx == target)
to_rtx = copy_rtx (to_rtx);
if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
set_mem_alias_set (to_rtx, alias_set);
- return store_expr (exp, to_rtx, value_mode != VOIDmode);
+ return store_expr (exp, to_rtx, 0, nontemporal);
}
}
\f
If the field describes a variable-sized object, *PMODE is set to
VOIDmode and *PBITSIZE is set to -1. An access cannot be made in
- this case, but the address of the object can be found. */
+ this case, but the address of the object can be found.
+
+ If KEEP_ALIGNING is true and the target is STRICT_ALIGNMENT, we don't
+ look through nodes that serve as markers of a greater alignment than
+ the one that can be deduced from the expression. These nodes make it
+ possible for front-ends to prevent temporaries from being created by
+ the middle-end on alignment considerations. For that purpose, the
+ normal operating mode at high-level is to always pass FALSE so that
+ the ultimate containing object is really returned; moreover, the
+ associated predicate handled_component_p will always return TRUE
+ on these nodes, thus indicating that they are essentially handled
+ by get_inner_reference. TRUE should only be passed when the caller
+ is scanning the expression in order to build another representation
+ and specifically knows how to handle these nodes; as such, this is
+ the normal operating mode in the RTL expanders. */
tree
get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
HOST_WIDE_INT *pbitpos, tree *poffset,
enum machine_mode *pmode, int *punsignedp,
- int *pvolatilep)
+ int *pvolatilep, bool keep_aligning)
{
tree size_tree = 0;
enum machine_mode mode = VOIDmode;
{
size_tree = TREE_OPERAND (exp, 1);
*punsignedp = BIT_FIELD_REF_UNSIGNED (exp);
+
+ /* For vector types, with the correct size of access, use the mode of
+ inner type. */
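+ /* E.g. a BIT_FIELD_REF that extracts exactly one element of a
+ V4SImode vector is given SImode here, rather than the VOIDmode
+ used for ordinary bit-field accesses. */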
+ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == VECTOR_TYPE
+ && TREE_TYPE (exp) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ && tree_int_cst_equal (size_tree, TYPE_SIZE (TREE_TYPE (exp))))
+ mode = TYPE_MODE (TREE_TYPE (exp));
}
else
{
and find the ultimate containing object. */
while (1)
{
- if (TREE_CODE (exp) == BIT_FIELD_REF)
- bit_offset = size_binop (PLUS_EXPR, bit_offset, TREE_OPERAND (exp, 2));
- else if (TREE_CODE (exp) == COMPONENT_REF)
+ switch (TREE_CODE (exp))
{
- tree field = TREE_OPERAND (exp, 1);
- tree this_offset = component_ref_field_offset (exp);
+ case BIT_FIELD_REF:
+ bit_offset = size_binop (PLUS_EXPR, bit_offset,
+ TREE_OPERAND (exp, 2));
+ break;
- /* If this field hasn't been filled in yet, don't go
- past it. This should only happen when folding expressions
- made during type construction. */
- if (this_offset == 0)
- break;
+ case COMPONENT_REF:
+ {
+ tree field = TREE_OPERAND (exp, 1);
+ tree this_offset = component_ref_field_offset (exp);
+
+ /* If this field hasn't been filled in yet, don't go past it.
+ This should only happen when folding expressions made during
+ type construction. */
+ if (this_offset == 0)
+ break;
+
+ offset = size_binop (PLUS_EXPR, offset, this_offset);
+ bit_offset = size_binop (PLUS_EXPR, bit_offset,
+ DECL_FIELD_BIT_OFFSET (field));
+
+ /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
+ }
+ break;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ {
+ tree index = TREE_OPERAND (exp, 1);
+ tree low_bound = array_ref_low_bound (exp);
+ tree unit_size = array_ref_element_size (exp);
+
+ /* We assume all arrays have sizes that are a multiple of a byte.
+ First subtract the lower bound, if any, in the type of the
+ index, then convert to sizetype and multiply by the size of
+ the array element. */
+ if (! integer_zerop (low_bound))
+ index = fold_build2 (MINUS_EXPR, TREE_TYPE (index),
+ index, low_bound);
+
+ offset = size_binop (PLUS_EXPR, offset,
+ size_binop (MULT_EXPR,
+ fold_convert (sizetype, index),
+ unit_size));
+ }
+ break;
+
+ case REALPART_EXPR:
+ break;
- offset = size_binop (PLUS_EXPR, offset, this_offset);
+ case IMAGPART_EXPR:
bit_offset = size_binop (PLUS_EXPR, bit_offset,
- DECL_FIELD_BIT_OFFSET (field));
-
- /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
- }
-
- else if (TREE_CODE (exp) == ARRAY_REF
- || TREE_CODE (exp) == ARRAY_RANGE_REF)
- {
- tree index = TREE_OPERAND (exp, 1);
- tree low_bound = array_ref_low_bound (exp);
- tree unit_size = array_ref_element_size (exp);
-
- /* We assume all arrays have sizes that are a multiple of a byte.
- First subtract the lower bound, if any, in the type of the
- index, then convert to sizetype and multiply by the size of the
- array element. */
- if (! integer_zerop (low_bound))
- index = fold (build2 (MINUS_EXPR, TREE_TYPE (index),
- index, low_bound));
-
- offset = size_binop (PLUS_EXPR, offset,
- size_binop (MULT_EXPR,
- convert (sizetype, index),
- unit_size));
- }
-
- /* We can go inside most conversions: all NON_VALUE_EXPRs, all normal
- conversions that don't change the mode, and all view conversions
- except those that need to "step up" the alignment. */
- else if (TREE_CODE (exp) != NON_LVALUE_EXPR
- && ! (TREE_CODE (exp) == VIEW_CONVERT_EXPR
- && ! ((TYPE_ALIGN (TREE_TYPE (exp))
- > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))))
- && STRICT_ALIGNMENT
- && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
- < BIGGEST_ALIGNMENT)
- && (TYPE_ALIGN_OK (TREE_TYPE (exp))
- || TYPE_ALIGN_OK (TREE_TYPE
- (TREE_OPERAND (exp, 0))))))
- && ! ((TREE_CODE (exp) == NOP_EXPR
- || TREE_CODE (exp) == CONVERT_EXPR)
- && (TYPE_MODE (TREE_TYPE (exp))
- == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))))
- break;
+ bitsize_int (*pbitsize));
+ break;
+
+ case VIEW_CONVERT_EXPR:
+ if (keep_aligning && STRICT_ALIGNMENT
+ && (TYPE_ALIGN (TREE_TYPE (exp))
+ > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ < BIGGEST_ALIGNMENT)
+ && (TYPE_ALIGN_OK (TREE_TYPE (exp))
+ || TYPE_ALIGN_OK (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ goto done;
+ break;
+
+ default:
+ goto done;
+ }
/* If any reference in the chain is volatile, the effect is volatile. */
if (TREE_THIS_VOLATILE (exp))
exp = TREE_OPERAND (exp, 0);
}
+ done:
/* If OFFSET is constant, see if we can return the whole thing as a
constant bit position. Otherwise, split it up. */
if (host_integerp (offset, 0)
- && 0 != (tem = size_binop (MULT_EXPR, convert (bitsizetype, offset),
+ && 0 != (tem = size_binop (MULT_EXPR,
+ fold_convert (bitsizetype, offset),
bitsize_unit_node))
&& 0 != (tem = size_binop (PLUS_EXPR, tem, bit_offset))
&& host_integerp (tem, 0))
return exp;
}
+/* Given an expression EXP that may be a COMPONENT_REF or an ARRAY_REF,
+ check whether EXP or any nested component reference within EXP is
+ marked as PACKED.
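+
+ For instance, with "struct S { char c; int i; } __attribute__ ((packed));",
+ a COMPONENT_REF of field I has DECL_PACKED set, so this function
+ returns true. */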
+
+bool
+contains_packed_reference (tree exp)
+{
+ bool packed_p = false;
+
+ while (1)
+ {
+ switch (TREE_CODE (exp))
+ {
+ case COMPONENT_REF:
+ {
+ tree field = TREE_OPERAND (exp, 1);
+ packed_p = DECL_PACKED (field)
+ || TYPE_PACKED (TREE_TYPE (field))
+ || TYPE_PACKED (TREE_TYPE (exp));
+ if (packed_p)
+ goto done;
+ }
+ break;
+
+ case BIT_FIELD_REF:
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ break;
+
+ default:
+ goto done;
+ }
+ exp = TREE_OPERAND (exp, 0);
+ }
+ done:
+ return packed_p;
+}
+
/* Return a tree of sizetype representing the size, in bytes, of the element
of EXP, an ARRAY_REF. */
/* Return 1 if T is an expression that get_inner_reference handles. */
int
-handled_component_p (tree t)
+handled_component_p (const_tree t)
{
switch (TREE_CODE (t))
{
case COMPONENT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
- case NON_LVALUE_EXPR:
case VIEW_CONVERT_EXPR:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
return 1;
- /* ??? Sure they are handled, but get_inner_reference may return
- a different PBITSIZE, depending upon whether the expression is
- wrapped up in a NOP_EXPR or not, e.g. for bitfields. */
- case NOP_EXPR:
- case CONVERT_EXPR:
- return (TYPE_MODE (TREE_TYPE (t))
- == TYPE_MODE (TREE_TYPE (TREE_OPERAND (t, 0))));
-
default:
return 0;
}
&& !REG_P (SUBREG_REG (value))
&& !MEM_P (SUBREG_REG (value)))
{
- value = simplify_gen_subreg (GET_MODE (value),
- force_reg (GET_MODE (SUBREG_REG (value)),
- force_operand (SUBREG_REG (value),
- NULL_RTX)),
- GET_MODE (SUBREG_REG (value)),
- SUBREG_BYTE (value));
+ value
+ = simplify_gen_subreg (GET_MODE (value),
+ force_reg (GET_MODE (SUBREG_REG (value)),
+ force_operand (SUBREG_REG (value),
+ NULL_RTX)),
+ GET_MODE (SUBREG_REG (value)),
+ SUBREG_BYTE (value));
code = GET_CODE (value);
}
return subtarget;
}
- if (code == ZERO_EXTEND || code == SIGN_EXTEND)
- {
- if (!target)
- target = gen_reg_rtx (GET_MODE (value));
- convert_move (target, force_operand (XEXP (value, 0), NULL),
- code == ZERO_EXTEND);
- return target;
- }
-
if (ARITHMETIC_P (value))
{
op2 = XEXP (value, 1);
FLOAT_MODE_P (GET_MODE (value))
? RDIV_EXPR : TRUNC_DIV_EXPR,
GET_MODE (value), op1, op2, target, 0);
- break;
case MOD:
return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
target, 0);
- break;
case UDIV:
return expand_divmod (0, TRUNC_DIV_EXPR, GET_MODE (value), op1, op2,
target, 1);
- break;
case UMOD:
return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
target, 1);
- break;
case ASHIFTRT:
return expand_simple_binop (GET_MODE (value), code, op1, op2,
target, 0, OPTAB_LIB_WIDEN);
- break;
default:
return expand_simple_binop (GET_MODE (value), code, op1, op2,
target, 1, OPTAB_LIB_WIDEN);
}
if (UNARY_P (value))
{
+ if (!target)
+ target = gen_reg_rtx (GET_MODE (value));
op1 = force_operand (XEXP (value, 0), NULL_RTX);
- return expand_simple_unop (GET_MODE (value), code, op1, target, 0);
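+ /* Conversion codes are not simple unops: e.g. (fix:SI (reg:DF))
+ must go through expand_fix and (zero_extend:SI (reg:QI)) through
+ convert_move, so dispatch on the code before falling back to
+ expand_simple_unop. */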
+ switch (code)
+ {
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ case TRUNCATE:
+ case FLOAT_EXTEND:
+ case FLOAT_TRUNCATE:
+ convert_move (target, op1, code == ZERO_EXTEND);
+ return target;
+
+ case FIX:
+ case UNSIGNED_FIX:
+ expand_fix (target, op1, code == UNSIGNED_FIX);
+ return target;
+
+ case FLOAT:
+ case UNSIGNED_FLOAT:
+ expand_float (target, op1, code == UNSIGNED_FLOAT);
+ return target;
+
+ default:
+ return expand_simple_unop (GET_MODE (value), code, op1, target, 0);
+ }
}
#ifdef INSN_SCHEDULING
/* Now look at our tree code and possibly recurse. */
switch (TREE_CODE_CLASS (TREE_CODE (exp)))
{
- case 'd':
+ case tcc_declaration:
exp_rtl = DECL_RTL_IF_SET (exp);
break;
- case 'c':
+ case tcc_constant:
return 1;
- case 'x':
+ case tcc_exceptional:
if (TREE_CODE (exp) == TREE_LIST)
{
while (1)
return safe_from_p (x, exp, 0);
}
}
+ else if (TREE_CODE (exp) == CONSTRUCTOR)
+ {
+ constructor_elt *ce;
+ unsigned HOST_WIDE_INT idx;
+
+ for (idx = 0;
+ VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce);
+ idx++)
+ if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
+ || !safe_from_p (x, ce->value, 0))
+ return 0;
+ return 1;
+ }
else if (TREE_CODE (exp) == ERROR_MARK)
return 1; /* An already-visited SAVE_EXPR? */
else
return 0;
- case 's':
+ case tcc_statement:
/* The only case we look at here is the DECL_INITIAL inside a
DECL_EXPR. */
return (TREE_CODE (exp) != DECL_EXPR
|| !DECL_INITIAL (DECL_EXPR_DECL (exp))
|| safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
- case '2':
- case '<':
+ case tcc_binary:
+ case tcc_comparison:
if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
return 0;
/* Fall through. */
- case '1':
+ case tcc_unary:
return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
- case 'e':
- case 'r':
+ case tcc_expression:
+ case tcc_reference:
+ case tcc_vl_exp:
/* Now do code-specific tests. EXP_RTL is set to any rtx we find in
the expression. If it is set, we conflict iff we are that rtx or
both are in memory. Otherwise, we check all operands of the
}
break;
+ case MISALIGNED_INDIRECT_REF:
+ case ALIGN_INDIRECT_REF:
case INDIRECT_REF:
if (MEM_P (x)
&& alias_sets_conflict_p (MEM_ALIAS_SET (x),
if (exp_rtl)
break;
- nops = first_rtl_op (TREE_CODE (exp));
+ nops = TREE_OPERAND_LENGTH (exp);
for (i = 0; i < nops; i++)
if (TREE_OPERAND (exp, i) != 0
&& ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
return 0;
- /* If this is a language-specific tree code, it may require
- special handling. */
- if ((unsigned int) TREE_CODE (exp)
- >= (unsigned int) LAST_AND_UNUSED_TREE_CODE
- && !lang_hooks.safe_from_p (x, exp))
- return 0;
+ break;
+
+ case tcc_type:
+ /* Should never get a type here. */
+ gcc_unreachable ();
+
+ case tcc_gimple_stmt:
+ gcc_unreachable ();
}
/* If we have an rtl, find any enclosed object. Then see if we conflict
/* Return the highest power of two that EXP is known to be a multiple of.
This is used in updating alignment of MEMs in array references. */
-static unsigned HOST_WIDE_INT
+unsigned HOST_WIDE_INT
highest_pow2_factor (tree exp)
{
unsigned HOST_WIDE_INT c0, c1;
a MIN_EXPR, or a MAX_EXPR. If the constant overflows, we have an
erroneous program, so return BIGGEST_ALIGNMENT to avoid any
later ICE. */
- if (TREE_CONSTANT_OVERFLOW (exp))
+ if (TREE_OVERFLOW (exp))
return BIGGEST_ALIGNMENT;
else
{
return MAX (factor, target_align);
}
\f
+/* Return &VAR expression for emulated thread local VAR. */
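+/* E.g. for "__thread int i;" on a target without native TLS support,
+ "&i" expands to "(int *) __emutls_get_address (&_emutls.i)". */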
+
+static tree
+emutls_var_address (tree var)
+{
+ tree emuvar = emutls_decl (var);
+ tree fn = built_in_decls [BUILT_IN_EMUTLS_GET_ADDRESS];
+ tree arg = build_fold_addr_expr_with_type (emuvar, ptr_type_node);
+ tree arglist = build_tree_list (NULL_TREE, arg);
+ tree call = build_function_call_expr (fn, arglist);
+ return fold_convert (build_pointer_type (TREE_TYPE (var)), call);
+}
+\f
/* Expands variable VAR. */
void
? !TREE_ASM_WRITTEN (var)
: !DECL_RTL_SET_P (var))
{
- if (TREE_CODE (var) == VAR_DECL && DECL_VALUE_EXPR (var))
+ if (TREE_CODE (var) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (var))
/* Should be ignored. */;
else if (lang_hooks.expand_decl (var))
/* OK. */;
}
\f
-/* A subroutine of expand_expr. Evaluate the address of EXP.
+/* Return a MEM that contains constant EXP. DEFER is as for
+ output_constant_def and MODIFIER is as for expand_expr. */
+
+static rtx
+expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
+{
+ rtx mem;
+
+ mem = output_constant_def (exp, defer);
+ if (modifier != EXPAND_INITIALIZER)
+ mem = use_anchored_address (mem);
+ return mem;
+}
+
+/* A subroutine of expand_expr_addr_expr. Evaluate the address of EXP.
The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
static rtx
-expand_expr_addr_expr (tree exp, rtx target, enum machine_mode tmode,
- enum expand_modifier modifier)
+expand_expr_addr_expr_1 (tree exp, rtx target, enum machine_mode tmode,
+ enum expand_modifier modifier)
{
rtx result, subtarget;
tree inner, offset;
generating ADDR_EXPR of something that isn't an LVALUE. The only
exception here is STRING_CST. */
if (TREE_CODE (exp) == CONSTRUCTOR
- || TREE_CODE_CLASS (TREE_CODE (exp)) == 'c')
- return XEXP (output_constant_def (exp, 0), 0);
+ || CONSTANT_CLASS_P (exp))
+ return XEXP (expand_expr_constant (exp, 0, modifier), 0);
/* Everything must be something allowed by is_gimple_addressable. */
switch (TREE_CODE (exp))
{
case INDIRECT_REF:
/* This case will happen via recursion for &a->b. */
- return expand_expr (TREE_OPERAND (exp, 0), target, tmode, EXPAND_NORMAL);
+ return expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
case CONST_DECL:
/* Recurse and make the output_constant_def clause above handle this. */
- return expand_expr_addr_expr (DECL_INITIAL (exp), target,
- tmode, modifier);
+ return expand_expr_addr_expr_1 (DECL_INITIAL (exp), target,
+ tmode, modifier);
case REALPART_EXPR:
/* The real part of the complex number is always first, therefore
inner = TREE_OPERAND (exp, 0);
break;
+ case VAR_DECL:
+ /* TLS emulation hook - replace __thread VAR's &VAR with
+ __emutls_get_address (&_emutls.VAR). */
+ if (! targetm.have_tls
+ && TREE_CODE (exp) == VAR_DECL
+ && DECL_THREAD_LOCAL_P (exp))
+ {
+ exp = emutls_var_address (exp);
+ return expand_expr (exp, target, tmode, modifier);
+ }
+ /* Fall through. */
+
default:
/* If the object is a DECL, then expand it for its rtl. Don't bypass
expand_expr, as that can have various side effects; LABEL_DECLs for
/* If the DECL isn't in memory, then the DECL wasn't properly
marked TREE_ADDRESSABLE, which will be either a front-end
or a tree optimizer bug. */
- gcc_assert (GET_CODE (result) == MEM);
+ gcc_assert (MEM_P (result));
result = XEXP (result, 0);
/* ??? Is this needed anymore? */
- if (!TREE_USED (exp) == 0)
+ if (DECL_P (exp) && !TREE_USED (exp) == 0)
{
assemble_external (exp);
TREE_USED (exp) = 1;
return result;
}
+ /* Pass FALSE as the last argument to get_inner_reference although
+ we are expanding to RTL. The rationale is that we know how to
+ handle "aligning nodes" here: we can just bypass them because
+ they won't change the final object whose address will be returned
+ (they actually exist only for that purpose). */
inner = get_inner_reference (exp, &bitsize, &bitpos, &offset,
- &mode1, &unsignedp, &volatilep);
+ &mode1, &unsignedp, &volatilep, false);
break;
}
gcc_assert (inner != exp);
subtarget = offset || bitpos ? NULL_RTX : target;
- result = expand_expr_addr_expr (inner, subtarget, tmode, modifier);
-
- if (tmode == VOIDmode)
- {
- tmode = GET_MODE (result);
- if (tmode == VOIDmode)
- tmode = Pmode;
- }
+ result = expand_expr_addr_expr_1 (inner, subtarget, tmode, modifier);
if (offset)
{
if (modifier != EXPAND_NORMAL)
result = force_operand (result, NULL);
- tmp = expand_expr (offset, NULL, tmode, EXPAND_NORMAL);
+ tmp = expand_expr (offset, NULL_RTX, tmode,
+ modifier == EXPAND_INITIALIZER
+ ? EXPAND_INITIALIZER : EXPAND_NORMAL);
+
+ result = convert_memory_address (tmode, result);
+ tmp = convert_memory_address (tmode, tmp);
- if (modifier == EXPAND_SUM)
+ if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
result = gen_rtx_PLUS (tmode, result, tmp);
else
{
{
/* Someone beforehand should have rejected taking the address
of such an object. */
- gcc_assert (!(bitpos % BITS_PER_UNIT));
+ gcc_assert ((bitpos % BITS_PER_UNIT) == 0);
result = plus_constant (result, bitpos / BITS_PER_UNIT);
if (modifier < EXPAND_SUM)
return result;
}
+/* A subroutine of expand_expr. Evaluate EXP, which is an ADDR_EXPR.
+ The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
+
+static rtx
+expand_expr_addr_expr (tree exp, rtx target, enum machine_mode tmode,
+ enum expand_modifier modifier)
+{
+ enum machine_mode rmode;
+ rtx result;
+
+ /* Target mode of VOIDmode says "whatever's natural". */
+ if (tmode == VOIDmode)
+ tmode = TYPE_MODE (TREE_TYPE (exp));
+
+ /* We can get called with some Weird Things if the user does silliness
+ like "(short) &a". In that case, convert_memory_address won't do
+ the right thing, so ignore the given target mode. */
+ if (tmode != Pmode && tmode != ptr_mode)
+ tmode = Pmode;
+
+ result = expand_expr_addr_expr_1 (TREE_OPERAND (exp, 0), target,
+ tmode, modifier);
+
+ /* Despite what expand_expr claims about ignoring TMODE when it is
+ not strictly convenient, things break if we don't honor it. Note
+ that combined with the above, we only do this for pointer modes. */
+ rmode = GET_MODE (result);
+ if (rmode == VOIDmode)
+ rmode = tmode;
+ if (rmode != tmode)
+ result = convert_memory_address (tmode, result);
+
+ return result;
+}
+
+
/* expand_expr: generate code for computing expression EXP.
An rtx for the computed value is returned. The value is never null.
In the case of a void EXP, const0_rtx is returned.
/* Handle ERROR_MARK before anybody tries to access its type. */
if (TREE_CODE (exp) == ERROR_MARK
- || TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK)
+ || (!GIMPLE_TUPLE_P (exp) && TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
{
ret = CONST0_RTX (tmode);
return ret ? ret : const0_rtx;
{
location_t saved_location = input_location;
input_location = EXPR_LOCATION (exp);
- emit_line_note (input_location);
+ set_curr_insn_source_location (input_location);
/* Record where the insns produced belong. */
- record_block_change (TREE_BLOCK (exp));
+ set_curr_insn_block (TREE_BLOCK (exp));
ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl);
expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
enum expand_modifier modifier, rtx *alt_rtl)
{
- rtx op0, op1, temp;
- tree type = TREE_TYPE (exp);
+ rtx op0, op1, op2, temp, decl_rtl;
+ tree type;
int unsignedp;
enum machine_mode mode;
enum tree_code code = TREE_CODE (exp);
optab this_optab;
rtx subtarget, original_target;
int ignore;
- tree context;
+ tree context, subexp0, subexp1;
bool reduce_bit_field = false;
#define REDUCE_BIT_FIELD(expr) (reduce_bit_field && !ignore \
? reduce_to_bit_field_precision ((expr), \
type) \
: (expr))
- mode = TYPE_MODE (type);
- unsignedp = TYPE_UNSIGNED (type);
+ if (GIMPLE_STMT_P (exp))
+ {
+ type = void_type_node;
+ mode = VOIDmode;
+ unsignedp = 0;
+ }
+ else
+ {
+ type = TREE_TYPE (exp);
+ mode = TYPE_MODE (type);
+ unsignedp = TYPE_UNSIGNED (type);
+ }
if (lang_hooks.reduce_bit_field_operations
&& TREE_CODE (type) == INTEGER_TYPE
&& GET_MODE_PRECISION (mode) > TYPE_PRECISION (type))
return const0_rtx;
}
- if (TREE_CODE_CLASS (code) == '1' || code == COMPONENT_REF
- || code == INDIRECT_REF)
+ if (TREE_CODE_CLASS (code) == tcc_unary
+ || code == COMPONENT_REF || code == INDIRECT_REF)
return expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
modifier);
- else if (TREE_CODE_CLASS (code) == '2' || TREE_CODE_CLASS (code) == '<'
+ else if (TREE_CODE_CLASS (code) == tcc_binary
+ || TREE_CODE_CLASS (code) == tcc_comparison
|| code == ARRAY_REF || code == ARRAY_RANGE_REF)
{
expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier);
expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, modifier);
return const0_rtx;
}
- else if ((code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)
- && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
- /* If the second operand has no side effects, just evaluate
- the first. */
- return expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
- modifier);
else if (code == BIT_FIELD_REF)
{
expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier);
target = 0;
}
- /* If will do cse, generate all results into pseudo registers
- since 1) that allows cse to find more things
- and 2) otherwise cse could produce an insn the machine
- cannot support. An exception is a CONSTRUCTOR into a multi-word
- MEM: that's much more likely to be most efficient into the MEM.
- Another is a CALL_EXPR which must return in memory. */
-
- if (! cse_not_expected && mode != BLKmode && target
- && (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
- && ! (code == CONSTRUCTOR && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
- && ! (code == CALL_EXPR && aggregate_value_p (exp, exp)))
- target = 0;
switch (code)
{
&& (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
layout_decl (exp, 0);
+ /* TLS emulation hook - replace __thread vars with
+ *__emutls_get_address (&_emutls.var). */
+ if (! targetm.have_tls
+ && TREE_CODE (exp) == VAR_DECL
+ && DECL_THREAD_LOCAL_P (exp))
+ {
+ exp = build_fold_indirect_ref (emutls_var_address (exp));
+ return expand_expr_real_1 (exp, target, tmode, modifier, NULL);
+ }
+
/* ... fall through ... */
case FUNCTION_DECL:
case RESULT_DECL:
- gcc_assert (DECL_RTL (exp));
+ decl_rtl = DECL_RTL (exp);
+ gcc_assert (decl_rtl);
+ decl_rtl = copy_rtx (decl_rtl);
/* Ensure variable marked as used even if it doesn't go through
a parser. If it hasn't been used yet, write out an external
from its initializer, while the initializer is still being parsed.
See expand_decl. */
- if (MEM_P (DECL_RTL (exp))
- && REG_P (XEXP (DECL_RTL (exp), 0)))
- temp = validize_mem (DECL_RTL (exp));
+ if (MEM_P (decl_rtl) && REG_P (XEXP (decl_rtl, 0)))
+ temp = validize_mem (decl_rtl);
/* If DECL_RTL is memory, we are in the normal case and either
the address is not valid or it is not a register and -fforce-addr
is specified, get the address into a register. */
- else if (MEM_P (DECL_RTL (exp))
- && modifier != EXPAND_CONST_ADDRESS
- && modifier != EXPAND_SUM
- && modifier != EXPAND_INITIALIZER
- && (! memory_address_p (DECL_MODE (exp),
- XEXP (DECL_RTL (exp), 0))
- || (flag_force_addr
- && !REG_P (XEXP (DECL_RTL (exp), 0)))))
+ else if (MEM_P (decl_rtl) && modifier != EXPAND_INITIALIZER)
{
if (alt_rtl)
- *alt_rtl = DECL_RTL (exp);
- temp = replace_equiv_address (DECL_RTL (exp),
- copy_rtx (XEXP (DECL_RTL (exp), 0)));
+ *alt_rtl = decl_rtl;
+ decl_rtl = use_anchored_address (decl_rtl);
+ if (modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_SUM
+ && (!memory_address_p (DECL_MODE (exp), XEXP (decl_rtl, 0))
+ || (flag_force_addr && !REG_P (XEXP (decl_rtl, 0)))))
+ temp = replace_equiv_address (decl_rtl,
+ copy_rtx (XEXP (decl_rtl, 0)));
}
/* If we got something, return it. But first, set the alignment
must be a promoted value. We return a SUBREG of the wanted mode,
but mark it so that we know that it was already extended. */
- if (REG_P (DECL_RTL (exp))
- && GET_MODE (DECL_RTL (exp)) != DECL_MODE (exp))
+ if (REG_P (decl_rtl)
+ && GET_MODE (decl_rtl) != DECL_MODE (exp))
{
enum machine_mode pmode;
-
+
/* Get the signedness used for this variable. Ensure we get the
same mode we got when the variable was declared. */
pmode = promote_mode (type, DECL_MODE (exp), &unsignedp,
- (TREE_CODE (exp) == RESULT_DECL ? 1 : 0));
- gcc_assert (GET_MODE (DECL_RTL (exp)) == pmode);
+ (TREE_CODE (exp) == RESULT_DECL
+ || TREE_CODE (exp) == PARM_DECL) ? 1 : 0);
+ gcc_assert (GET_MODE (decl_rtl) == pmode);
- temp = gen_lowpart_SUBREG (mode, DECL_RTL (exp));
+ temp = gen_lowpart_SUBREG (mode, decl_rtl);
SUBREG_PROMOTED_VAR_P (temp) = 1;
SUBREG_PROMOTED_UNSIGNED_SET (temp, unsignedp);
return temp;
}
- return DECL_RTL (exp);
+ return decl_rtl;
case INTEGER_CST:
temp = immed_double_const (TREE_INT_CST_LOW (exp),
TREE_INT_CST_HIGH (exp), mode);
- /* ??? If overflow is set, fold will have done an incomplete job,
- which can result in (plus xx (const_int 0)), which can get
- simplified by validate_replace_rtx during virtual register
- instantiation, which can result in unrecognizable insns.
- Avoid this by forcing all overflows into registers. */
- if (TREE_CONSTANT_OVERFLOW (exp)
- && modifier != EXPAND_INITIALIZER)
- temp = force_reg (mode, temp);
-
return temp;
case VECTOR_CST:
- if (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp))) == MODE_VECTOR_INT
- || GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp))) == MODE_VECTOR_FLOAT)
- return const_vector_from_tree (exp);
- else
- return expand_expr (build1 (CONSTRUCTOR, TREE_TYPE (exp),
- TREE_VECTOR_CST_ELTS (exp)),
- ignore ? const0_rtx : target, tmode, modifier);
+ {
+ tree tmp = NULL_TREE;
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+ return const_vector_from_tree (exp);
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ tree type_for_mode = lang_hooks.types.type_for_mode (mode, 1);
+ if (type_for_mode)
+ tmp = fold_unary (VIEW_CONVERT_EXPR, type_for_mode, exp);
+ }
+ if (!tmp)
+ tmp = build_constructor_from_list (type,
+ TREE_VECTOR_CST_ELTS (exp));
+ return expand_expr (tmp, ignore ? const0_rtx : target,
+ tmode, modifier);
+ }
case CONST_DECL:
return expand_expr (DECL_INITIAL (exp), target, VOIDmode, modifier);
itarg = XEXP (original_target, 1);
/* Move the real and imaginary parts separately. */
- op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, 0);
- op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, 0);
+ op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, EXPAND_NORMAL);
+ op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, EXPAND_NORMAL);
if (op0 != rtarg)
emit_move_insn (rtarg, op0);
/* ... fall through ... */
case STRING_CST:
- temp = output_constant_def (exp, 1);
+ temp = expand_expr_constant (exp, 1, modifier);
/* temp contains a constant address.
On RISC machines where a constant address isn't valid,
subexpressions. */
if (ignore)
{
- tree elt;
+ unsigned HOST_WIDE_INT idx;
+ tree value;
- for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt))
- expand_expr (TREE_VALUE (elt), const0_rtx, VOIDmode, 0);
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
+ expand_expr (value, const0_rtx, VOIDmode, EXPAND_NORMAL);
return const0_rtx;
}
+ /* Try to avoid creating a temporary at all. This is possible
+ if all of the initializer is zero.
+ FIXME: try to handle, with memset, any initializer consisting of a
+ single byte value repeated throughout. */
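+ /* E.g. an all-zero constant aggregate stored into a BLKmode
+ TARGET becomes one block-clear call instead of a copy from a
+ constant-pool temporary. */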
+ else if (TREE_STATIC (exp)
+ && !TREE_ADDRESSABLE (exp)
+ && target != 0 && mode == BLKmode
+ && all_zeros_p (exp))
+ {
+ clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
+ return target;
+ }
+
/* All elts simple constants => refer to a constant in memory. But
if this is a non-BLKmode mode, let it store a field at a time
since that should make a CONST_INT or CONST_DOUBLE when we
|| modifier == EXPAND_CONST_ADDRESS)
&& TREE_CONSTANT (exp)))
{
- rtx constructor = output_constant_def (exp, 1);
+ rtx constructor = expand_expr_constant (exp, 1, modifier);
if (modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER
return target;
}
+ case MISALIGNED_INDIRECT_REF:
+ case ALIGN_INDIRECT_REF:
case INDIRECT_REF:
{
tree exp1 = TREE_OPERAND (exp, 0);
- tree orig;
if (modifier != EXPAND_WRITE)
{
op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
op0 = memory_address (mode, op0);
+
+ if (code == ALIGN_INDIRECT_REF)
+ {
+ int align = TYPE_ALIGN_UNIT (type);
+ op0 = gen_rtx_AND (Pmode, op0, GEN_INT (-align));
+ op0 = memory_address (mode, op0);
+ }
+
temp = gen_rtx_MEM (mode, op0);
- orig = REF_ORIGINAL (exp);
- if (!orig)
- orig = exp;
- set_mem_attributes (temp, orig, 0);
+ set_mem_attributes (temp, exp, 0);
+
+ /* Resolve the misalignment now, so that we don't have to remember
+ to resolve it later. Of course, this only works for reads. */
+ /* ??? When we get around to supporting writes, we'll have to handle
+ this in store_expr directly. The vectorizer isn't generating
+ those yet, however. */
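+ /* On targets providing a movmisalign pattern for MODE (e.g. an
+ unaligned vector load), the MISALIGNED_INDIRECT_REF becomes a
+ single unaligned load into a fresh pseudo below. */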
+ if (code == MISALIGNED_INDIRECT_REF)
+ {
+ int icode;
+ rtx reg, insn;
+
+ gcc_assert (modifier == EXPAND_NORMAL
+ || modifier == EXPAND_STACK_PARM);
+
+ /* The vectorizer should have already checked the mode. */
+ icode = movmisalign_optab->handlers[mode].insn_code;
+ gcc_assert (icode != CODE_FOR_nothing);
+
+ /* We've already validated the memory, and we're creating a
+ new pseudo destination. The predicates really can't fail. */
+ reg = gen_reg_rtx (mode);
+
+ /* Nor can the insn generator. */
+ insn = GEN_FCN (icode) (reg, temp);
+ emit_insn (insn);
+
+ return reg;
+ }
return temp;
}
- case ARRAY_REF:
-
+ case TARGET_MEM_REF:
{
- tree array = TREE_OPERAND (exp, 0);
- tree low_bound = array_ref_low_bound (exp);
- tree index = convert (sizetype, TREE_OPERAND (exp, 1));
- HOST_WIDE_INT i;
+ struct mem_address addr;
- gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
-
- /* Optimize the special-case of a zero lower bound.
+ get_address_description (exp, &addr);
+ op0 = addr_for_mem_ref (&addr, true);
+ op0 = memory_address (mode, op0);
+ temp = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (temp, TMR_ORIGINAL (exp), 0);
+ }
+ return temp;
- We convert the low_bound to sizetype to avoid some problems
- with constant folding. (E.g. suppose the lower bound is 1,
- and its mode is QI. Without the conversion, (ARRAY
- +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
- +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
+ case ARRAY_REF:
- if (! integer_zerop (low_bound))
- index = size_diffop (index, convert (sizetype, low_bound));
+ {
+ tree array = TREE_OPERAND (exp, 0);
+ tree index = TREE_OPERAND (exp, 1);
/* Fold an expression like: "foo"[2].
This is not done in fold so it won't happen inside &.
&& modifier != EXPAND_MEMORY
&& TREE_CODE (array) == CONSTRUCTOR
&& ! TREE_SIDE_EFFECTS (array)
- && TREE_CODE (index) == INTEGER_CST
- && 0 > compare_tree_int (index,
- list_length (CONSTRUCTOR_ELTS
- (TREE_OPERAND (exp, 0)))))
+ && TREE_CODE (index) == INTEGER_CST)
{
- tree elem;
+ unsigned HOST_WIDE_INT ix;
+ tree field, value;
- for (elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)),
- i = TREE_INT_CST_LOW (index);
- elem != 0 && i != 0; i--, elem = TREE_CHAIN (elem))
- ;
-
- if (elem)
- return expand_expr (fold (TREE_VALUE (elem)), target, tmode,
- modifier);
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array), ix,
+ field, value)
+ if (tree_int_cst_equal (field, index))
+ {
+ if (!TREE_SIDE_EFFECTS (value))
+ return expand_expr (fold (value), target, tmode, modifier);
+ break;
+ }
}
else if (optimize >= 1
if (TREE_CODE (init) == CONSTRUCTOR)
{
- tree elem;
-
- for (elem = CONSTRUCTOR_ELTS (init);
- (elem
- && !tree_int_cst_equal (TREE_PURPOSE (elem), index));
- elem = TREE_CHAIN (elem))
- ;
+ unsigned HOST_WIDE_INT ix;
+ tree field, value;
- if (elem && !TREE_SIDE_EFFECTS (TREE_VALUE (elem)))
- return expand_expr (fold (TREE_VALUE (elem)), target,
- tmode, modifier);
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
+ field, value)
+ if (tree_int_cst_equal (field, index))
+ {
+ if (!TREE_SIDE_EFFECTS (value))
+ return expand_expr (fold (value), target, tmode,
+ modifier);
+ break;
+ }
}
- else if (TREE_CODE (init) == STRING_CST
- && 0 > compare_tree_int (index,
- TREE_STRING_LENGTH (init)))
+ else if (TREE_CODE (init) == STRING_CST)
{
- tree type = TREE_TYPE (TREE_TYPE (init));
- enum machine_mode mode = TYPE_MODE (type);
+ tree index1 = index;
+ tree low_bound = array_ref_low_bound (exp);
+ index1 = fold_convert (sizetype, TREE_OPERAND (exp, 1));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_SIZE (mode) == 1)
- return gen_int_mode (TREE_STRING_POINTER (init)
- [TREE_INT_CST_LOW (index)], mode);
+ /* Optimize the special-case of a zero lower bound.
+
+ We convert the low_bound to sizetype to avoid some problems
+ with constant folding. (E.g. suppose the lower bound is 1,
+ and its mode is QI. Without the conversion, (ARRAY
+ +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
+
+ if (! integer_zerop (low_bound))
+ index1 = size_diffop (index1, fold_convert (sizetype,
+ low_bound));
+
+ if (0 > compare_tree_int (index1,
+ TREE_STRING_LENGTH (init)))
+ {
+ tree type = TREE_TYPE (TREE_TYPE (init));
+ enum machine_mode mode = TYPE_MODE (type);
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == 1)
+ return gen_int_mode (TREE_STRING_POINTER (init)
+ [TREE_INT_CST_LOW (index1)],
+ mode);
+ }
}
}
}
appropriate field if it is present. */
if (TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR)
{
- tree elt;
+ unsigned HOST_WIDE_INT idx;
+ tree field, value;
- for (elt = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)); elt;
- elt = TREE_CHAIN (elt))
- if (TREE_PURPOSE (elt) == TREE_OPERAND (exp, 1)
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)),
+ idx, field, value)
+ if (field == TREE_OPERAND (exp, 1)
/* We can normally use the value of the field in the
CONSTRUCTOR. However, if this is a bitfield in
an integral mode that we can fit in a HOST_WIDE_INT,
since this is done implicitly by the constructor. If
the bitfield does not meet either of those conditions,
we can't do this optimization. */
- && (! DECL_BIT_FIELD (TREE_PURPOSE (elt))
- || ((GET_MODE_CLASS (DECL_MODE (TREE_PURPOSE (elt)))
- == MODE_INT)
- && (GET_MODE_BITSIZE (DECL_MODE (TREE_PURPOSE (elt)))
+ && (! DECL_BIT_FIELD (field)
+ || ((GET_MODE_CLASS (DECL_MODE (field)) == MODE_INT)
+ && (GET_MODE_BITSIZE (DECL_MODE (field))
<= HOST_BITS_PER_WIDE_INT))))
{
- if (DECL_BIT_FIELD (TREE_PURPOSE (elt))
+ if (DECL_BIT_FIELD (field)
&& modifier == EXPAND_STACK_PARM)
target = 0;
- op0 = expand_expr (TREE_VALUE (elt), target, tmode, modifier);
- if (DECL_BIT_FIELD (TREE_PURPOSE (elt)))
+ op0 = expand_expr (value, target, tmode, modifier);
+ if (DECL_BIT_FIELD (field))
{
- HOST_WIDE_INT bitsize
- = TREE_INT_CST_LOW (DECL_SIZE (TREE_PURPOSE (elt)));
- enum machine_mode imode
- = TYPE_MODE (TREE_TYPE (TREE_PURPOSE (elt)));
+ HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
+ enum machine_mode imode = TYPE_MODE (TREE_TYPE (field));
- if (TYPE_UNSIGNED (TREE_TYPE (TREE_PURPOSE (elt))))
+ if (TYPE_UNSIGNED (TREE_TYPE (field)))
{
op1 = GEN_INT (((HOST_WIDE_INT) 1 << bitsize) - 1);
op0 = expand_and (imode, op0, op1, target);
tree offset;
int volatilep = 0;
tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
- &mode1, &unsignedp, &volatilep);
+ &mode1, &unsignedp, &volatilep, true);
rtx orig_op0;
/* If we got back the original object, something is wrong. Perhaps
|| modifier == EXPAND_STACK_PARM)
? modifier : EXPAND_NORMAL);
- /* If this is a constant, put it into a register if it is a
- legitimate constant and OFFSET is 0 and memory if it isn't. */
+ /* If this is a constant, put it into a register if it is a legitimate
+ constant, OFFSET is 0, and we won't try to extract outside the
+ register (in case we were passed a partially uninitialized object
+ or a view_conversion to a larger size). Force the constant to
+ memory otherwise. */
if (CONSTANT_P (op0))
{
enum machine_mode mode = TYPE_MODE (TREE_TYPE (tem));
if (mode != BLKmode && LEGITIMATE_CONSTANT_P (op0)
- && offset == 0)
+ && offset == 0
+ && bitpos + bitsize <= GET_MODE_BITSIZE (mode))
op0 = force_reg (mode, op0);
else
op0 = validize_mem (force_const_mem (mode, op0));
}
- /* Otherwise, if this object not in memory and we either have an
- offset or a BLKmode result, put it there. This case can't occur in
- C, but can in Ada if we have unchecked conversion of an expression
- from a scalar type to an array or record type or for an
- ARRAY_RANGE_REF whose type is BLKmode. */
+ /* Otherwise, if this object is not in memory and we either have an
+ offset, a BLKmode result, or a reference outside the object, put it
+ there. Such cases can occur in Ada if we have unchecked conversion
+ of an expression from a scalar type to an array or record type or
+ for an ARRAY_RANGE_REF whose type is BLKmode. */
else if (!MEM_P (op0)
&& (offset != 0
+ || (bitpos + bitsize > GET_MODE_BITSIZE (GET_MODE (op0)))
|| (code == ARRAY_RANGE_REF && mode == BLKmode)))
{
tree nt = build_qualified_type (TREE_TYPE (tem),
necessarily be constant. */
if (mode == BLKmode)
{
- rtx new
- = assign_stack_temp_for_type
- (ext_mode, GET_MODE_BITSIZE (ext_mode), 0, type);
+ HOST_WIDE_INT size = GET_MODE_BITSIZE (ext_mode);
+ rtx new;
+
+ /* If the reference doesn't use the alias set of its type,
+ we cannot create the temporary using that type. */
+ if (component_uses_parent_alias_set (exp))
+ {
+ new = assign_stack_local (ext_mode, size, 0);
+ set_mem_alias_set (new, get_alias_set (exp));
+ }
+ else
+ new = assign_stack_temp_for_type (ext_mode, size, 0, type);
emit_move_insn (new, op0);
op0 = copy_rtx (new);
case CALL_EXPR:
/* Check for a built-in function. */
- if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
- && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ if (TREE_CODE (CALL_EXPR_FN (exp)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (CALL_EXPR_FN (exp), 0))
== FUNCTION_DECL)
- && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ && DECL_BUILT_IN (TREE_OPERAND (CALL_EXPR_FN (exp), 0)))
{
- if (DECL_BUILT_IN_CLASS (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ if (DECL_BUILT_IN_CLASS (TREE_OPERAND (CALL_EXPR_FN (exp), 0))
== BUILT_IN_FRONTEND)
return lang_hooks.expand_expr (exp, original_target,
tmode, modifier,
/* Store data into beginning of memory target. */
store_expr (TREE_OPERAND (exp, 0),
adjust_address (target, TYPE_MODE (valtype), 0),
- modifier == EXPAND_STACK_PARM ? 2 : 0);
+ modifier == EXPAND_STACK_PARM,
+ false);
else
{
gcc_assert (REG_P (target));
-
+
/* Store this field into a union of the proper type. */
store_field (target,
MIN ((int_size_in_bytes (TREE_TYPE
* BITS_PER_UNIT),
(HOST_WIDE_INT) GET_MODE_BITSIZE (mode)),
0, TYPE_MODE (valtype), TREE_OPERAND (exp, 0),
- VOIDmode, 0, type, 0);
+ type, 0, false);
}
/* Return the entire union. */
return REDUCE_BIT_FIELD (op0);
}
- op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier);
- op0 = REDUCE_BIT_FIELD (op0);
+ op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode,
+ modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
if (GET_MODE (op0) == mode)
- return op0;
+ ;
/* If OP0 is a constant, just convert it into the proper mode. */
- if (CONSTANT_P (op0))
+ else if (CONSTANT_P (op0))
{
tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
enum machine_mode inner_mode = TYPE_MODE (inner_type);
if (modifier == EXPAND_INITIALIZER)
- return simplify_gen_subreg (mode, op0, inner_mode,
- subreg_lowpart_offset (mode,
- inner_mode));
+ op0 = simplify_gen_subreg (mode, op0, inner_mode,
+ subreg_lowpart_offset (mode,
+ inner_mode));
else
- return convert_modes (mode, inner_mode, op0,
- TYPE_UNSIGNED (inner_type));
+ op0 = convert_modes (mode, inner_mode, op0,
+ TYPE_UNSIGNED (inner_type));
}
- if (modifier == EXPAND_INITIALIZER)
- return gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
+ else if (modifier == EXPAND_INITIALIZER)
+ op0 = gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
- if (target == 0)
- return
- convert_to_mode (mode, op0,
- TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ else if (target == 0)
+ op0 = convert_to_mode (mode, op0,
+ TYPE_UNSIGNED (TREE_TYPE
+ (TREE_OPERAND (exp, 0))));
else
- convert_move (target, op0,
- TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
- return target;
+ {
+ convert_move (target, op0,
+ TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ op0 = target;
+ }
+
+ return REDUCE_BIT_FIELD (op0);
case VIEW_CONVERT_EXPR:
op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier);
- /* If the input and output modes are both the same, we are done.
- Otherwise, if neither mode is BLKmode and both are integral and within
- a word, we can use gen_lowpart. If neither is true, make sure the
- operand is in memory and convert the MEM to the new mode. */
+ /* If the input and output modes are both the same, we are done. */
if (TYPE_MODE (type) == GET_MODE (op0))
;
+ /* If neither mode is BLKmode and both modes are the same size,
+ then we can use gen_lowpart. */
else if (TYPE_MODE (type) != BLKmode && GET_MODE (op0) != BLKmode
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
- && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
- && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD
- && GET_MODE_SIZE (GET_MODE (op0)) <= UNITS_PER_WORD)
- op0 = gen_lowpart (TYPE_MODE (type), op0);
+ && GET_MODE_SIZE (TYPE_MODE (type))
+ == GET_MODE_SIZE (GET_MODE (op0)))
+ {
+ if (GET_CODE (op0) == SUBREG)
+ op0 = force_reg (GET_MODE (op0), op0);
+ op0 = gen_lowpart (TYPE_MODE (type), op0);
+ }
+ /* If both modes are integral, then we can convert from one to the
+ other. */
+ else if (SCALAR_INT_MODE_P (GET_MODE (op0))
+ && SCALAR_INT_MODE_P (TYPE_MODE (type)))
+ op0 = convert_modes (TYPE_MODE (type), GET_MODE (op0), op0,
+ TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ /* As a last resort, spill op0 to memory, and reload it in a
+ different mode. */
else if (!MEM_P (op0))
{
/* If the operand is not a MEM, force it into memory. Since we
- are going to be be changing the mode of the MEM, don't call
+ are going to be changing the mode of the MEM, don't call
force_const_mem for constants because we don't allow pool
constants to change mode. */
tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
return op0;
+ case POINTER_PLUS_EXPR:
+ /* Even though the sizetype mode and the pointer's mode can be
+ different, expand is able to handle this correctly and get the
+ correct result out of the PLUS_EXPR code. */
case PLUS_EXPR:
+
+ /* Check if this is a case for multiplication and addition. */
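+ /* E.g. with SImode operands A and B, the tree
+ "(DImode) A * (DImode) B + C" has this shape; if the target
+ provides a [us]madd_widen pattern for DImode, the whole
+ expression expands to a single widening multiply-add. */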
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == MULT_EXPR)
+ {
+ tree subsubexp0, subsubexp1;
+ enum tree_code code0, code1;
+
+ subexp0 = TREE_OPERAND (exp, 0);
+ subsubexp0 = TREE_OPERAND (subexp0, 0);
+ subsubexp1 = TREE_OPERAND (subexp0, 1);
+ code0 = TREE_CODE (subsubexp0);
+ code1 = TREE_CODE (subsubexp1);
+ if (code0 == NOP_EXPR && code1 == NOP_EXPR
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subsubexp0, 0)))
+ < TYPE_PRECISION (TREE_TYPE (subsubexp0)))
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subsubexp0, 0)))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subsubexp1, 0))))
+ && (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (subsubexp0, 0)))
+ == TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (subsubexp1, 0)))))
+ {
+ tree op0type = TREE_TYPE (TREE_OPERAND (subsubexp0, 0));
+ enum machine_mode innermode = TYPE_MODE (op0type);
+ bool zextend_p = TYPE_UNSIGNED (op0type);
+ this_optab = zextend_p ? umadd_widen_optab : smadd_widen_optab;
+ if (mode == GET_MODE_2XWIDER_MODE (innermode)
+ && (this_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing))
+ {
+ expand_operands (TREE_OPERAND (subsubexp0, 0),
+ TREE_OPERAND (subsubexp1, 0),
+ NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_expr (TREE_OPERAND (exp, 1), subtarget,
+ VOIDmode, EXPAND_NORMAL);
+ temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
+ target, unsignedp);
+ gcc_assert (temp);
+ return REDUCE_BIT_FIELD (temp);
+ }
+ }
+ }
+
/* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
something else, make sure we add the register to the constant and
then to the other thing. This case can occur during strength
}
else if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& TREE_CONSTANT (TREE_OPERAND (exp, 0)))
{
rtx constant_part;
return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
case MINUS_EXPR:
+ /* Check if this is a case for multiplication and subtraction. */
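+ /* The mirror image of the PLUS_EXPR case above: e.g.
+ "C - (DImode) A * (DImode) B" with SImode A and B can expand to
+ a single widening multiply-subtract via [us]msub_widen_optab. */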
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == MULT_EXPR)
+ {
+ tree subsubexp0, subsubexp1;
+ enum tree_code code0, code1;
+
+ subexp1 = TREE_OPERAND (exp, 1);
+ subsubexp0 = TREE_OPERAND (subexp1, 0);
+ subsubexp1 = TREE_OPERAND (subexp1, 1);
+ code0 = TREE_CODE (subsubexp0);
+ code1 = TREE_CODE (subsubexp1);
+ if (code0 == NOP_EXPR && code1 == NOP_EXPR
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subsubexp0, 0)))
+ < TYPE_PRECISION (TREE_TYPE (subsubexp0)))
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subsubexp0, 0)))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subsubexp1, 0))))
+ && (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (subsubexp0, 0)))
+ == TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (subsubexp1, 0)))))
+ {
+ tree op0type = TREE_TYPE (TREE_OPERAND (subsubexp0, 0));
+ enum machine_mode innermode = TYPE_MODE (op0type);
+ bool zextend_p = TYPE_UNSIGNED (op0type);
+ this_optab = zextend_p ? umsub_widen_optab : smsub_widen_optab;
+ if (mode == GET_MODE_2XWIDER_MODE (innermode)
+ && (this_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing))
+ {
+ expand_operands (TREE_OPERAND (subsubexp0, 0),
+ TREE_OPERAND (subsubexp1, 0),
+ NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_expr (TREE_OPERAND (exp, 0), subtarget,
+ VOIDmode, EXPAND_NORMAL);
+ temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
+ target, unsignedp);
+ gcc_assert (temp);
+ return REDUCE_BIT_FIELD (temp);
+ }
+ }
+ }
+
/* For initializers, we are allowed to return a MINUS of two
symbolic constants. Here we handle all cases when both operands
are constant. */
from a narrower type. If this machine supports multiplying
in that narrower type with a result in the desired type,
do it that way, and avoid the explicit type-conversion. */
- if (TREE_CODE (TREE_OPERAND (exp, 0)) == NOP_EXPR
+
+ subexp0 = TREE_OPERAND (exp, 0);
+ subexp1 = TREE_OPERAND (exp, 1);
+ /* First, check if we have a multiplication of one signed and one
+ unsigned operand. */
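+ /* E.g. "(DImode) U * (DImode) S" with U unsigned SImode and
+ S signed SImode; the two expansions below simply order the
+ operands as usmul_widen_optab requires. */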
+ if (TREE_CODE (subexp0) == NOP_EXPR
+ && TREE_CODE (subexp1) == NOP_EXPR
+ && TREE_CODE (type) == INTEGER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subexp0, 0)))
+ < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subexp0, 0)))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (subexp1, 0))))
+ && (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (subexp0, 0)))
+ != TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (subexp1, 0)))))
+ {
+ enum machine_mode innermode
+ = TYPE_MODE (TREE_TYPE (TREE_OPERAND (subexp0, 0)));
+ this_optab = usmul_widen_optab;
+ if (mode == GET_MODE_WIDER_MODE (innermode))
+ {
+ if (this_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (subexp0, 0))))
+ expand_operands (TREE_OPERAND (subexp0, 0),
+ TREE_OPERAND (subexp1, 0),
+ NULL_RTX, &op0, &op1, 0);
+ else
+ expand_operands (TREE_OPERAND (subexp0, 0),
+ TREE_OPERAND (subexp1, 0),
+ NULL_RTX, &op1, &op0, 0);
+
+ goto binop3;
+ }
+ }
+ }
+ /* Check for a multiplication with matching signedness. */
+ else if (TREE_CODE (TREE_OPERAND (exp, 0)) == NOP_EXPR
&& TREE_CODE (type) == INTEGER_TYPE
&& (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
< TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))))
optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
- if (mode == GET_MODE_WIDER_MODE (innermode))
+ if (mode == GET_MODE_2XWIDER_MODE (innermode))
{
if (this_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
{
if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
expand_operands (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
TREE_OPERAND (exp, 1),
- NULL_RTX, &op0, &op1, 0);
+ NULL_RTX, &op0, &op1, EXPAND_NORMAL);
else
expand_operands (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
TREE_OPERAND (TREE_OPERAND (exp, 1), 0),
- NULL_RTX, &op0, &op1, 0);
+ NULL_RTX, &op0, &op1, EXPAND_NORMAL);
goto binop3;
}
else if (other_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
&& innermode == word_mode)
{
rtx htem, hipart;
- op0 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
- NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (TREE_OPERAND (TREE_OPERAND (exp, 0), 0));
if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
op1 = convert_modes (innermode, mode,
- expand_expr (TREE_OPERAND (exp, 1),
- NULL_RTX, VOIDmode, 0),
+ expand_normal (TREE_OPERAND (exp, 1)),
unsignedp);
else
- op1 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 1), 0),
- NULL_RTX, VOIDmode, 0);
+ op1 = expand_normal (TREE_OPERAND (TREE_OPERAND (exp, 1), 0));
temp = expand_binop (mode, other_optab, op0, op1, target,
unsignedp, OPTAB_LIB_WIDEN);
hipart = gen_highpart (innermode, temp);
return expand_divmod (0, code, mode, op0, op1, target, unsignedp);
case RDIV_EXPR:
- /* Emit a/b as a*(1/b). Later we may manage CSE the reciprocal saving
- expensive divide. If not, combine will rebuild the original
- computation. */
- if (flag_unsafe_math_optimizations && optimize && !optimize_size
- && TREE_CODE (type) == REAL_TYPE
- && !real_onep (TREE_OPERAND (exp, 0)))
- return expand_expr (build2 (MULT_EXPR, type, TREE_OPERAND (exp, 0),
- build2 (RDIV_EXPR, type,
- build_real (type, dconst1),
- TREE_OPERAND (exp, 1))),
- target, tmode, modifier);
-
goto binop;
case TRUNC_MOD_EXPR:
subtarget, &op0, &op1, 0);
return expand_divmod (1, code, mode, op0, op1, target, unsignedp);
- case FIX_ROUND_EXPR:
- case FIX_FLOOR_EXPR:
- case FIX_CEIL_EXPR:
- gcc_unreachable (); /* Not used for C. */
-
case FIX_TRUNC_EXPR:
- op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (TREE_OPERAND (exp, 0));
if (target == 0 || modifier == EXPAND_STACK_PARM)
target = gen_reg_rtx (mode);
expand_fix (target, op0, unsignedp);
return target;
case FLOAT_EXPR:
- op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ op0 = expand_normal (TREE_OPERAND (exp, 0));
if (target == 0 || modifier == EXPAND_STACK_PARM)
target = gen_reg_rtx (mode);
/* expand_float can't figure out what to do if FROM has VOIDmode.
return target;
case NEGATE_EXPR:
- op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget,
+ VOIDmode, EXPAND_NORMAL);
if (modifier == EXPAND_STACK_PARM)
target = 0;
temp = expand_unop (mode,
return REDUCE_BIT_FIELD (temp);
case ABS_EXPR:
- op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget,
+ VOIDmode, EXPAND_NORMAL);
if (modifier == EXPAND_STACK_PARM)
target = 0;
/* At this point, a MEM target is no longer useful; we will get better
code without it. */
- if (MEM_P (target))
+ if (! REG_P (target))
target = gen_reg_rtx (mode);
/* If op1 was placed in target, swap op0 and op1. */
if (target != op0 && target == op1)
{
- rtx tem = op0;
+ temp = op0;
op0 = op1;
- op1 = tem;
+ op1 = temp;
}
- if (target != op0)
- emit_move_insn (target, op0);
+ /* We generate better code and avoid problems with op1 mentioning
+ target by forcing op1 into a pseudo if it isn't a constant. */
+ if (! CONSTANT_P (op1))
+ op1 = force_reg (mode, op1);
- op0 = gen_label_rtx ();
+ {
+ enum rtx_code comparison_code;
+ rtx cmpop1 = op1;
- /* If this mode is an integer too wide to compare properly,
- compare word by word. Rely on cse to optimize constant cases. */
- if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (GE, mode, ccp_jump))
- {
- if (code == MAX_EXPR)
- do_jump_by_parts_greater_rtx (mode, unsignedp, target, op1,
- NULL_RTX, op0);
- else
- do_jump_by_parts_greater_rtx (mode, unsignedp, op1, target,
- NULL_RTX, op0);
- }
- else
- {
- do_compare_rtx_and_jump (target, op1, code == MAX_EXPR ? GE : LE,
- unsignedp, mode, NULL_RTX, NULL_RTX, op0);
- }
+ if (code == MAX_EXPR)
+ comparison_code = unsignedp ? GEU : GE;
+ else
+ comparison_code = unsignedp ? LEU : LE;
+
+ /* Canonicalize to comparisons against 0. */
+ if (op1 == const1_rtx)
+ {
+ /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
+ or (a != 0 ? a : 1) for unsigned.
+ For MIN we are safe converting (a <= 1 ? a : 1)
+ into (a <= 0 ? a : 1) */
+ cmpop1 = const0_rtx;
+ if (code == MAX_EXPR)
+ comparison_code = unsignedp ? NE : GT;
+ }
+ if (op1 == constm1_rtx && !unsignedp)
+ {
+ /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
+ and (a <= -1 ? a : -1) into (a < 0 ? a : -1).  */
+ cmpop1 = const0_rtx;
+ if (code == MIN_EXPR)
+ comparison_code = LT;
+ }
+#ifdef HAVE_conditional_move
+ /* Use a conditional move if possible. */
+ if (can_conditionally_move_p (mode))
+ {
+ rtx insn;
+
+ /* ??? Same problem as in expmed.c: emit_conditional_move
+ forces a stack adjustment via compare_from_rtx, and we
+ lose the stack adjustment if the sequence we are about
+ to create is discarded. */
+ do_pending_stack_adjust ();
+
+ start_sequence ();
+
+ /* Try to emit the conditional move. */
+ insn = emit_conditional_move (target, comparison_code,
+ op0, cmpop1, mode,
+ op0, op1, mode,
+ unsignedp);
+
+ /* If we could do the conditional move, emit the sequence,
+ and return. */
+ if (insn)
+ {
+ rtx seq = get_insns ();
+ end_sequence ();
+ emit_insn (seq);
+ return target;
+ }
+
+ /* Otherwise discard the sequence and fall back to code with
+ branches. */
+ end_sequence ();
+ }
+#endif
+ if (target != op0)
+ emit_move_insn (target, op0);
+
+ temp = gen_label_rtx ();
+ do_compare_rtx_and_jump (target, cmpop1, comparison_code,
+ unsignedp, mode, NULL_RTX, NULL_RTX, temp);
+ }
emit_move_insn (target, op1);
- emit_label (op0);
+ emit_label (temp);
return target;
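/* Editorial sketch, not part of the patch: the canonicalizations above
   rest on simple source-level identities.  A minimal standalone check
   over a few sample values (illustrative only):  */

#include <assert.h>

int
main (void)
{
  int i;
  for (i = -3; i <= 3; i++)
    {
      unsigned u = (unsigned) i;
      /* Unsigned max (u, 1): comparing against 0 instead of 1.  */
      assert ((u >= 1 ? u : 1) == (u != 0 ? u : 1));
      /* Signed max (i, -1) and min (i, -1): comparing against 0.  */
      assert ((i >= -1 ? i : -1) == (i >= 0 ? i : -1));
      assert ((i <= -1 ? i : -1) == (i < 0 ? i : -1));
    }
  return 0;
}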
case BIT_NOT_EXPR:
- op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget,
+ VOIDmode, EXPAND_NORMAL);
if (modifier == EXPAND_STACK_PARM)
target = 0;
temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
subtarget = 0;
if (modifier == EXPAND_STACK_PARM)
target = 0;
- op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget,
+ VOIDmode, EXPAND_NORMAL);
return expand_shift (code, mode, op0, TREE_OPERAND (exp, 1), target,
unsignedp);
== TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
{
temp = expand_expr (TREE_OPERAND (exp, 0), original_target,
- VOIDmode, 0);
+ VOIDmode, EXPAND_NORMAL);
/* If temp is constant, we can just compute the result. */
if (GET_CODE (temp) == CONST_INT)
case TRUTH_NOT_EXPR:
if (modifier == EXPAND_STACK_PARM)
target = 0;
- op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, 0);
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target,
+ VOIDmode, EXPAND_NORMAL);
/* The parser is careful to generate TRUTH_NOT_EXPR
only with operands that are always zero or one. */
temp = expand_binop (mode, xor_optab, op0, const1_rtx,
return const0_rtx;
case COND_EXPR:
- /* If it's void, we don't need to worry about computing a value. */
- if (VOID_TYPE_P (TREE_TYPE (exp)))
- {
- tree pred = TREE_OPERAND (exp, 0);
- tree then_ = TREE_OPERAND (exp, 1);
- tree else_ = TREE_OPERAND (exp, 2);
-
- gcc_assert (TREE_CODE (then_) == GOTO_EXPR
- && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL
- && TREE_CODE (else_) == GOTO_EXPR
- && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL);
-
- jumpif (pred, label_rtx (GOTO_DESTINATION (then_)));
- return expand_expr (else_, const0_rtx, VOIDmode, 0);
- }
+ /* A COND_EXPR whose type is VOID_TYPE represents a conditional
+ jump and is handled in expand_gimple_cond_expr. */
+ gcc_assert (!VOID_TYPE_P (TREE_TYPE (exp)));
/* Note that COND_EXPRs whose type is a structure or union
are required to be constructed to contain assignments of
op1 = gen_label_rtx ();
jumpifnot (TREE_OPERAND (exp, 0), op0);
store_expr (TREE_OPERAND (exp, 1), temp,
- modifier == EXPAND_STACK_PARM ? 2 : 0);
+ modifier == EXPAND_STACK_PARM,
+ false);
emit_jump_insn (gen_jump (op1));
emit_barrier ();
emit_label (op0);
store_expr (TREE_OPERAND (exp, 2), temp,
- modifier == EXPAND_STACK_PARM ? 2 : 0);
+ modifier == EXPAND_STACK_PARM,
+ false);
emit_label (op1);
OK_DEFER_POP;
return temp;
+ case VEC_COND_EXPR:
+ target = expand_vec_cond_expr (exp, target);
+ return target;
+
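/* Editorial sketch, not part of the patch: VEC_COND_EXPR is an
   element-wise select; scalar reference semantics for a 4-lane vector
   (function and names are illustrative, not GCC API):  */

static void
vec_cond_ref (const int *mask, const int *a, const int *b, int *out)
{
  int i;
  for (i = 0; i < 4; i++)
    out[i] = mask[i] ? a[i] : b[i];	/* per-lane cond ? a : b */
}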
case MODIFY_EXPR:
{
- /* If lhs is complex, expand calls in rhs before computing it.
- That's so we don't compute a pointer and save it over a
- call. If lhs is simple, compute it first so we can give it
- as a target if the rhs is just a call. This avoids an
- extra temp and copy and that prevents a partial-subsumption
- which makes bad code. Actually we could treat
- component_ref's of vars like vars. */
-
tree lhs = TREE_OPERAND (exp, 0);
tree rhs = TREE_OPERAND (exp, 1);
+ gcc_assert (ignore);
+ expand_assignment (lhs, rhs, false);
+ return const0_rtx;
+ }
- temp = 0;
+ case GIMPLE_MODIFY_STMT:
+ {
+ tree lhs = GIMPLE_STMT_OPERAND (exp, 0);
+ tree rhs = GIMPLE_STMT_OPERAND (exp, 1);
+
+ gcc_assert (ignore);
/* Check for |= or &= of a bitfield of size 1 into another bitfield
of size 1. In this case, (unless we need the result of the
??? At this point, we can't get a BIT_FIELD_REF here. But if
things change so we do, this code should be enhanced to
support it. */
- if (ignore
- && TREE_CODE (lhs) == COMPONENT_REF
+ if (TREE_CODE (lhs) == COMPONENT_REF
&& (TREE_CODE (rhs) == BIT_IOR_EXPR
|| TREE_CODE (rhs) == BIT_AND_EXPR)
&& TREE_OPERAND (rhs, 0) == lhs
&& integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))))
{
rtx label = gen_label_rtx ();
-
+ int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
do_jump (TREE_OPERAND (rhs, 1),
- TREE_CODE (rhs) == BIT_IOR_EXPR ? label : 0,
- TREE_CODE (rhs) == BIT_AND_EXPR ? label : 0);
- expand_assignment (lhs, convert (TREE_TYPE (rhs),
- (TREE_CODE (rhs) == BIT_IOR_EXPR
- ? integer_one_node
- : integer_zero_node)),
- 0);
+ value ? label : 0,
+ value ? 0 : label);
+ expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
+ MOVE_NONTEMPORAL (exp));
do_pending_stack_adjust ();
emit_label (label);
return const0_rtx;
}
- temp = expand_assignment (lhs, rhs, ! ignore);
-
- return temp;
+ expand_assignment (lhs, rhs, MOVE_NONTEMPORAL (exp));
+ return const0_rtx;
}
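/* Editorial sketch, not part of the patch: the special case above turns
   a |= / &= between two 1-bit bitfields into a test plus a constant
   store.  Source-level picture (illustrative):  */

struct flags { unsigned a : 1, b : 1; };

static void
or_in_place (struct flags *f)
{
  /* f->a |= f->b expands roughly as: if (f->b) f->a = 1;
     f->a &= f->b likewise becomes:   if (!f->b) f->a = 0;  */
  if (f->b)
    f->a = 1;
}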
case RETURN_EXPR:
return const0_rtx;
case ADDR_EXPR:
- return expand_expr_addr_expr (TREE_OPERAND (exp, 0), target,
- tmode, modifier);
+ return expand_expr_addr_expr (exp, target, tmode, modifier);
- /* COMPLEX type for Extended Pascal & Fortran */
case COMPLEX_EXPR:
- {
- enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
- rtx insns;
-
- /* Get the rtx code of the operands. */
- op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
-
- if (! target)
- target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
-
- start_sequence ();
+ /* Get the rtx code of the operands. */
+ op0 = expand_normal (TREE_OPERAND (exp, 0));
+ op1 = expand_normal (TREE_OPERAND (exp, 1));
- /* Move the real (op0) and imaginary (op1) parts to their location. */
- emit_move_insn (gen_realpart (mode, target), op0);
- emit_move_insn (gen_imagpart (mode, target), op1);
-
- insns = get_insns ();
- end_sequence ();
+ if (!target)
+ target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
- /* Complex construction should appear as a single unit. */
- /* If TARGET is a CONCAT, we got insns like RD = RS, ID = IS,
- each with a separate pseudo as destination.
- It's not correct for flow to treat them as a unit. */
- if (GET_CODE (target) != CONCAT)
- emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
- else
- emit_insn (insns);
+ /* Move the real (op0) and imaginary (op1) parts to their location. */
+ write_complex_part (target, op0, false);
+ write_complex_part (target, op1, true);
- return target;
- }
+ return target;
case REALPART_EXPR:
- op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- return gen_realpart (mode, op0);
+ op0 = expand_normal (TREE_OPERAND (exp, 0));
+ return read_complex_part (op0, false);
case IMAGPART_EXPR:
- op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- return gen_imagpart (mode, op0);
+ op0 = expand_normal (TREE_OPERAND (exp, 0));
+ return read_complex_part (op0, true);
case RESX_EXPR:
expand_resx_expr (exp);
case POSTDECREMENT_EXPR:
case LOOP_EXPR:
case EXIT_EXPR:
- case LABELED_BLOCK_EXPR:
- case EXIT_BLOCK_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
/* Lowered by gimplify.c. */
gcc_unreachable ();
+ case CHANGE_DYNAMIC_TYPE_EXPR:
+ /* This is ignored at the RTL level. The tree level sets
+ DECL_POINTER_ALIAS_SET of any variable to 0, which is
+ overkill for the RTL layer but is all that we can
+ represent. */
+ return const0_rtx;
+
case EXC_PTR_EXPR:
return get_exception_pointer (cfun);
return expand_expr_real (TREE_OPERAND (exp, 0), original_target, tmode,
modifier, alt_rtl);
+ case REALIGN_LOAD_EXPR:
+ {
+ tree oprnd0 = TREE_OPERAND (exp, 0);
+ tree oprnd1 = TREE_OPERAND (exp, 1);
+ tree oprnd2 = TREE_OPERAND (exp, 2);
+ rtx op2;
+
+ this_optab = optab_for_tree_code (code, type);
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (oprnd2);
+ temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
+ target, unsignedp);
+ gcc_assert (temp);
+ return temp;
+ }
+
+ case DOT_PROD_EXPR:
+ {
+ tree oprnd0 = TREE_OPERAND (exp, 0);
+ tree oprnd1 = TREE_OPERAND (exp, 1);
+ tree oprnd2 = TREE_OPERAND (exp, 2);
+ rtx op2;
+
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (oprnd2);
+ target = expand_widen_pattern_expr (exp, op0, op1, op2,
+ target, unsignedp);
+ return target;
+ }
+
+ case WIDEN_SUM_EXPR:
+ {
+ tree oprnd0 = TREE_OPERAND (exp, 0);
+ tree oprnd1 = TREE_OPERAND (exp, 1);
+
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ target = expand_widen_pattern_expr (exp, op0, NULL_RTX, op1,
+ target, unsignedp);
+ return target;
+ }
+
+ case REDUC_MAX_EXPR:
+ case REDUC_MIN_EXPR:
+ case REDUC_PLUS_EXPR:
+ {
+ op0 = expand_normal (TREE_OPERAND (exp, 0));
+ this_optab = optab_for_tree_code (code, type);
+ temp = expand_unop (mode, this_optab, op0, target, unsignedp);
+ gcc_assert (temp);
+ return temp;
+ }
+
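/* Editorial sketch, not part of the patch: the REDUC_* codes fold all
   lanes of a vector into a single scalar result (which lane of the
   output holds it is target-defined at this level).  The arithmetic,
   for a 4-lane sum (illustrative):  */

static int
reduc_plus_ref (const int *v)
{
  return v[0] + v[1] + v[2] + v[3];
}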
+ case VEC_EXTRACT_EVEN_EXPR:
+ case VEC_EXTRACT_ODD_EXPR:
+ {
+ expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1),
+ NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ this_optab = optab_for_tree_code (code, type);
+ temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
+ OPTAB_WIDEN);
+ gcc_assert (temp);
+ return temp;
+ }
+
+ case VEC_INTERLEAVE_HIGH_EXPR:
+ case VEC_INTERLEAVE_LOW_EXPR:
+ {
+ expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1),
+ NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ this_optab = optab_for_tree_code (code, type);
+ temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
+ OPTAB_WIDEN);
+ gcc_assert (temp);
+ return temp;
+ }
+
+ case VEC_LSHIFT_EXPR:
+ case VEC_RSHIFT_EXPR:
+ {
+ target = expand_vec_shift_expr (exp, target);
+ return target;
+ }
+
+ case VEC_UNPACK_HI_EXPR:
+ case VEC_UNPACK_LO_EXPR:
+ {
+ op0 = expand_normal (TREE_OPERAND (exp, 0));
+ this_optab = optab_for_tree_code (code, type);
+ temp = expand_widen_pattern_expr (exp, op0, NULL_RTX, NULL_RTX,
+ target, unsignedp);
+ gcc_assert (temp);
+ return temp;
+ }
+
+ case VEC_UNPACK_FLOAT_HI_EXPR:
+ case VEC_UNPACK_FLOAT_LO_EXPR:
+ {
+ op0 = expand_normal (TREE_OPERAND (exp, 0));
+ /* The signedness is determined from the input operand. */
+ this_optab = optab_for_tree_code (code,
+ TREE_TYPE (TREE_OPERAND (exp, 0)));
+ temp = expand_widen_pattern_expr
+ (exp, op0, NULL_RTX, NULL_RTX,
+ target, TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+
+ gcc_assert (temp);
+ return temp;
+ }
+
+ case VEC_WIDEN_MULT_HI_EXPR:
+ case VEC_WIDEN_MULT_LO_EXPR:
+ {
+ tree oprnd0 = TREE_OPERAND (exp, 0);
+ tree oprnd1 = TREE_OPERAND (exp, 1);
+
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ target = expand_widen_pattern_expr (exp, op0, op1, NULL_RTX,
+ target, unsignedp);
+ gcc_assert (target);
+ return target;
+ }
+
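/* Editorial sketch, not part of the patch: VEC_WIDEN_MULT_HI/LO_EXPR
   multiply narrow lanes into lanes of twice the width; HI and LO each
   cover half of the input lanes (which half counts as "high" is
   target-defined).  Scalar picture for 16-bit lanes widened to 32 bits
   (illustrative):  */

static void
widen_mult_ref (const short *a, const short *b, int *hi, int *lo)
{
  int i;
  for (i = 0; i < 2; i++)
    {
      hi[i] = (int) a[i] * b[i];		/* one half of the lanes */
      lo[i] = (int) a[i + 2] * b[i + 2];	/* the other half */
    }
}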
+ case VEC_PACK_TRUNC_EXPR:
+ case VEC_PACK_SAT_EXPR:
+ case VEC_PACK_FIX_TRUNC_EXPR:
+ {
+ mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ goto binop;
+ }
+
default:
return lang_hooks.expand_expr (exp, original_target, tmode,
modifier, alt_rtl);
HOST_WIDE_INT prec = TYPE_PRECISION (type);
if (target && GET_MODE (target) != GET_MODE (exp))
target = 0;
- if (TYPE_UNSIGNED (type))
+ /* For constant values, reduce using build_int_cst_type. */
+ if (GET_CODE (exp) == CONST_INT)
+ {
+ HOST_WIDE_INT value = INTVAL (exp);
+ tree t = build_int_cst_type (type, value);
+ return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
+ }
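/* Editorial note, not part of the patch: for constants this reduction
   is plain sign- or zero-extension from the low PREC bits.  With
   prec == 8, for example, 0x17f reduces to 127 for either signedness,
   while 0x1ff reduces to 255 unsigned but to -1 signed;
   build_int_cst_type performs exactly that extension.  */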
+ else if (TYPE_UNSIGNED (type))
{
rtx mask;
if (prec < HOST_BITS_PER_WIDE_INT)
tree
string_constant (tree arg, tree *ptr_offset)
{
+ tree array, offset, lower_bound;
STRIP_NOPS (arg);
- if (TREE_CODE (arg) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST)
- {
- *ptr_offset = size_zero_node;
- return TREE_OPERAND (arg, 0);
- }
- if (TREE_CODE (arg) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (arg, 0)) == ARRAY_REF
- && TREE_CODE (TREE_OPERAND (TREE_OPERAND (arg, 0), 0)) == STRING_CST)
+ if (TREE_CODE (arg) == ADDR_EXPR)
{
- *ptr_offset = convert (sizetype, TREE_OPERAND (TREE_OPERAND (arg, 0), 1));
- return TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
+ if (TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST)
+ {
+ *ptr_offset = size_zero_node;
+ return TREE_OPERAND (arg, 0);
+ }
+ else if (TREE_CODE (TREE_OPERAND (arg, 0)) == VAR_DECL)
+ {
+ array = TREE_OPERAND (arg, 0);
+ offset = size_zero_node;
+ }
+ else if (TREE_CODE (TREE_OPERAND (arg, 0)) == ARRAY_REF)
+ {
+ array = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
+ offset = TREE_OPERAND (TREE_OPERAND (arg, 0), 1);
+ if (TREE_CODE (array) != STRING_CST
+ && TREE_CODE (array) != VAR_DECL)
+ return 0;
+
+ /* Check if the array has a nonzero lower bound. */
+ lower_bound = array_ref_low_bound (TREE_OPERAND (arg, 0));
+ if (!integer_zerop (lower_bound))
+ {
+ /* If the lower bound and offset aren't both constants, return 0. */
+ if (TREE_CODE (lower_bound) != INTEGER_CST)
+ return 0;
+ if (TREE_CODE (offset) != INTEGER_CST)
+ return 0;
+ /* Adjust offset by the lower bound. */
+ offset = size_diffop (fold_convert (sizetype, offset),
+ fold_convert (sizetype, lower_bound));
+ }
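/* Editorial note, not part of the patch: nonzero lower bounds never
   come from C, but front ends such as Ada or Fortran can declare an
   array over 1..N; element 3 of such an array then has ARRAY_REF
   index 3 but byte offset 3 - 1 into the string, which is what the
   size_diffop adjustment above computes.  */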
+ }
+ else
+ return 0;
}
- else if (TREE_CODE (arg) == PLUS_EXPR)
+ else if (TREE_CODE (arg) == PLUS_EXPR
+	   || TREE_CODE (arg) == POINTER_PLUS_EXPR)
{
tree arg0 = TREE_OPERAND (arg, 0);
tree arg1 = TREE_OPERAND (arg, 1);
STRIP_NOPS (arg1);
if (TREE_CODE (arg0) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST)
+ && (TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST
+ || TREE_CODE (TREE_OPERAND (arg0, 0)) == VAR_DECL))
{
- *ptr_offset = convert (sizetype, arg1);
- return TREE_OPERAND (arg0, 0);
+ array = TREE_OPERAND (arg0, 0);
+ offset = arg1;
}
else if (TREE_CODE (arg1) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST)
+ && (TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST
+ || TREE_CODE (TREE_OPERAND (arg1, 0)) == VAR_DECL))
{
- *ptr_offset = convert (sizetype, arg0);
- return TREE_OPERAND (arg1, 0);
+ array = TREE_OPERAND (arg1, 0);
+ offset = arg0;
}
+ else
+ return 0;
+ }
+ else
+ return 0;
+
+ if (TREE_CODE (array) == STRING_CST)
+ {
+ *ptr_offset = fold_convert (sizetype, offset);
+ return array;
+ }
+ else if (TREE_CODE (array) == VAR_DECL)
+ {
+ int length;
+
+ /* Variables initialized to string literals can be handled too. */
+ if (DECL_INITIAL (array) == NULL_TREE
+ || TREE_CODE (DECL_INITIAL (array)) != STRING_CST)
+ return 0;
+
+ /* Only accept them if they are read-only, non-volatile and bind locally. */
+ if (! TREE_READONLY (array)
+ || TREE_SIDE_EFFECTS (array)
+ || ! targetm.binds_local_p (array))
+ return 0;
+
+ /* Avoid cases like const char foo[4] = "abcde"; where the
+ initializer is longer than the object. */
+ if (DECL_SIZE_UNIT (array) == NULL_TREE
+ || TREE_CODE (DECL_SIZE_UNIT (array)) != INTEGER_CST
+ || (length = TREE_STRING_LENGTH (DECL_INITIAL (array))) <= 0
+ || compare_tree_int (DECL_SIZE_UNIT (array), length) < 0)
+ return 0;
+
+ /* If the variable is bigger than the string literal, OFFSET must be
+ constant and within the bounds of the string literal. */
+ offset = fold_convert (sizetype, offset);
+ if (compare_tree_int (DECL_SIZE_UNIT (array), length) > 0
+ && (! host_integerp (offset, 1)
+ || compare_tree_int (offset, length) >= 0))
+ return 0;
+
+ *ptr_offset = offset;
+ return DECL_INITIAL (array);
}
return 0;
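/* Editorial sketch, not part of the patch: with the VAR_DECL support
   above, string_constant also sees through read-only, locally-bound
   variables initialized from literals, e.g. (illustrative):

     static const char greeting[] = "hello world";
     ... strlen (greeting + 6) ...

   ARRAY is the VAR_DECL for greeting, OFFSET is 6, and the STRING_CST
   "hello world" is returned, so callers such as the builtin strlen
   folding can compute the result 5 at compile time.  */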
return 0;
icode = setcc_gen_code[(int) code];
+
+ if (icode == CODE_FOR_nothing)
+ {
+ enum machine_mode wmode;
+
+ for (wmode = operand_mode;
+ icode == CODE_FOR_nothing && wmode != VOIDmode;
+ wmode = GET_MODE_WIDER_MODE (wmode))
+ icode = cstore_optab->handlers[(int) wmode].insn_code;
+ }
+
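/* Editorial note, not part of the patch: the loop above means a missing
   store-flag pattern in OPERAND_MODE itself is no longer fatal; the
   first wider integer mode with a cstore pattern (say DImode when
   SImode lacks one) is used instead, before the function falls back to
   the special cases and branch sequence below.  */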
if (icode == CODE_FOR_nothing
|| (only_cheap && insn_data[(int) icode].operand[0].mode != mode))
{
if ((code == LT && integer_zerop (arg1))
|| (! only_cheap && code == GE && integer_zerop (arg1)))
;
- else if (BRANCH_COST >= 0
- && ! only_cheap && (code == NE || code == EQ)
+ else if (! only_cheap && (code == NE || code == EQ)
&& TREE_CODE (type) != REAL_TYPE
&& ((abs_optab->handlers[(int) operand_mode].insn_code
!= CODE_FOR_nothing)
target = gen_reg_rtx (GET_MODE (target));
emit_move_insn (target, invert ? const0_rtx : const1_rtx);
- result = compare_from_rtx (op0, op1, code, unsignedp,
- operand_mode, NULL_RTX);
- if (GET_CODE (result) == CONST_INT)
- return (((result == const0_rtx && ! invert)
- || (result != const0_rtx && invert))
- ? const0_rtx : const1_rtx);
-
- /* The code of RESULT may not match CODE if compare_from_rtx
- decided to swap its operands and reverse the original code.
-
- We know that compare_from_rtx returns either a CONST_INT or
- a new comparison code, so it is safe to just extract the
- code from RESULT. */
- code = GET_CODE (result);
-
label = gen_label_rtx ();
- gcc_assert (bcc_gen_fctn[(int) code]);
+ do_compare_rtx_and_jump (op0, op1, code, unsignedp, operand_mode, NULL_RTX,
+ NULL_RTX, label);
- emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
emit_move_insn (target, invert ? const1_rtx : const0_rtx);
emit_label (label);
if (GET_MODE_BITSIZE (TYPE_MODE (index_type)) > GET_MODE_BITSIZE (index_mode))
{
enum machine_mode omode = TYPE_MODE (index_type);
- rtx rangertx = expand_expr (range, NULL_RTX, VOIDmode, 0);
+ rtx rangertx = expand_normal (range);
/* We must handle the endpoints in the original mode. */
index_expr = build2 (MINUS_EXPR, index_type,
index_expr, minval);
minval = integer_zero_node;
- index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ index = expand_normal (index_expr);
emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
omode, 1, default_label);
/* Now we can safely truncate. */
{
if (TYPE_MODE (index_type) != index_mode)
{
- index_expr = convert (lang_hooks.types.type_for_size
- (index_bits, 0), index_expr);
- index_type = TREE_TYPE (index_expr);
+ index_type = lang_hooks.types.type_for_size (index_bits, 0);
+ index_expr = fold_convert (index_type, index_expr);
}
- index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ index = expand_normal (index_expr);
}
do_pending_stack_adjust ();
(index, op_mode))
index = copy_to_mode_reg (op_mode, index);
- op1 = expand_expr (minval, NULL_RTX, VOIDmode, 0);
+ op1 = expand_normal (minval);
op_mode = insn_data[(int) CODE_FOR_casesi].operand[1].mode;
op1 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (minval)),
(op1, op_mode))
op1 = copy_to_mode_reg (op_mode, op1);
- op2 = expand_expr (range, NULL_RTX, VOIDmode, 0);
+ op2 = expand_normal (range);
op_mode = insn_data[(int) CODE_FOR_casesi].operand[2].mode;
op2 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (range)),
if (! HAVE_tablejump)
return 0;
- index_expr = fold (build2 (MINUS_EXPR, index_type,
- convert (index_type, index_expr),
- convert (index_type, minval)));
- index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ index_expr = fold_build2 (MINUS_EXPR, index_type,
+ fold_convert (index_type, index_expr),
+ fold_convert (index_type, minval));
+ index = expand_normal (index_expr);
do_pending_stack_adjust ();
do_tablejump (index, TYPE_MODE (index_type),
convert_modes (TYPE_MODE (index_type),
TYPE_MODE (TREE_TYPE (range)),
- expand_expr (range, NULL_RTX,
- VOIDmode, 0),
+ expand_normal (range),
TYPE_UNSIGNED (TREE_TYPE (range))),
table_label, default_label);
return 1;
for (; i < units; ++i)
RTVEC_ELT (v, i) = CONST0_RTX (inner);
- return gen_rtx_raw_CONST_VECTOR (mode, v);
+ return gen_rtx_CONST_VECTOR (mode, v);
}
#include "gt-expr.h"