/* Convert tree expression to rtl instructions, for GNU compiler.
Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
#include "coretypes.h"
#include "tm.h"
#include "machmode.h"
-#include "real.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "output.h"
#include "typeclass.h"
#include "toplev.h"
-#include "ggc.h"
#include "langhooks.h"
#include "intl.h"
#include "tm_p.h"
#include "df.h"
#include "diagnostic.h"
#include "ssaexpand.h"
+#include "target-globals.h"
/* Decide whether a function's arguments should be processed
from first to last or from last to first.
static rtx const_vector_from_tree (tree);
static void write_complex_part (rtx, rtx, bool);
-/* Record for each mode whether we can move a register directly to or
- from an object of that mode in memory. If we can't, we won't try
- to use that mode directly when accessing a field of that mode. */
-
-static char direct_load[NUM_MACHINE_MODES];
-static char direct_store[NUM_MACHINE_MODES];
-
-/* Record for each mode whether we can float-extend from memory. */
-
-static bool float_extend_from_mem[NUM_MACHINE_MODES][NUM_MACHINE_MODES];
-
/* This macro is used to determine whether move_by_pieces should be called
to perform a structure copy. */
#ifndef MOVE_BY_PIECES_P
< (unsigned int) MOVE_RATIO (optimize_insn_for_speed_p ()))
#endif
-/* This array records the insn_code of insns to perform block moves. */
-enum insn_code movmem_optab[NUM_MACHINE_MODES];
-
-/* This array records the insn_code of insns to perform block sets. */
-enum insn_code setmem_optab[NUM_MACHINE_MODES];
-
-/* These arrays record the insn_code of three different kinds of insns
- to perform block compares. */
-enum insn_code cmpstr_optab[NUM_MACHINE_MODES];
-enum insn_code cmpstrn_optab[NUM_MACHINE_MODES];
-enum insn_code cmpmem_optab[NUM_MACHINE_MODES];
-
-/* Synchronization primitives. */
-enum insn_code sync_add_optab[NUM_MACHINE_MODES];
-enum insn_code sync_sub_optab[NUM_MACHINE_MODES];
-enum insn_code sync_ior_optab[NUM_MACHINE_MODES];
-enum insn_code sync_and_optab[NUM_MACHINE_MODES];
-enum insn_code sync_xor_optab[NUM_MACHINE_MODES];
-enum insn_code sync_nand_optab[NUM_MACHINE_MODES];
-enum insn_code sync_old_add_optab[NUM_MACHINE_MODES];
-enum insn_code sync_old_sub_optab[NUM_MACHINE_MODES];
-enum insn_code sync_old_ior_optab[NUM_MACHINE_MODES];
-enum insn_code sync_old_and_optab[NUM_MACHINE_MODES];
-enum insn_code sync_old_xor_optab[NUM_MACHINE_MODES];
-enum insn_code sync_old_nand_optab[NUM_MACHINE_MODES];
-enum insn_code sync_new_add_optab[NUM_MACHINE_MODES];
-enum insn_code sync_new_sub_optab[NUM_MACHINE_MODES];
-enum insn_code sync_new_ior_optab[NUM_MACHINE_MODES];
-enum insn_code sync_new_and_optab[NUM_MACHINE_MODES];
-enum insn_code sync_new_xor_optab[NUM_MACHINE_MODES];
-enum insn_code sync_new_nand_optab[NUM_MACHINE_MODES];
-enum insn_code sync_compare_and_swap[NUM_MACHINE_MODES];
-enum insn_code sync_lock_test_and_set[NUM_MACHINE_MODES];
-enum insn_code sync_lock_release[NUM_MACHINE_MODES];
-
/* SLOW_UNALIGNED_ACCESS is nonzero if unaligned accesses are very slow. */
#ifndef SLOW_UNALIGNED_ACCESS
PUT_MODE (mem, srcmode);
- if ((*insn_data[ic].operand[1].predicate) (mem, srcmode))
+ if (insn_operand_matches (ic, 1, mem))
float_extend_from_mem[mode][srcmode] = true;
}
}
/* Try converting directly if the insn is supported. */
- code = convert_optab_handler (tab, to_mode, from_mode)->insn_code;
+ code = convert_optab_handler (tab, to_mode, from_mode);
if (code != CODE_FOR_nothing)
{
emit_unop_insn (code, to, from,
enum machine_mode full_mode
= smallest_mode_for_size (GET_MODE_BITSIZE (to_mode), MODE_INT);
- gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)->insn_code
+ gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
!= CODE_FOR_nothing);
if (full_mode != from_mode)
from = convert_to_mode (full_mode, from, unsignedp);
- emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode)->insn_code,
+ emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
to, from, UNKNOWN);
return;
}
enum machine_mode full_mode
= smallest_mode_for_size (GET_MODE_BITSIZE (from_mode), MODE_INT);
- gcc_assert (convert_optab_handler (sext_optab, full_mode, from_mode)->insn_code
+ gcc_assert (convert_optab_handler (sext_optab, full_mode, from_mode)
!= CODE_FOR_nothing);
if (to_mode == full_mode)
{
- emit_unop_insn (convert_optab_handler (sext_optab, full_mode, from_mode)->insn_code,
+ emit_unop_insn (convert_optab_handler (sext_optab, full_mode,
+ from_mode),
to, from, UNKNOWN);
return;
}
new_from = gen_reg_rtx (full_mode);
- emit_unop_insn (convert_optab_handler (sext_optab, full_mode, from_mode)->insn_code,
+ emit_unop_insn (convert_optab_handler (sext_optab, full_mode, from_mode),
new_from, from, UNKNOWN);
/* else proceed to integer conversions below. */
{
enum machine_mode intermediate;
rtx tmp;
- tree shift_amount;
+ int shift_amount;
/* Search for a mode to convert via. */
for (intermediate = from_mode; intermediate != VOIDmode;
/* No suitable intermediate mode.
Generate what we need with shifts. */
- shift_amount = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (to_mode)
- - GET_MODE_BITSIZE (from_mode));
+ shift_amount = (GET_MODE_BITSIZE (to_mode)
+ - GET_MODE_BITSIZE (from_mode));
from = gen_lowpart (to_mode, force_reg (from_mode, from));
tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
to, unsignedp);
}
/* Support special truncate insns for certain modes. */
- if (convert_optab_handler (trunc_optab, to_mode, from_mode)->insn_code != CODE_FOR_nothing)
+ if (convert_optab_handler (trunc_optab, to_mode,
+ from_mode) != CODE_FOR_nothing)
{
- emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode)->insn_code,
+ emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode),
to, from, UNKNOWN);
return;
}
&& GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT
&& CONST_INT_P (x) && INTVAL (x) < 0)
{
- HOST_WIDE_INT val = INTVAL (x);
-
- if (oldmode != VOIDmode
- && HOST_BITS_PER_WIDE_INT > GET_MODE_BITSIZE (oldmode))
- {
- int width = GET_MODE_BITSIZE (oldmode);
+ double_int val = uhwi_to_double_int (INTVAL (x));
- /* We need to zero extend VAL. */
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
- }
+ /* We need to zero extend VAL. */
+ if (oldmode != VOIDmode)
+ val = double_int_zext (val, GET_MODE_BITSIZE (oldmode));
- return immed_double_const (val, (HOST_WIDE_INT) 0, mode);
+ return immed_double_int_const (val, mode);
}
/* We can do this with a gen_lowpart if both desired and current modes
return temp;
}
\f
+/* Return the largest alignment we can use for doing a move (or store)
+   of up to MAX_PIECES bytes at a time.  ALIGN is the largest alignment
+   we could use.  */
+
+static unsigned int
+alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
+{
+ enum machine_mode tmode;
+
+ tmode = mode_for_size (max_pieces * BITS_PER_UNIT, MODE_INT, 1);
+ if (align >= GET_MODE_ALIGNMENT (tmode))
+ align = GET_MODE_ALIGNMENT (tmode);
+ else
+ {
+ enum machine_mode tmode, xmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
+ tmode != VOIDmode;
+ xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) > max_pieces
+ || SLOW_UNALIGNED_ACCESS (tmode, align))
+ break;
+
+ align = MAX (align, GET_MODE_ALIGNMENT (xmode));
+ }
+
+ return align;
+}
+
+/* Return the widest integer mode narrower than SIZE bytes (e.g. given
+   the usual 1, 2, 4 and 8 byte integer modes, widest_int_mode_for_size (5)
+   is SImode).  If no such mode can be found, return VOIDmode.  */
+
+static enum machine_mode
+widest_int_mode_for_size (unsigned int size)
+{
+ enum machine_mode tmode, mode = VOIDmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < size)
+ mode = tmode;
+
+ return mode;
+}
+
/* STORE_MAX_PIECES is the number of bytes at a time that we can
store efficiently. Due to internal GCC limitations, this is
MOVE_MAX_PIECES limited by the number of bytes GCC can represent
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (from));
rtx to_addr, from_addr = XEXP (from, 0);
unsigned int max_size = MOVE_MAX_PIECES + 1;
- enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
align = MIN (to ? MEM_ALIGN (to) : align, MEM_ALIGN (from));
if (!(data.autinc_from && data.autinc_to)
&& move_by_pieces_ninsns (len, align, max_size) > 2)
{
- /* Find the mode of the largest move... */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ /* Find the mode of the largest move...
+ MODE might not be used depending on the definitions of the
+ USE_* macros below. */
+ enum machine_mode mode ATTRIBUTE_UNUSED
+ = widest_int_mode_for_size (max_size);
if (USE_LOAD_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_from)
{
data.to_addr = copy_to_mode_reg (to_addr_mode, to_addr);
}
- tmode = mode_for_size (MOVE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > MOVE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
/* First move what we can in the largest integer mode, then go to
successively smaller modes. */
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ enum machine_mode mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
- icode = optab_handler (mov_optab, mode)->insn_code;
+ icode = optab_handler (mov_optab, mode);
if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
move_by_pieces_1 (GEN_FCN (icode), mode, &data);
unsigned int max_size)
{
unsigned HOST_WIDE_INT n_insns = 0;
- enum machine_mode tmode;
-
- tmode = mode_for_size (MOVE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode tmode, xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > MOVE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
while (max_size > 1)
{
- enum machine_mode mode = VOIDmode;
+ enum machine_mode mode;
enum insn_code icode;
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
- icode = optab_handler (mov_optab, mode)->insn_code;
+ icode = optab_handler (mov_optab, mode);
if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
n_insns += l / GET_MODE_SIZE (mode), l %= GET_MODE_SIZE (mode);
rtx retval = 0;
unsigned int align;
+ gcc_assert (size);
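+ /* A zero-length block move is a no-op; emit nothing and return.  */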
+ if (CONST_INT_P (size)
+ && INTVAL (size) == 0)
+ return 0;
+
switch (method)
{
case BLOCK_OP_NORMAL:
gcc_unreachable ();
}
+ gcc_assert (MEM_P (x) && MEM_P (y));
align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
gcc_assert (align >= BITS_PER_UNIT);
- gcc_assert (MEM_P (x));
- gcc_assert (MEM_P (y));
- gcc_assert (size);
-
/* Make sure we've got BLKmode addresses; store_one_arg can decide that
block copy is more efficient for other large modes, e.g. DCmode. */
x = adjust_address (x, BLKmode, 0);
can be incorrect is coming from __builtin_memcpy. */
if (CONST_INT_P (size))
{
- if (INTVAL (size) == 0)
- return 0;
-
x = shallow_copy_rtx (x);
y = shallow_copy_rtx (y);
set_mem_size (x, size);
an outgoing argument. */
#if defined (REG_PARM_STACK_SPACE)
fn = emit_block_move_libcall_fn (false);
+ /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
+ depend on its argument. */
+ (void) fn;
if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn)))
&& REG_PARM_STACK_SPACE (fn) != 0)
return false;
for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
{
enum machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
- rtx tmp = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
+ rtx tmp = targetm.calls.function_arg (&args_so_far, mode,
+ NULL_TREE, true);
if (!tmp || !REG_P (tmp))
return false;
if (targetm.calls.arg_partial_bytes (&args_so_far, mode, NULL, 1))
return false;
- FUNCTION_ARG_ADVANCE (args_so_far, mode, NULL_TREE, 1);
+ targetm.calls.function_arg_advance (&args_so_far, mode,
+ NULL_TREE, true);
}
}
return true;
emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
unsigned int expected_align, HOST_WIDE_INT expected_size)
{
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
int save_volatile_ok = volatile_ok;
enum machine_mode mode;
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- enum insn_code code = movmem_optab[(int) mode];
- insn_operand_predicate_fn pred;
+ enum insn_code code = direct_optab_handler (movmem_optab, mode);
if (code != CODE_FOR_nothing
/* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
&& ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (x, BLKmode))
- && ((pred = insn_data[(int) code].operand[1].predicate) == 0
- || (*pred) (y, BLKmode))
- && ((pred = insn_data[(int) code].operand[3].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD))
{
- rtx op2;
- rtx last = get_last_insn ();
- rtx pat;
-
- op2 = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[2].predicate;
- if (pred != 0 && ! (*pred) (op2, mode))
- op2 = copy_to_mode_reg (mode, op2);
+ struct expand_operand ops[6];
+ unsigned int nops;
/* ??? When called via emit_block_move_for_call, it'd be
nice if there were some way to inform the backend, so
that it doesn't fail the expansion because it thinks
emitting the libcall would be more efficient. */
-
- if (insn_data[(int) code].n_operands == 4)
- pat = GEN_FCN ((int) code) (x, y, op2, opalign);
- else
- pat = GEN_FCN ((int) code) (x, y, op2, opalign,
- GEN_INT (expected_align
- / BITS_PER_UNIT),
- GEN_INT (expected_size));
- if (pat)
+ nops = insn_data[(int) code].n_generator_args;
+ gcc_assert (nops == 4 || nops == 6);
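+ /* Fixed-form movmem<mode> patterns take four operands (destination,
+    source, size, alignment); extended patterns take six, adding the
+    expected-alignment and expected-size hints.  */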
+
+ create_fixed_operand (&ops[0], x);
+ create_fixed_operand (&ops[1], y);
+ /* The check above guarantees that this size conversion is valid. */
+ create_convert_operand_to (&ops[2], size, mode, true);
+ create_integer_operand (&ops[3], align / BITS_PER_UNIT);
+ if (nops == 6)
+ {
+ create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
+ create_integer_operand (&ops[5], expected_size);
+ }
+ if (maybe_expand_insn (code, nops, ops))
{
- emit_insn (pat);
volatile_ok = save_volatile_ok;
return true;
}
- else
- delete_insns_since (last);
}
}
if (nregs == 0)
return;
- if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
x = validize_mem (force_const_mem (mode, x));
/* See if the machine can do this with a load multiple insn. */
&& (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode))
tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
(bytepos % slen0) * BITS_PER_UNIT,
- 1, NULL_RTX, mode, mode);
+ 1, false, NULL_RTX, mode, mode);
}
else
{
mem = assign_stack_temp (GET_MODE (src), slen, 0);
emit_move_insn (mem, src);
tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
- 0, 1, NULL_RTX, mode, mode);
+ 0, 1, false, NULL_RTX, mode, mode);
}
}
/* FIXME: A SIMD parallel will eventually lead to a subreg of a
tmps[i] = src;
else
tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
- bytepos * BITS_PER_UNIT, 1, NULL_RTX,
+ bytepos * BITS_PER_UNIT, 1, false, NULL_RTX,
mode, mode);
if (shift)
tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
- build_int_cst (NULL_TREE, shift), tmps[i], 0);
+ shift, tmps[i], 0);
}
}
{
int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
- build_int_cst (NULL_TREE, shift),
- tmps[i], 0);
+ shift, tmps[i], 0);
}
bytelen = adj_bytelen;
}
bitpos for the destination store (left justified). */
store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, copy_mode,
extract_bit_field (src, bitsize,
- xbitpos % BITS_PER_WORD, 1,
+ xbitpos % BITS_PER_WORD, 1, false,
NULL_RTX, copy_mode, copy_mode));
}
unsigned HOST_WIDE_INT l;
unsigned int max_size;
HOST_WIDE_INT offset = 0;
- enum machine_mode mode, tmode;
+ enum machine_mode mode;
enum insn_code icode;
int reverse;
- rtx cst;
+ /* CST is set but not used if LEGITIMATE_CONSTANT_P doesn't use it.  */
+ rtx cst ATTRIBUTE_UNUSED;
if (len == 0)
return 1;
: STORE_BY_PIECES_P (len, align)))
return 0;
- tmode = mode_for_size (STORE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > STORE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
/* We would first store what we can in the largest integer mode, then go to
successively smaller modes. */
reverse++)
{
l = len;
- mode = VOIDmode;
max_size = STORE_MAX_PIECES + 1;
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
- icode = optab_handler (mov_optab, mode)->insn_code;
+ icode = optab_handler (mov_optab, mode);
if (icode != CODE_FOR_nothing
&& align >= GET_MODE_ALIGNMENT (mode))
{
offset -= size;
cst = (*constfun) (constfundata, offset, mode);
- if (!LEGITIMATE_CONSTANT_P (cst))
+ if (!targetm.legitimate_constant_p (mode, cst))
return 0;
if (!reverse)
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (data->to));
rtx to_addr = XEXP (data->to, 0);
unsigned int max_size = STORE_MAX_PIECES + 1;
- enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
data->offset = 0;
if (!data->autinc_to
&& move_by_pieces_ninsns (data->len, align, max_size) > 2)
{
- /* Determine the main mode we'll be using. */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ /* Determine the main mode we'll be using.
+ MODE might not be used depending on the definitions of the
+ USE_* macros below. */
+ enum machine_mode mode ATTRIBUTE_UNUSED
+ = widest_int_mode_for_size (max_size);
if (USE_STORE_PRE_DECREMENT (mode) && data->reverse && ! data->autinc_to)
{
data->to_addr = copy_to_mode_reg (to_addr_mode, to_addr);
}
- tmode = mode_for_size (STORE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > STORE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
/* First store what we can in the largest integer mode, then go to
successively smaller modes. */
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ enum machine_mode mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
- icode = optab_handler (mov_optab, mode)->insn_code;
+ icode = optab_handler (mov_optab, mode);
if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
store_by_pieces_2 (GEN_FCN (icode), mode, data);
including more than one in the machine description unless
the more limited one has some advantage. */
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
enum machine_mode mode;
if (expected_align < align)
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- enum insn_code code = setmem_optab[(int) mode];
- insn_operand_predicate_fn pred;
+ enum insn_code code = direct_optab_handler (setmem_optab, mode);
if (code != CODE_FOR_nothing
/* We don't need MODE to be narrower than
&& ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (object, BLKmode))
- && ((pred = insn_data[(int) code].operand[3].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD))
{
- rtx opsize, opchar;
- enum machine_mode char_mode;
- rtx last = get_last_insn ();
- rtx pat;
-
- opsize = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[1].predicate;
- if (pred != 0 && ! (*pred) (opsize, mode))
- opsize = copy_to_mode_reg (mode, opsize);
-
- opchar = val;
- char_mode = insn_data[(int) code].operand[2].mode;
- if (char_mode != VOIDmode)
- {
- opchar = convert_to_mode (char_mode, opchar, 1);
- pred = insn_data[(int) code].operand[2].predicate;
- if (pred != 0 && ! (*pred) (opchar, char_mode))
- opchar = copy_to_mode_reg (char_mode, opchar);
- }
-
- if (insn_data[(int) code].n_operands == 4)
- pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign);
- else
- pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign,
- GEN_INT (expected_align
- / BITS_PER_UNIT),
- GEN_INT (expected_size));
- if (pat)
+ struct expand_operand ops[6];
+ unsigned int nops;
+
+ nops = insn_data[(int) code].n_generator_args;
+ gcc_assert (nops == 4 || nops == 6);
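+ /* Fixed-form setmem<mode> patterns take four operands (destination,
+    size, fill value, alignment); extended patterns take six, adding
+    the expected-alignment and expected-size hints.  */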
+
+ create_fixed_operand (&ops[0], object);
+ /* The check above guarantees that this size conversion is valid. */
+ create_convert_operand_to (&ops[1], size, mode, true);
+ create_convert_operand_from (&ops[2], val, byte_mode, true);
+ create_integer_operand (&ops[3], align / BITS_PER_UNIT);
+ if (nops == 6)
{
- emit_insn (pat);
- return true;
+ create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
+ create_integer_operand (&ops[5], expected_size);
}
- else
- delete_insns_since (last);
+ if (maybe_expand_insn (code, nops, ops))
+ return true;
}
}
}
return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
- true, NULL_RTX, imode, imode);
+ true, false, NULL_RTX, imode, imode);
}
\f
/* A subroutine of emit_move_insn_1. Yet another lowpart generator.
return NULL_RTX;
/* The target must support moves in this mode. */
- code = optab_handler (mov_optab, imode)->insn_code;
+ code = optab_handler (mov_optab, imode);
if (code == CODE_FOR_nothing)
return NULL_RTX;
/* Move floating point as parts. */
if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
- && optab_handler (mov_optab, GET_MODE_INNER (mode))->insn_code != CODE_FOR_nothing)
+ && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing)
try_int = false;
/* Not possible if the values are inherently not adjacent. */
else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
/* Assume all MODE_CC modes are equivalent; if we have movcc, use it. */
if (mode != CCmode)
{
- enum insn_code code = optab_handler (mov_optab, CCmode)->insn_code;
+ enum insn_code code = optab_handler (mov_optab, CCmode);
if (code != CODE_FOR_nothing)
{
x = emit_move_change_mode (CCmode, mode, x, true);
gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
- code = optab_handler (mov_optab, mode)->insn_code;
+ code = optab_handler (mov_optab, mode);
if (code != CODE_FOR_nothing)
return emit_insn (GEN_FCN (code) (x, y));
y_cst = y;
- if (!LEGITIMATE_CONSTANT_P (y))
+ if (!targetm.legitimate_constant_p (mode, y))
{
y = force_const_mem (mode, y);
&& (set = single_set (last_insn)) != NULL_RTX
&& SET_DEST (set) == x
&& ! rtx_equal_p (y_cst, SET_SRC (set)))
- set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
+ set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
return last_insn;
}
REAL_VALUE_FROM_CONST_DOUBLE (r, y);
- if (LEGITIMATE_CONSTANT_P (y))
+ if (targetm.legitimate_constant_p (dstmode, y))
oldcost = rtx_cost (y, SET, speed);
else
oldcost = rtx_cost (force_const_mem (dstmode, y), SET, speed);
trunc_y = CONST_DOUBLE_FROM_REAL_VALUE (r, srcmode);
- if (LEGITIMATE_CONSTANT_P (trunc_y))
+ if (targetm.legitimate_constant_p (srcmode, trunc_y))
{
/* Skip if the target needs extra instructions to perform
the extension. */
- if (! (*insn_data[ic].operand[1].predicate) (trunc_y, srcmode))
+ if (!insn_operand_matches (ic, 1, trunc_y))
continue;
/* This is valid, but may not be cheaper than the original. */
newcost = rtx_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y), SET, speed);
unsigned rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
rtx dest;
enum insn_code icode;
- insn_operand_predicate_fn pred;
stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
/* If there is push pattern, use it. Otherwise try old way of throwing
MEM representing push operation to move expander. */
- icode = optab_handler (push_optab, mode)->insn_code;
+ icode = optab_handler (push_optab, mode);
if (icode != CODE_FOR_nothing)
{
- if (((pred = insn_data[(int) icode].operand[0].predicate)
- && !((*pred) (x, mode))))
- x = force_reg (mode, x);
- emit_insn (GEN_FCN (icode) (x));
- return;
+ struct expand_operand ops[1];
+
+ create_input_operand (&ops[0], x, mode);
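+ /* maybe_expand_insn forces X into a register if the pattern's
+    predicate rejects it; if the expansion still fails, fall through
+    to the MEM-based push below.  */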
+ if (maybe_expand_insn (icode, 1, ops))
+ return;
}
if (GET_MODE_SIZE (mode) == rounded_size)
dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
|| align >= BIGGEST_ALIGNMENT
|| (PUSH_ROUNDING (align / BITS_PER_UNIT)
== (align / BITS_PER_UNIT)))
- && PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
+ && (HOST_WIDE_INT) PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
{
/* Push padding now if padding above and stack grows down,
or if padding below and stack grows up.
by setting SKIP to 0. */
skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
- if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
x = validize_mem (force_const_mem (mode, x));
/* If X is a hard register in a non-integer mode, copy it into a pseudo;
tree op0, op1;
rtx value, result;
optab binop;
+ gimple srcstmt;
+ enum tree_code code;
if (mode1 != VOIDmode
|| bitsize >= BITS_PER_WORD
return false;
STRIP_NOPS (src);
- if (!BINARY_CLASS_P (src)
- || TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
+ if (TREE_CODE (src) != SSA_NAME)
return false;
+ if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
+ return false;
+
+ srcstmt = get_gimple_for_ssa_name (src);
+ if (!srcstmt
+ || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
+ return false;
+
+ code = gimple_assign_rhs_code (srcstmt);
- op0 = TREE_OPERAND (src, 0);
- op1 = TREE_OPERAND (src, 1);
- STRIP_NOPS (op0);
+ op0 = gimple_assign_rhs1 (srcstmt);
+
+ /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
+ to find its initialization. Hopefully the initialization will
+ be from a bitfield load. */
+ if (TREE_CODE (op0) == SSA_NAME)
+ {
+ gimple op0stmt = get_gimple_for_ssa_name (op0);
+
+ /* We want to eventually have OP0 be the same as TO, which
+ should be a bitfield. */
+ if (!op0stmt
+ || !is_gimple_assign (op0stmt)
+ || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
+ return false;
+ op0 = gimple_assign_rhs1 (op0stmt);
+ }
+
+ op1 = gimple_assign_rhs2 (srcstmt);
if (!operand_equal_p (to, op0, 0))
return false;
if (BYTES_BIG_ENDIAN)
bitpos = str_bitsize - bitpos - bitsize;
- switch (TREE_CODE (src))
+ switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
set_mem_expr (str_rtx, 0);
}
- binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
+ binop = code == PLUS_EXPR ? add_optab : sub_optab;
if (bitsize == 1 && bitpos + bitsize != str_bitsize)
{
value = expand_and (str_mode, value, const1_rtx, NULL);
binop = xor_optab;
}
value = expand_shift (LSHIFT_EXPR, str_mode, value,
- build_int_cst (NULL_TREE, bitpos),
- NULL_RTX, 1);
+ bitpos, NULL_RTX, 1);
result = expand_binop (str_mode, binop, str_rtx,
value, str_rtx, 1, OPTAB_WIDEN);
if (result != str_rtx)
set_mem_expr (str_rtx, 0);
}
- binop = TREE_CODE (src) == BIT_IOR_EXPR ? ior_optab : xor_optab;
+ binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
if (bitpos + bitsize != GET_MODE_BITSIZE (GET_MODE (str_rtx)))
{
rtx mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << bitsize)
NULL_RTX);
}
value = expand_shift (LSHIFT_EXPR, GET_MODE (str_rtx), value,
- build_int_cst (NULL_TREE, bitpos),
- NULL_RTX, 1);
+ bitpos, NULL_RTX, 1);
result = expand_binop (GET_MODE (str_rtx), binop, str_rtx,
value, str_rtx, 1, OPTAB_WIDEN);
if (result != str_rtx)
{
rtx to_rtx = 0;
rtx result;
+ enum machine_mode mode;
+ int align;
+ enum insn_code icode;
/* Don't crash if the lhs of the assignment was erroneous. */
if (TREE_CODE (to) == ERROR_MARK)
{
- result = expand_normal (from);
+ expand_normal (from);
return;
}
if (operand_equal_p (to, from, 0))
return;
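+ /* Handle misaligned stores: if TO is a MEM_REF or TARGET_MEM_REF whose
+    address is less aligned than its mode requires, and the target has a
+    movmisalign<mode> pattern, expand the store through that pattern.  */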
+ mode = TYPE_MODE (TREE_TYPE (to));
+ if ((TREE_CODE (to) == MEM_REF
+ || TREE_CODE (to) == TARGET_MEM_REF)
+ && mode != BLKmode
+ && ((align = MAX (TYPE_ALIGN (TREE_TYPE (to)),
+ get_object_alignment (to, BIGGEST_ALIGNMENT)))
+ < (signed) GET_MODE_ALIGNMENT (mode))
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
+ {
+ struct expand_operand ops[2];
+ enum machine_mode address_mode;
+ rtx reg, op0, mem;
+
+ reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ reg = force_not_mem (reg);
+
+ if (TREE_CODE (to) == MEM_REF)
+ {
+ addr_space_t as
+ = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (to, 1))));
+ tree base = TREE_OPERAND (to, 0);
+ address_mode = targetm.addr_space.address_mode (as);
+ op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ op0 = convert_memory_address_addr_space (address_mode, op0, as);
+ if (!integer_zerop (TREE_OPERAND (to, 1)))
+ {
+ rtx off
+ = immed_double_int_const (mem_ref_offset (to), address_mode);
+ op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
+ }
+ op0 = memory_address_addr_space (mode, op0, as);
+ mem = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (mem, to, 0);
+ set_mem_addr_space (mem, as);
+ }
+ else if (TREE_CODE (to) == TARGET_MEM_REF)
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (to));
+ struct mem_address addr;
+
+ get_address_description (to, &addr);
+ op0 = addr_for_mem_ref (&addr, as, true);
+ op0 = memory_address_addr_space (mode, op0, as);
+ mem = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (mem, to, 0);
+ set_mem_addr_space (mem, as);
+ }
+ else
+ gcc_unreachable ();
+ if (TREE_THIS_VOLATILE (to))
+ MEM_VOLATILE_P (mem) = 1;
+
+ create_fixed_operand (&ops[0], mem);
+ create_input_operand (&ops[1], reg, mode);
+ /* The movmisalign<mode> pattern cannot fail, else the assignment would
+ silently be omitted. */
+ expand_insn (icode, 2, ops);
+ return;
+ }
+
/* Assignment of a structure component needs special treatment
if the structure component's rtx is not simply a MEM.
Assignment of an array element at a constant index, and assignment of
an array element in an unaligned packed structure field, has the same
problem. */
if (handled_component_p (to)
+ /* ??? We only need to handle MEM_REF here if the access is not
+ a full access of the base object. */
+ || (TREE_CODE (to) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (to, 0)) == ADDR_EXPR)
|| TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
{
enum machine_mode mode1;
to_rtx = expand_normal (tem);
+ /* If the bitfield is volatile, we want to access it in the
+ field's mode, not the computed mode.
+ If a MEM has VOIDmode (external with incomplete type),
+ use BLKmode for it instead. */
+ if (MEM_P (to_rtx))
+ {
+ if (volatilep && flag_strict_volatile_bitfields > 0)
+ to_rtx = adjust_address (to_rtx, mode1, 0);
+ else if (GET_MODE (to_rtx) == VOIDmode)
+ to_rtx = adjust_address (to_rtx, BLKmode, 0);
+ }
+
if (offset != 0)
{
enum machine_mode address_mode;
offset));
}
+ /* No action is needed if the target is not a memory and the field
+ lies completely outside that target. This can occur if the source
+ code contains an out-of-bounds access to a small array. */
+ if (!MEM_P (to_rtx)
+ && GET_MODE (to_rtx) != BLKmode
+ && (unsigned HOST_WIDE_INT) bitpos
+ >= GET_MODE_BITSIZE (GET_MODE (to_rtx)))
+ {
+ expand_normal (from);
+ result = NULL;
+ }
/* Handle expand_expr of a complex value returning a CONCAT. */
- if (GET_CODE (to_rtx) == CONCAT)
+ else if (GET_CODE (to_rtx) == CONCAT)
{
- if (COMPLEX_MODE_P (TYPE_MODE (TREE_TYPE (from))))
+ unsigned short mode_bitsize = GET_MODE_BITSIZE (GET_MODE (to_rtx));
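+ /* Store directly when the field covers the whole CONCAT or is
+    confined to one of its halves; otherwise spill the CONCAT to a
+    stack temporary, store into that, and copy the parts back.  */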
+ if (COMPLEX_MODE_P (TYPE_MODE (TREE_TYPE (from)))
+ && bitpos == 0
+ && bitsize == mode_bitsize)
+ result = store_expr (from, to_rtx, false, nontemporal);
+ else if (bitsize == mode_bitsize / 2
+ && (bitpos == 0 || bitpos == mode_bitsize / 2))
+ result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
+ nontemporal);
+ else if (bitpos + bitsize <= mode_bitsize / 2)
+ result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
+ mode1, from, TREE_TYPE (tem),
+ get_alias_set (to), nontemporal);
+ else if (bitpos >= mode_bitsize / 2)
+ result = store_field (XEXP (to_rtx, 1), bitsize,
+ bitpos - mode_bitsize / 2, mode1, from,
+ TREE_TYPE (tem), get_alias_set (to),
+ nontemporal);
+ else if (bitpos == 0 && bitsize == mode_bitsize)
{
- gcc_assert (bitpos == 0);
- result = store_expr (from, to_rtx, false, nontemporal);
+ rtx from_rtx;
+ result = expand_normal (from);
+ from_rtx = simplify_gen_subreg (GET_MODE (to_rtx), result,
+ TYPE_MODE (TREE_TYPE (from)), 0);
+ emit_move_insn (XEXP (to_rtx, 0),
+ read_complex_part (from_rtx, false));
+ emit_move_insn (XEXP (to_rtx, 1),
+ read_complex_part (from_rtx, true));
}
else
{
- gcc_assert (bitpos == 0 || bitpos == GET_MODE_BITSIZE (mode1));
- result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
- nontemporal);
+ rtx temp = assign_stack_temp (GET_MODE (to_rtx),
+ GET_MODE_SIZE (GET_MODE (to_rtx)),
+ 0);
+ write_complex_part (temp, XEXP (to_rtx, 0), false);
+ write_complex_part (temp, XEXP (to_rtx, 1), true);
+ result = store_field (temp, bitsize, bitpos, mode1, from,
+ TREE_TYPE (tem), get_alias_set (to),
+ nontemporal);
+ emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
+ emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
}
}
else
return;
}
- else if (TREE_CODE (to) == MISALIGNED_INDIRECT_REF)
- {
- addr_space_t as = ADDR_SPACE_GENERIC;
- enum machine_mode mode, op_mode1;
- enum insn_code icode;
- rtx reg, addr, mem, insn;
-
- if (POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (to, 0))))
- as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (to, 0))));
-
- reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
- reg = force_not_mem (reg);
-
- mode = TYPE_MODE (TREE_TYPE (to));
- addr = expand_expr (TREE_OPERAND (to, 0), NULL_RTX, VOIDmode,
- EXPAND_SUM);
- addr = memory_address_addr_space (mode, addr, as);
- mem = gen_rtx_MEM (mode, addr);
-
- set_mem_attributes (mem, to, 0);
- set_mem_addr_space (mem, as);
-
- icode = movmisalign_optab->handlers[mode].insn_code;
- gcc_assert (icode != CODE_FOR_nothing);
-
- op_mode1 = insn_data[icode].operand[1].mode;
- if (! (*insn_data[icode].operand[1].predicate) (reg, op_mode1)
- && op_mode1 != VOIDmode)
- reg = copy_to_mode_reg (op_mode1, reg);
-
- insn = GEN_FCN (icode) (mem, reg);
- emit_insn (insn);
- return;
- }
-
/* If the rhs is a function call and its value is not an aggregate,
call the function before we start to compute the lhs.
This is needed for correct code for cases such as
bool
emit_storent_insn (rtx to, rtx from)
{
- enum machine_mode mode = GET_MODE (to), imode;
- enum insn_code code = optab_handler (storent_optab, mode)->insn_code;
- rtx pattern;
+ struct expand_operand ops[2];
+ enum machine_mode mode = GET_MODE (to);
+ enum insn_code code = optab_handler (storent_optab, mode);
if (code == CODE_FOR_nothing)
return false;
- imode = insn_data[code].operand[0].mode;
- if (!insn_data[code].operand[0].predicate (to, imode))
- return false;
-
- imode = insn_data[code].operand[1].mode;
- if (!insn_data[code].operand[1].predicate (from, imode))
- {
- from = copy_to_mode_reg (imode, from);
- if (!insn_data[code].operand[1].predicate (from, imode))
- return false;
- }
-
- pattern = GEN_FCN (code) (to, from);
- if (pattern == NULL_RTX)
- return false;
-
- emit_insn (pattern);
- return true;
+ create_fixed_operand (&ops[0], to);
+ create_input_operand (&ops[1], from, mode);
+ return maybe_expand_insn (code, 2, ops);
}
/* Generate code for computing expression EXP,
return NULL_RTX;
}
- else if (TREE_CODE (exp) == STRING_CST
+ else if ((TREE_CODE (exp) == STRING_CST
+ || (TREE_CODE (exp) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ == STRING_CST
+ && integer_zerop (TREE_OPERAND (exp, 1))))
&& !nontemporal && !call_param_p
- && TREE_STRING_LENGTH (exp) > 0
- && TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
+ && MEM_P (target))
{
/* Optimize initialization of an array with a STRING_CST. */
HOST_WIDE_INT exp_len, str_copy_len;
rtx dest_mem;
+ tree str = TREE_CODE (exp) == STRING_CST
+ ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
exp_len = int_expr_size (exp);
if (exp_len <= 0)
goto normal_expr;
- str_copy_len = strlen (TREE_STRING_POINTER (exp));
- if (str_copy_len < TREE_STRING_LENGTH (exp) - 1)
+ if (TREE_STRING_LENGTH (str) <= 0)
goto normal_expr;
- str_copy_len = TREE_STRING_LENGTH (exp);
- if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
+ str_copy_len = strlen (TREE_STRING_POINTER (str));
+ if (str_copy_len < TREE_STRING_LENGTH (str) - 1)
+ goto normal_expr;
+
+ str_copy_len = TREE_STRING_LENGTH (str);
+ if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0
+ && TREE_STRING_POINTER (str)[TREE_STRING_LENGTH (str) - 1] == '\0')
{
str_copy_len += STORE_MAX_PIECES - 1;
str_copy_len &= ~(STORE_MAX_PIECES - 1);
}
str_copy_len = MIN (str_copy_len, exp_len);
if (!can_store_by_pieces (str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (exp)),
+ CONST_CAST (char *, TREE_STRING_POINTER (str)),
MEM_ALIGN (target), false))
goto normal_expr;
dest_mem = store_by_pieces (dest_mem,
str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (exp)),
+ CONST_CAST (char *,
+ TREE_STRING_POINTER (str)),
MEM_ALIGN (target), false,
exp_len > str_copy_len ? 1 : 0);
if (exp_len > str_copy_len)
/* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
but TARGET is not valid memory reference, TEMP will differ
from TARGET although it is really the same location. */
- && !(alt_rtl && rtx_equal_p (alt_rtl, target))
+ && !(alt_rtl
+ && rtx_equal_p (alt_rtl, target)
+ && !side_effects_p (alt_rtl)
+ && !side_effects_p (target))
/* If there's nothing to copy, don't bother. Don't call
expr_size unless necessary, because some front-ends (C++)
expr_size-hook must not be given objects that are not
{
int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
if (GET_MODE (target) == BLKmode
- || GET_MODE (temp) == BLKmode)
+ && GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
(call_param_p
? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL));
+ else if (GET_MODE (target) == BLKmode)
+ store_bit_field (target, INTVAL (expr_size (exp)) * BITS_PER_UNIT,
+ 0, GET_MODE (temp), temp);
else
convert_move (target, temp, unsignedp);
}
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
{
- HOST_WIDE_INT mult;
+ HOST_WIDE_INT mult = 1;
- mult = 1;
if (TREE_CODE (purpose) == RANGE_EXPR)
{
tree lo_index = TREE_OPERAND (purpose, 0);
break;
default:
- nz_elts += mult;
- elt_count += mult;
+ {
+ HOST_WIDE_INT tc = count_type_elements (TREE_TYPE (value), true);
+ if (tc < 1)
+ tc = 1;
+ nz_elts += mult * tc;
+ elt_count += mult * tc;
- if (const_from_elts_p && const_p)
- const_p = initializer_constant_valid_p (value, TREE_TYPE (value))
- != NULL_TREE;
+ if (const_from_elts_p && const_p)
+ const_p = initializer_constant_valid_p (value, TREE_TYPE (value))
+ != NULL_TREE;
+ }
break;
}
}
HOST_WIDE_INT n = 0, t;
tree f;
- for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
if (TREE_CODE (f) == FIELD_DECL)
{
t = count_type_elements (TREE_TYPE (f), false);
/* Check for structures with flexible array member. */
tree tf = TREE_TYPE (f);
if (allow_flexarr
- && TREE_CHAIN (f) == NULL
+ && DECL_CHAIN (f) == NULL
&& TREE_CODE (tf) == ARRAY_TYPE
&& TYPE_DOMAIN (tf)
&& TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
{
enum machine_mode mode = GET_MODE (target);
- icode = (int) optab_handler (vec_init_optab, mode)->insn_code;
+ icode = (int) optab_handler (vec_init_optab, mode);
if (icode != CODE_FOR_nothing)
{
unsigned int i;
int n_elts_here = tree_low_cst
(int_const_binop (TRUNC_DIV_EXPR,
TYPE_SIZE (TREE_TYPE (value)),
- TYPE_SIZE (elttype), 0), 1);
+ TYPE_SIZE (elttype)), 1);
count += n_elts_here;
if (mostly_zeros_p (value))
operations. */
|| (bitsize >= 0
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST
- && compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)), bitsize) != 0))
+ && compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)), bitsize) != 0)
+ /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
+ decl we must use bitfield operations. */
+ || (bitsize >= 0
+ && TREE_CODE (exp) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && DECL_P (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != BLKmode))
{
rtx temp;
gimple nop_def;
&& bitsize < (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (temp))
&& TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp,
- size_int (GET_MODE_BITSIZE (GET_MODE (temp))
- - bitsize),
+ GET_MODE_BITSIZE (GET_MODE (temp)) - bitsize,
NULL_RTX, 1);
/* Unless MODE is VOIDmode or BLKmode, convert TEMP to
if (to_rtx == target)
to_rtx = copy_rtx (to_rtx);
- MEM_SET_IN_STRUCT_P (to_rtx, 1);
+ if (!MEM_SCALAR_P (to_rtx))
+ MEM_IN_STRUCT_P (to_rtx) = 1;
if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
set_mem_alias_set (to_rtx, alias_set);
enum machine_mode mode = VOIDmode;
bool blkmode_bitfield = false;
tree offset = size_zero_node;
- tree bit_offset = bitsize_zero_node;
+ double_int bit_offset = double_int_zero;
/* First get the mode, signedness, and size. We do this from just the
outermost expression. */
mode = DECL_MODE (field);
else if (DECL_MODE (field) == BLKmode)
blkmode_bitfield = true;
+ else if (TREE_THIS_VOLATILE (exp)
+ && flag_strict_volatile_bitfields > 0)
+ /* Volatile bitfields should be accessed in the mode of the
+ field's type, not the mode computed based on the bit
+ size. */
+ mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
*punsignedp = DECL_UNSIGNED (field);
}
switch (TREE_CODE (exp))
{
case BIT_FIELD_REF:
- bit_offset = size_binop (PLUS_EXPR, bit_offset,
- TREE_OPERAND (exp, 2));
+ bit_offset
+ = double_int_add (bit_offset,
+ tree_to_double_int (TREE_OPERAND (exp, 2)));
break;
case COMPONENT_REF:
break;
offset = size_binop (PLUS_EXPR, offset, this_offset);
- bit_offset = size_binop (PLUS_EXPR, bit_offset,
- DECL_FIELD_BIT_OFFSET (field));
+ bit_offset = double_int_add (bit_offset,
+ tree_to_double_int
+ (DECL_FIELD_BIT_OFFSET (field)));
/* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
}
break;
case IMAGPART_EXPR:
- bit_offset = size_binop (PLUS_EXPR, bit_offset,
- bitsize_int (*pbitsize));
+ bit_offset = double_int_add (bit_offset,
+ uhwi_to_double_int (*pbitsize));
break;
case VIEW_CONVERT_EXPR:
goto done;
break;
+ case MEM_REF:
+ /* Hand back the decl for MEM[&decl, off]. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR)
+ {
+ tree off = TREE_OPERAND (exp, 1);
+ if (!integer_zerop (off))
+ {
+ double_int boff, coff = mem_ref_offset (exp);
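+ /* Scale the constant byte offset to a bit offset.  */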
+ boff = double_int_lshift (coff,
+ BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT, true);
+ bit_offset = double_int_add (bit_offset, boff);
+ }
+ exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ }
+ goto done;
+
default:
goto done;
}
this conversion. */
if (host_integerp (offset, 0))
{
- double_int tem = double_int_mul (tree_to_double_int (offset),
- uhwi_to_double_int (BITS_PER_UNIT));
- tem = double_int_add (tem, tree_to_double_int (bit_offset));
+ double_int tem = double_int_lshift (tree_to_double_int (offset),
+ BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT, true);
+ tem = double_int_add (tem, bit_offset);
if (double_int_fits_in_shwi_p (tem))
{
*pbitpos = double_int_to_shwi (tem);
/* Otherwise, split it up. */
if (offset)
{
- *pbitpos = tree_low_cst (bit_offset, 0);
+ *pbitpos = double_int_to_shwi (bit_offset);
*poffset = offset;
}
constructor_elt *ce;
unsigned HOST_WIDE_INT idx;
- for (idx = 0;
- VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce);
- idx++)
+ FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce)
if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
|| !safe_from_p (x, ce->value, 0))
return 0;
}
break;
- case MISALIGNED_INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case INDIRECT_REF:
+ case MEM_REF:
if (MEM_P (x)
&& alias_sets_conflict_p (MEM_ALIAS_SET (x),
get_alias_set (exp)))
return MAX (factor, talign);
}
\f
-/* Return &VAR expression for emulated thread local VAR. */
-
-static tree
-emutls_var_address (tree var)
-{
- tree emuvar = emutls_decl (var);
- tree fn = built_in_decls [BUILT_IN_EMUTLS_GET_ADDRESS];
- tree arg = build_fold_addr_expr_with_type (emuvar, ptr_type_node);
- tree arglist = build_tree_list (NULL_TREE, arg);
- tree call = build_function_call_expr (UNKNOWN_LOCATION, fn, arglist);
- return fold_convert (build_pointer_type (TREE_TYPE (var)), call);
-}
-\f
-
/* Subroutine of expand_expr. Expand the two operands of a binary
expression EXP0 and EXP1 placing the results in OP0 and OP1.
The value may be stored in TARGET if TARGET is nonzero. The
/* This case will happen via recursion for &a->b. */
return expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
+ case MEM_REF:
+ {
+ tree tem = TREE_OPERAND (exp, 0);
+ if (!integer_zerop (TREE_OPERAND (exp, 1)))
+ tem = build2 (POINTER_PLUS_EXPR, TREE_TYPE (TREE_OPERAND (exp, 1)),
+ tem,
+ double_int_to_tree (sizetype, mem_ref_offset (exp)));
+ return expand_expr (tem, target, tmode, modifier);
+ }
+
case CONST_DECL:
/* Expand the initializer like constants above. */
return XEXP (expand_expr_constant (DECL_INITIAL (exp), 0, modifier), 0);
inner = TREE_OPERAND (exp, 0);
break;
- case VAR_DECL:
- /* TLS emulation hook - replace __thread VAR's &VAR with
- __emutls_get_address (&_emutls.VAR). */
- if (! targetm.have_tls
- && TREE_CODE (exp) == VAR_DECL
- && DECL_THREAD_LOCAL_P (exp))
- {
- exp = emutls_var_address (exp);
- return expand_expr (exp, target, tmode, modifier);
- }
- /* Fall through. */
-
default:
/* If the object is a DECL, then expand it for its rtl. Don't bypass
expand_expr, as that can have various side effects; LABEL_DECLs for
tmp = convert_memory_address_addr_space (tmode, tmp, as);
if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
- result = gen_rtx_PLUS (tmode, result, tmp);
+ result = simplify_gen_binary (PLUS, tmode, result, tmp);
else
{
subtarget = bitpos ? NULL_RTX : target;
optab this_optab;
rtx subtarget, original_target;
int ignore;
- tree subexp0, subexp1;
bool reduce_bit_field;
- gimple subexp0_def, subexp1_def;
- tree top0, top1;
location_t loc = ops->location;
- tree treeop0, treeop1;
+ tree treeop0, treeop1, treeop2;
#define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
? reduce_to_bit_field_precision ((expr), \
target, \
treeop0 = ops->op0;
treeop1 = ops->op1;
+ treeop2 = ops->op2;
/* We should be called only on simple (binary or unary) expressions,
exactly those that are valid in gimple expressions that aren't
GIMPLE_SINGLE_RHS (or invalid). */
gcc_assert (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS
- || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS);
+ || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS
+ || get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS);
ignore = (target == const0_rtx
|| ((CONVERT_EXPR_CODE_P (code)
else if (CONSTANT_P (op0))
{
tree inner_type = TREE_TYPE (treeop0);
- enum machine_mode inner_mode = TYPE_MODE (inner_type);
+ enum machine_mode inner_mode = GET_MODE (op0);
+
+ if (inner_mode == VOIDmode)
+ inner_mode = TYPE_MODE (inner_type);
if (modifier == EXPAND_INITIALIZER)
op0 = simplify_gen_subreg (mode, op0, inner_mode,
fold_convert_loc (loc, ssizetype,
treeop1));
case PLUS_EXPR:
-
- /* Check if this is a case for multiplication and addition. */
- if ((TREE_CODE (type) == INTEGER_TYPE
- || TREE_CODE (type) == FIXED_POINT_TYPE)
- && (subexp0_def = get_def_for_expr (treeop0,
- MULT_EXPR)))
- {
- tree subsubexp0, subsubexp1;
- gimple subsubexp0_def, subsubexp1_def;
- enum tree_code this_code;
-
- this_code = TREE_CODE (type) == INTEGER_TYPE ? NOP_EXPR
- : FIXED_CONVERT_EXPR;
- subsubexp0 = gimple_assign_rhs1 (subexp0_def);
- subsubexp0_def = get_def_for_expr (subsubexp0, this_code);
- subsubexp1 = gimple_assign_rhs2 (subexp0_def);
- subsubexp1_def = get_def_for_expr (subsubexp1, this_code);
- if (subsubexp0_def && subsubexp1_def
- && (top0 = gimple_assign_rhs1 (subsubexp0_def))
- && (top1 = gimple_assign_rhs1 (subsubexp1_def))
- && (TYPE_PRECISION (TREE_TYPE (top0))
- < TYPE_PRECISION (TREE_TYPE (subsubexp0)))
- && (TYPE_PRECISION (TREE_TYPE (top0))
- == TYPE_PRECISION (TREE_TYPE (top1)))
- && (TYPE_UNSIGNED (TREE_TYPE (top0))
- == TYPE_UNSIGNED (TREE_TYPE (top1))))
- {
- tree op0type = TREE_TYPE (top0);
- enum machine_mode innermode = TYPE_MODE (op0type);
- bool zextend_p = TYPE_UNSIGNED (op0type);
- bool sat_p = TYPE_SATURATING (TREE_TYPE (subsubexp0));
- if (sat_p == 0)
- this_optab = zextend_p ? umadd_widen_optab : smadd_widen_optab;
- else
- this_optab = zextend_p ? usmadd_widen_optab
- : ssmadd_widen_optab;
- if (mode == GET_MODE_2XWIDER_MODE (innermode)
- && (optab_handler (this_optab, mode)->insn_code
- != CODE_FOR_nothing))
- {
- expand_operands (top0, top1, NULL_RTX, &op0, &op1,
- EXPAND_NORMAL);
- op2 = expand_expr (treeop1, subtarget,
- VOIDmode, EXPAND_NORMAL);
- temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
- target, unsignedp);
- gcc_assert (temp);
- return REDUCE_BIT_FIELD (temp);
- }
- }
- }
-
/* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
something else, make sure we add the register to the constant and
then to the other thing. This case can occur during strength
}
}
+ /* Use TER to expand pointer addition of a negated value
+ as pointer subtraction. */
+ if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
+ || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
+ && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
+ && TREE_CODE (treeop1) == SSA_NAME
+ && TYPE_MODE (TREE_TYPE (treeop0))
+ == TYPE_MODE (TREE_TYPE (treeop1)))
+ {
+ gimple def = get_def_for_expr (treeop1, NEGATE_EXPR);
+ if (def)
+ {
+ treeop1 = gimple_assign_rhs1 (def);
+ code = MINUS_EXPR;
+ goto do_minus;
+ }
+ }
+
/* No sense saving up arithmetic to be done
if it's all in the wrong mode to form part of an address.
And force_operand won't know whether to sign-extend or
return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
case MINUS_EXPR:
- /* Check if this is a case for multiplication and subtraction. */
- if ((TREE_CODE (type) == INTEGER_TYPE
- || TREE_CODE (type) == FIXED_POINT_TYPE)
- && (subexp1_def = get_def_for_expr (treeop1,
- MULT_EXPR)))
- {
- tree subsubexp0, subsubexp1;
- gimple subsubexp0_def, subsubexp1_def;
- enum tree_code this_code;
-
- this_code = TREE_CODE (type) == INTEGER_TYPE ? NOP_EXPR
- : FIXED_CONVERT_EXPR;
- subsubexp0 = gimple_assign_rhs1 (subexp1_def);
- subsubexp0_def = get_def_for_expr (subsubexp0, this_code);
- subsubexp1 = gimple_assign_rhs2 (subexp1_def);
- subsubexp1_def = get_def_for_expr (subsubexp1, this_code);
- if (subsubexp0_def && subsubexp1_def
- && (top0 = gimple_assign_rhs1 (subsubexp0_def))
- && (top1 = gimple_assign_rhs1 (subsubexp1_def))
- && (TYPE_PRECISION (TREE_TYPE (top0))
- < TYPE_PRECISION (TREE_TYPE (subsubexp0)))
- && (TYPE_PRECISION (TREE_TYPE (top0))
- == TYPE_PRECISION (TREE_TYPE (top1)))
- && (TYPE_UNSIGNED (TREE_TYPE (top0))
- == TYPE_UNSIGNED (TREE_TYPE (top1))))
- {
- tree op0type = TREE_TYPE (top0);
- enum machine_mode innermode = TYPE_MODE (op0type);
- bool zextend_p = TYPE_UNSIGNED (op0type);
- bool sat_p = TYPE_SATURATING (TREE_TYPE (subsubexp0));
- if (sat_p == 0)
- this_optab = zextend_p ? umsub_widen_optab : smsub_widen_optab;
- else
- this_optab = zextend_p ? usmsub_widen_optab
- : ssmsub_widen_optab;
- if (mode == GET_MODE_2XWIDER_MODE (innermode)
- && (optab_handler (this_optab, mode)->insn_code
- != CODE_FOR_nothing))
- {
- expand_operands (top0, top1, NULL_RTX, &op0, &op1,
- EXPAND_NORMAL);
- op2 = expand_expr (treeop0, subtarget,
- VOIDmode, EXPAND_NORMAL);
- temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
- target, unsignedp);
- gcc_assert (temp);
- return REDUCE_BIT_FIELD (temp);
- }
- }
- }
-
+ do_minus:
/* For initializers, we are allowed to return a MINUS of two
symbolic constants. Here we handle all cases when both operands
are constant. */
goto binop2;
- case MULT_EXPR:
- /* If this is a fixed-point operation, then we cannot use the code
- below because "expand_mult" doesn't support sat/no-sat fixed-point
- multiplications. */
- if (ALL_FIXED_POINT_MODE_P (mode))
- goto binop;
+ case WIDEN_MULT_PLUS_EXPR:
+ case WIDEN_MULT_MINUS_EXPR:
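+ /* Ternary widening multiply-and-accumulate: the widened product
+    (widen)op0 * (widen)op1 added to, or subtracted from, op2,
+    expanded through the target's widening madd/msub patterns.  */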
+ expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (treeop2);
+ target = expand_widen_pattern_expr (ops, op0, op1, op2,
+ target, unsignedp);
+ return target;
+ case WIDEN_MULT_EXPR:
/* If first operand is constant, swap them.
Thus the following special case checks need only
check the second operand. */
treeop1 = t1;
}
- /* Attempt to return something suitable for generating an
- indexed address, for machines that support that. */
-
- if (modifier == EXPAND_SUM && mode == ptr_mode
- && host_integerp (treeop1, 0))
- {
- tree exp1 = treeop1;
-
- op0 = expand_expr (treeop0, subtarget, VOIDmode,
- EXPAND_SUM);
-
- if (!REG_P (op0))
- op0 = force_operand (op0, NULL_RTX);
- if (!REG_P (op0))
- op0 = copy_to_mode_reg (mode, op0);
-
- return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0,
- gen_int_mode (tree_low_cst (exp1, 0),
- TYPE_MODE (TREE_TYPE (exp1)))));
- }
-
- if (modifier == EXPAND_STACK_PARM)
- target = 0;
-
- /* Check for multiplying things that have been extended
- from a narrower type. If this machine supports multiplying
- in that narrower type with a result in the desired type,
- do it that way, and avoid the explicit type-conversion. */
-
- subexp0 = treeop0;
- subexp1 = treeop1;
- subexp0_def = get_def_for_expr (subexp0, NOP_EXPR);
- subexp1_def = get_def_for_expr (subexp1, NOP_EXPR);
- top0 = top1 = NULL_TREE;
-
/* First, check if we have a multiplication of one signed and one
unsigned operand. */
- if (subexp0_def
- && (top0 = gimple_assign_rhs1 (subexp0_def))
- && subexp1_def
- && (top1 = gimple_assign_rhs1 (subexp1_def))
- && TREE_CODE (type) == INTEGER_TYPE
- && (TYPE_PRECISION (TREE_TYPE (top0))
- < TYPE_PRECISION (TREE_TYPE (subexp0)))
- && (TYPE_PRECISION (TREE_TYPE (top0))
- == TYPE_PRECISION (TREE_TYPE (top1)))
- && (TYPE_UNSIGNED (TREE_TYPE (top0))
- != TYPE_UNSIGNED (TREE_TYPE (top1))))
+ if (TREE_CODE (treeop1) != INTEGER_CST
+ && (TYPE_UNSIGNED (TREE_TYPE (treeop0))
+ != TYPE_UNSIGNED (TREE_TYPE (treeop1))))
{
- enum machine_mode innermode
- = TYPE_MODE (TREE_TYPE (top0));
+ enum machine_mode innermode = TYPE_MODE (TREE_TYPE (treeop0));
this_optab = usmul_widen_optab;
- if (mode == GET_MODE_WIDER_MODE (innermode))
+ if (mode == GET_MODE_2XWIDER_MODE (innermode))
{
- if (optab_handler (this_optab, mode)->insn_code != CODE_FOR_nothing)
+ if (optab_handler (this_optab, mode) != CODE_FOR_nothing)
{
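+ /* The usmul widening pattern expects the unsigned operand first,
+ so swap the expansion order when TREEOP0 is the signed one. */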
- if (TYPE_UNSIGNED (TREE_TYPE (top0)))
- expand_operands (top0, top1, NULL_RTX, &op0, &op1,
+ if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
+ expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
EXPAND_NORMAL);
else
- expand_operands (top0, top1, NULL_RTX, &op1, &op0,
+ expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
EXPAND_NORMAL);
-
goto binop3;
}
}
}
- /* Check for a multiplication with matching signedness. If
- valid, TOP0 and TOP1 were set in the previous if
- condition. */
- else if (top0
- && TREE_CODE (type) == INTEGER_TYPE
- && (TYPE_PRECISION (TREE_TYPE (top0))
- < TYPE_PRECISION (TREE_TYPE (subexp0)))
- && ((TREE_CODE (subexp1) == INTEGER_CST
- && int_fits_type_p (subexp1, TREE_TYPE (top0))
- /* Don't use a widening multiply if a shift will do. */
- && ((GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (subexp1)))
- > HOST_BITS_PER_WIDE_INT)
- || exact_log2 (TREE_INT_CST_LOW (subexp1)) < 0))
- ||
- (top1
- && (TYPE_PRECISION (TREE_TYPE (top1))
- == TYPE_PRECISION (TREE_TYPE (top0))
- /* If both operands are extended, they must either both
- be zero-extended or both be sign-extended. */
- && (TYPE_UNSIGNED (TREE_TYPE (top1))
- == TYPE_UNSIGNED (TREE_TYPE (top0)))))))
+ /* Check for a multiplication with matching signedness. */
+ else if ((TREE_CODE (treeop1) == INTEGER_CST
+ && int_fits_type_p (treeop1, TREE_TYPE (treeop0)))
+ || (TYPE_UNSIGNED (TREE_TYPE (treeop1))
+ == TYPE_UNSIGNED (TREE_TYPE (treeop0))))
{
- tree op0type = TREE_TYPE (top0);
+ tree op0type = TREE_TYPE (treeop0);
enum machine_mode innermode = TYPE_MODE (op0type);
bool zextend_p = TYPE_UNSIGNED (op0type);
optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
- if (mode == GET_MODE_2XWIDER_MODE (innermode))
+ if (mode == GET_MODE_2XWIDER_MODE (innermode)
+ && TREE_CODE (treeop0) != INTEGER_CST)
{
- if (optab_handler (this_optab, mode)->insn_code != CODE_FOR_nothing)
+ if (optab_handler (this_optab, mode) != CODE_FOR_nothing)
{
- if (TREE_CODE (subexp1) == INTEGER_CST)
- expand_operands (top0, subexp1, NULL_RTX, &op0, &op1,
- EXPAND_NORMAL);
- else
- expand_operands (top0, top1, NULL_RTX, &op0, &op1,
- EXPAND_NORMAL);
- goto binop3;
+ expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
+ EXPAND_NORMAL);
+ temp = expand_widening_mult (mode, op0, op1, target,
+ unsignedp, this_optab);
+ return REDUCE_BIT_FIELD (temp);
}
- else if (optab_handler (other_optab, mode)->insn_code != CODE_FOR_nothing
- && innermode == word_mode)
+ if (optab_handler (other_optab, mode) != CODE_FOR_nothing
+ && innermode == word_mode)
{
rtx htem, hipart;
- op0 = expand_normal (top0);
- if (TREE_CODE (subexp1) == INTEGER_CST)
+ op0 = expand_normal (treeop0);
+ if (TREE_CODE (treeop1) == INTEGER_CST)
op1 = convert_modes (innermode, mode,
- expand_normal (subexp1), unsignedp);
+ expand_normal (treeop1), unsignedp);
else
- op1 = expand_normal (top1);
+ op1 = expand_normal (treeop1);
temp = expand_binop (mode, other_optab, op0, op1, target,
unsignedp, OPTAB_LIB_WIDEN);
hipart = gen_highpart (innermode, temp);
}
}
}
- expand_operands (subexp0, subexp1, subtarget, &op0, &op1, EXPAND_NORMAL);
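+ /* No widening pattern applies; convert both operands to the result
+ type and fall back to an ordinary multiplication. */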
+ treeop0 = fold_build1 (CONVERT_EXPR, type, treeop0);
+ treeop1 = fold_build1 (CONVERT_EXPR, type, treeop1);
+ expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
+ return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
+
+ case FMA_EXPR:
+ {
+ optab opt = fma_optab;
+ gimple def0, def2;
+
+ /* If there is no insn for FMA, emit it as a __builtin_fma{,f,l}
+ call. */
+ if (optab_handler (fma_optab, mode) == CODE_FOR_nothing)
+ {
+ tree fn = mathfn_built_in (TREE_TYPE (treeop0), BUILT_IN_FMA);
+ tree call_expr;
+
+ gcc_assert (fn != NULL_TREE);
+ call_expr = build_call_expr (fn, 3, treeop0, treeop1, treeop2);
+ return expand_builtin (call_expr, target, subtarget, mode, false);
+ }
+
+ def0 = get_def_for_expr (treeop0, NEGATE_EXPR);
+ def2 = get_def_for_expr (treeop2, NEGATE_EXPR);
+
+ op0 = op2 = NULL;
+
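+ /* FMA computes op0 * op1 + op2. When op0 or op2 is fed by a
+ NEGATE_EXPR, the negation can be folded into a variant pattern:
+ fnms is (-a) * b - c, fnma is (-a) * b + c, fms is a * b - c. */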
+ if (def0 && def2
+ && optab_handler (fnms_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fnms_optab;
+ op0 = expand_normal (gimple_assign_rhs1 (def0));
+ op2 = expand_normal (gimple_assign_rhs1 (def2));
+ }
+ else if (def0
+ && optab_handler (fnma_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fnma_optab;
+ op0 = expand_normal (gimple_assign_rhs1 (def0));
+ }
+ else if (def2
+ && optab_handler (fms_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fms_optab;
+ op2 = expand_normal (gimple_assign_rhs1 (def2));
+ }
+
+ if (op0 == NULL)
+ op0 = expand_expr (treeop0, subtarget, VOIDmode, EXPAND_NORMAL);
+ if (op2 == NULL)
+ op2 = expand_normal (treeop2);
+ op1 = expand_normal (treeop1);
+
+ return expand_ternary_op (TYPE_MODE (type), opt,
+ op0, op1, op2, target, 0);
+ }
+
+ case MULT_EXPR:
+ /* If this is a fixed-point operation, then we cannot use the code
+ below because "expand_mult" doesn't support sat/no-sat fixed-point
+ multiplications. */
+ if (ALL_FIXED_POINT_MODE_P (mode))
+ goto binop;
+
+ /* If first operand is constant, swap them.
+ Thus the following special case checks need only
+ check the second operand. */
+ if (TREE_CODE (treeop0) == INTEGER_CST)
+ {
+ tree t1 = treeop0;
+ treeop0 = treeop1;
+ treeop1 = t1;
+ }
+
+ /* Attempt to return something suitable for generating an
+ indexed address, for machines that support that. */
+
+ if (modifier == EXPAND_SUM && mode == ptr_mode
+ && host_integerp (treeop1, 0))
+ {
+ tree exp1 = treeop1;
+
+ op0 = expand_expr (treeop0, subtarget, VOIDmode,
+ EXPAND_SUM);
+
+ if (!REG_P (op0))
+ op0 = force_operand (op0, NULL_RTX);
+ if (!REG_P (op0))
+ op0 = copy_to_mode_reg (mode, op0);
+
+ return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0,
+ gen_int_mode (tree_low_cst (exp1, 0),
+ TYPE_MODE (TREE_TYPE (exp1)))));
+ }
+
+ if (modifier == EXPAND_STACK_PARM)
+ target = 0;
+
+ expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
case TRUNC_DIV_EXPR:
target = 0;
op0 = expand_expr (treeop0, subtarget,
VOIDmode, EXPAND_NORMAL);
- temp = expand_shift (code, mode, op0, treeop1, target,
- unsignedp);
+ temp = expand_variable_shift (code, mode, op0, treeop1, target,
+ unsignedp);
if (code == LSHIFT_EXPR)
temp = REDUCE_BIT_FIELD (temp);
return temp;
op1 = gen_label_rtx ();
jumpifnot_1 (code, treeop0, treeop1, op1, -1);
- emit_move_insn (target, const1_rtx);
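+ /* In a signed 1-bit type the only nonzero value is -1, so that is
+ the value "true" must have. */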
+ if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
+ emit_move_insn (target, constm1_rtx);
+ else
+ emit_move_insn (target, const1_rtx);
emit_label (op1);
return target;
case VEC_UNPACK_LO_EXPR:
{
op0 = expand_normal (treeop0);
- this_optab = optab_for_tree_code (code, type, optab_default);
temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
target, unsignedp);
gcc_assert (temp);
{
op0 = expand_normal (treeop0);
/* The signedness is determined from input operand. */
- this_optab = optab_for_tree_code (code,
- TREE_TYPE (treeop0),
- optab_default);
temp = expand_widen_pattern_expr
(ops, op0, NULL_RTX, NULL_RTX,
target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
mode = TYPE_MODE (TREE_TYPE (treeop0));
goto binop;
+ case DOT_PROD_EXPR:
+ {
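+ /* A dot product: multiply corresponding narrow elements of the
+ first two operands, widen the products and accumulate them into
+ the wide third operand. */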
+ tree oprnd0 = treeop0;
+ tree oprnd1 = treeop1;
+ tree oprnd2 = treeop2;
+ rtx op2;
+
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (oprnd2);
+ target = expand_widen_pattern_expr (ops, op0, op1, op2,
+ target, unsignedp);
+ return target;
+ }
+
+ case REALIGN_LOAD_EXPR:
+ {
+ tree oprnd0 = treeop0;
+ tree oprnd1 = treeop1;
+ tree oprnd2 = treeop2;
+ rtx op2;
+
+ this_optab = optab_for_tree_code (code, type, optab_default);
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (oprnd2);
+ temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
+ target, unsignedp);
+ gcc_assert (temp);
+ return temp;
+ }
+
default:
gcc_unreachable ();
}
int unsignedp;
enum machine_mode mode;
enum tree_code code = TREE_CODE (exp);
- optab this_optab;
rtx subtarget, original_target;
int ignore;
tree context;
location_t loc = EXPR_LOCATION (exp);
struct separate_ops ops;
tree treeop0, treeop1, treeop2;
+ tree ssa_name = NULL_TREE;
+ gimple g;
type = TREE_TYPE (exp);
mode = TYPE_MODE (type);
{
temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
if (MEM_P (temp))
- temp = copy_to_reg (temp);
+ copy_to_reg (temp);
return const0_rtx;
}
base variable. This unnecessarily allocates a pseudo; see whether we can
reuse it if partition base vars have it set already. */
if (!currently_expanding_to_rtl)
- return expand_expr_real_1 (SSA_NAME_VAR (exp), target, tmode, modifier, NULL);
- {
- gimple g = get_gimple_for_ssa_name (exp);
- if (g)
- return expand_expr_real (gimple_assign_rhs_to_tree (g), target,
- tmode, modifier, NULL);
- }
- decl_rtl = get_rtx_for_ssa_name (exp);
- exp = SSA_NAME_VAR (exp);
+ return expand_expr_real_1 (SSA_NAME_VAR (exp), target, tmode, modifier,
+ NULL);
+
+ g = get_gimple_for_ssa_name (exp);
+ /* For EXPAND_INITIALIZER try harder to get something simpler. */
+ if (g == NULL
+ && modifier == EXPAND_INITIALIZER
+ && !SSA_NAME_IS_DEFAULT_DEF (exp)
+ && (optimize || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
+ && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
+ g = SSA_NAME_DEF_STMT (exp);
+ if (g)
+ return expand_expr_real (gimple_assign_rhs_to_tree (g), target, tmode,
+ modifier, NULL);
+
+ ssa_name = exp;
+ decl_rtl = get_rtx_for_ssa_name (ssa_name);
+ exp = SSA_NAME_VAR (ssa_name);
goto expand_decl_rtl;
case PARM_DECL:
&& (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
layout_decl (exp, 0);
- /* TLS emulation hook - replace __thread vars with
- *__emutls_get_address (&_emutls.var). */
- if (! targetm.have_tls
- && TREE_CODE (exp) == VAR_DECL
- && DECL_THREAD_LOCAL_P (exp))
- {
- exp = build_fold_indirect_ref_loc (loc, emutls_var_address (exp));
- return expand_expr_real_1 (exp, target, tmode, modifier, NULL);
- }
-
/* ... fall through ... */
case FUNCTION_DECL:
expand_decl_rtl:
gcc_assert (decl_rtl);
decl_rtl = copy_rtx (decl_rtl);
+ /* Record writes to hard register variables. */
+ if (modifier == EXPAND_WRITE
+ && REG_P (decl_rtl)
+ && HARD_REGISTER_P (decl_rtl))
+ add_to_hard_reg_set (&crtl->asm_clobbers,
+ GET_MODE (decl_rtl), REGNO (decl_rtl));
/* Ensure variable marked as used even if it doesn't go through
a parser. If it hasn't been used yet, write out an external
gcc_assert (!context
|| context == current_function_decl
|| TREE_STATIC (exp)
+ || DECL_EXTERNAL (exp)
/* ??? C++ creates functions that are not TREE_STATIC. */
|| TREE_CODE (exp) == FUNCTION_DECL);
/* If the mode of DECL_RTL does not match that of the decl, it
must be a promoted value. We return a SUBREG of the wanted mode,
but mark it so that we know that it was already extended. */
-
- if (REG_P (decl_rtl)
- && GET_MODE (decl_rtl) != DECL_MODE (exp))
+ if (REG_P (decl_rtl) && GET_MODE (decl_rtl) != DECL_MODE (exp))
{
enum machine_mode pmode;
- /* Get the signedness used for this variable. Ensure we get the
- same mode we got when the variable was declared. */
- pmode = promote_decl_mode (exp, &unsignedp);
+ /* Get the signedness to be used for this variable. Ensure we get
+ the same mode we got when the variable was declared. */
+ if (code == SSA_NAME
+ && (g = SSA_NAME_DEF_STMT (ssa_name))
+ && gimple_code (g) == GIMPLE_CALL)
+ {
+ gcc_assert (!gimple_call_internal_p (g));
+ pmode = promote_function_mode (type, mode, &unsignedp,
+ gimple_call_fntype (g),
+ 2);
+ }
+ else
+ pmode = promote_decl_mode (exp, &unsignedp);
gcc_assert (GET_MODE (decl_rtl) == pmode);
temp = gen_lowpart_SUBREG (mode, decl_rtl);
return expand_constructor (exp, target, modifier, false);
- case MISALIGNED_INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case INDIRECT_REF:
+ case TARGET_MEM_REF:
{
- tree exp1 = treeop0;
- addr_space_t as = ADDR_SPACE_GENERIC;
- enum machine_mode address_mode = Pmode;
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
+ struct mem_address addr;
+ int icode, align;
- if (modifier != EXPAND_WRITE)
+ get_address_description (exp, &addr);
+ op0 = addr_for_mem_ref (&addr, as, true);
+ op0 = memory_address_addr_space (mode, op0, as);
+ temp = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (temp, exp, 0);
+ set_mem_addr_space (temp, as);
+ align = MAX (TYPE_ALIGN (TREE_TYPE (exp)),
+ get_object_alignment (exp, BIGGEST_ALIGNMENT));
+ if (mode != BLKmode
+ && (unsigned) align < GET_MODE_ALIGNMENT (mode)
+ /* If the target does not have special handling for unaligned
+ loads of this mode, then ordinary moves can be used for them. */
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
{
- tree t;
+ rtx reg, insn;
- t = fold_read_from_constant_string (exp);
- if (t)
- return expand_expr (t, target, tmode, modifier);
+ /* We've already validated the memory, and we're creating a
+ new pseudo destination. The predicates really can't fail. */
+ reg = gen_reg_rtx (mode);
+
+ /* Nor can the insn generator. */
+ insn = GEN_FCN (icode) (reg, temp);
+ gcc_assert (insn != NULL_RTX);
+ emit_insn (insn);
+
+ return reg;
}
+ return temp;
+ }
- if (POINTER_TYPE_P (TREE_TYPE (exp1)))
+ case MEM_REF:
+ {
+ addr_space_t as
+ = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 1))));
+ enum machine_mode address_mode;
+ tree base = TREE_OPERAND (exp, 0);
+ gimple def_stmt;
+ int icode, align;
+ /* Handle expansion of non-aliased, non-BLKmode memory; it
+ might end up in a register. */
+ if (TREE_CODE (base) == ADDR_EXPR)
{
- as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp1)));
- address_mode = targetm.addr_space.address_mode (as);
+ HOST_WIDE_INT offset = mem_ref_offset (exp).low;
+ tree bit_offset;
+ base = TREE_OPERAND (base, 0);
+ if (!DECL_P (base))
+ {
+ HOST_WIDE_INT off;
+ base = get_addr_base_and_unit_offset (base, &off);
+ gcc_assert (base);
+ offset += off;
+ }
+ /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
+ decl, we must use bit-field operations. */
+ if (DECL_P (base)
+ && !TREE_ADDRESSABLE (base)
+ && DECL_MODE (base) != BLKmode
+ && DECL_RTL_SET_P (base)
+ && !MEM_P (DECL_RTL (base)))
+ {
+ tree bftype;
+ if (offset == 0
+ && host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1)
+ && (GET_MODE_BITSIZE (DECL_MODE (base))
+ == TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp)))))
+ return expand_expr (build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (exp), base),
+ target, tmode, modifier);
+ bit_offset = bitsize_int (offset * BITS_PER_UNIT);
+ bftype = TREE_TYPE (base);
+ if (TYPE_MODE (TREE_TYPE (exp)) != BLKmode)
+ bftype = TREE_TYPE (exp);
+ return expand_expr (build3 (BIT_FIELD_REF, bftype,
+ base,
+ TYPE_SIZE (TREE_TYPE (exp)),
+ bit_offset),
+ target, tmode, modifier);
+ }
}
-
- op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
- op0 = memory_address_addr_space (mode, op0, as);
-
- if (code == ALIGN_INDIRECT_REF)
+ address_mode = targetm.addr_space.address_mode (as);
+ base = TREE_OPERAND (exp, 0);
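+ /* If the base is an SSA name defined by a BIT_AND_EXPR (a masked
+ pointer), rebuild the masking as part of the address expression
+ so it is expanded together with the address. */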
+ if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
{
- int align = TYPE_ALIGN_UNIT (type);
- op0 = gen_rtx_AND (address_mode, op0, GEN_INT (-align));
- op0 = memory_address_addr_space (mode, op0, as);
+ tree mask = gimple_assign_rhs2 (def_stmt);
+ base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
+ gimple_assign_rhs1 (def_stmt), mask);
+ TREE_OPERAND (exp, 0) = base;
}
-
+ align = MAX (TYPE_ALIGN (TREE_TYPE (exp)),
+ get_object_alignment (exp, BIGGEST_ALIGNMENT));
+ op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
+ op0 = memory_address_addr_space (address_mode, op0, as);
+ if (!integer_zerop (TREE_OPERAND (exp, 1)))
+ {
+ rtx off
+ = immed_double_int_const (mem_ref_offset (exp), address_mode);
+ op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
+ }
+ op0 = memory_address_addr_space (mode, op0, as);
temp = gen_rtx_MEM (mode, op0);
-
set_mem_attributes (temp, exp, 0);
set_mem_addr_space (temp, as);
-
- /* Resolve the misalignment now, so that we don't have to remember
- to resolve it later. Of course, this only works for reads. */
- if (code == MISALIGNED_INDIRECT_REF)
+ if (TREE_THIS_VOLATILE (exp))
+ MEM_VOLATILE_P (temp) = 1;
+ if (mode != BLKmode
+ && (unsigned) align < GET_MODE_ALIGNMENT (mode)
+ /* If the target does not have special handling for unaligned
+ loads of this mode, then ordinary moves can be used for them. */
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
{
- int icode;
rtx reg, insn;
- gcc_assert (modifier == EXPAND_NORMAL
- || modifier == EXPAND_STACK_PARM);
-
- /* The vectorizer should have already checked the mode. */
- icode = optab_handler (movmisalign_optab, mode)->insn_code;
- gcc_assert (icode != CODE_FOR_nothing);
-
/* We've already validated the memory, and we're creating a
new pseudo destination. The predicates really can't fail. */
reg = gen_reg_rtx (mode);
return reg;
}
-
return temp;
}
- case TARGET_MEM_REF:
- {
- addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
- struct mem_address addr;
-
- get_address_description (exp, &addr);
- op0 = addr_for_mem_ref (&addr, as, true);
- op0 = memory_address_addr_space (mode, op0, as);
- temp = gen_rtx_MEM (mode, op0);
- set_mem_attributes (temp, TMR_ORIGINAL (exp), 0);
- set_mem_addr_space (temp, as);
- }
- return temp;
-
case ARRAY_REF:
{
&& TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
&& TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array)
&& TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK
- && targetm.binds_local_p (array))
+ && const_value_known_p (array))
{
if (TREE_CODE (index) == INTEGER_CST)
{
}
else
{
- tree count
- = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (imode) - bitsize);
+ int count = GET_MODE_BITSIZE (imode) - bitsize;
op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
target, 0);
HOST_WIDE_INT bitsize, bitpos;
tree offset;
int volatilep = 0, must_force_mem;
+ bool packedp = false;
tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
&mode1, &unsignedp, &volatilep, true);
rtx orig_op0, memloc;
infinitely recurse. */
gcc_assert (tem != exp);
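+ /* Note whether the field or its containing record is packed;
+ extract_bit_field is told about this below. */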
+ if (TYPE_PACKED (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ || (TREE_CODE (TREE_OPERAND (exp, 1)) == FIELD_DECL
+ && DECL_PACKED (TREE_OPERAND (exp, 1))))
+ packedp = true;
+
/* If TEM's type is a union of variable size, pass TARGET to the inner
computation, since it will need a temporary and TARGET is known
to suffice. This occurs in unchecked conversion in Ada. */
|| modifier == EXPAND_STACK_PARM)
? modifier : EXPAND_NORMAL);
+
+ /* If the bitfield is volatile, we want to access it in the
+ field's mode, not the computed mode.
+ If a MEM has VOIDmode (external with incomplete type),
+ use BLKmode for it instead. */
+ if (MEM_P (op0))
+ {
+ if (volatilep && flag_strict_volatile_bitfields > 0)
+ op0 = adjust_address (op0, mode1, 0);
+ else if (GET_MODE (op0) == VOIDmode)
+ op0 = adjust_address (op0, BLKmode, 0);
+ }
+
mode2
= CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
constant and we don't need a memory reference. */
if (CONSTANT_P (op0)
&& mode2 != BLKmode
- && LEGITIMATE_CONSTANT_P (op0)
+ && targetm.legitimate_constant_p (mode2, op0)
&& !must_force_mem)
op0 = force_reg (mode2, op0);
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER)
+ /* If the field is volatile, we always want an aligned
+ access. Do this only when the access is not already naturally
+ aligned; otherwise "normal" (non-bit-field) volatile fields
+ become non-addressable. */
+ || (volatilep && flag_strict_volatile_bitfields > 0
+ && (bitpos % GET_MODE_ALIGNMENT (mode) != 0))
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
|| (mode1 != BLKmode
if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
- op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
+ op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp, packedp,
(modifier == EXPAND_STACK_PARM
? NULL_RTX : target),
ext_mode, ext_mode);
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
&& bitsize < (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (op0)))
op0 = expand_shift (LSHIFT_EXPR, GET_MODE (op0), op0,
- size_int (GET_MODE_BITSIZE (GET_MODE (op0))
- - bitsize),
- op0, 1);
+ GET_MODE_BITSIZE (GET_MODE (op0))
+ - bitsize, op0, 1);
/* If the result type is BLKmode, store the data into a temporary
of the appropriate type, but with the mode corresponding to the
{
if (GET_CODE (op0) == SUBREG)
op0 = force_reg (GET_MODE (op0), op0);
- op0 = gen_lowpart (mode, op0);
+ temp = gen_lowpart_common (mode, op0);
+ if (temp)
+ op0 = temp;
+ else
+ {
+ if (!REG_P (op0) && !MEM_P (op0))
+ op0 = force_reg (GET_MODE (op0), op0);
+ op0 = gen_lowpart (mode, op0);
+ }
}
- /* If both modes are integral, then we can convert from one to the
- other. */
- else if (SCALAR_INT_MODE_P (GET_MODE (op0)) && SCALAR_INT_MODE_P (mode))
+ /* If both types are integral, convert from one mode to the other. */
+ else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
op0 = convert_modes (mode, GET_MODE (op0), op0,
TYPE_UNSIGNED (TREE_TYPE (treeop0)));
/* As a last resort, spill op0 to memory, and reload it in a
return expand_expr_real (treeop0, original_target, tmode,
modifier, alt_rtl);
- case REALIGN_LOAD_EXPR:
- {
- tree oprnd0 = treeop0;
- tree oprnd1 = treeop1;
- tree oprnd2 = treeop2;
- rtx op2;
-
- this_optab = optab_for_tree_code (code, type, optab_default);
- expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (oprnd2);
- temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
- target, unsignedp);
- gcc_assert (temp);
- return temp;
- }
-
- case DOT_PROD_EXPR:
- {
- tree oprnd0 = treeop0;
- tree oprnd1 = treeop1;
- tree oprnd2 = treeop2;
- rtx op2;
-
- expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (oprnd2);
- target = expand_widen_pattern_expr (&ops, op0, op1, op2,
- target, unsignedp);
- return target;
- }
-
case COMPOUND_LITERAL_EXPR:
{
/* Initialize the anonymous variable declared in the compound
}
else if (TYPE_UNSIGNED (type))
{
- rtx mask;
- if (prec < HOST_BITS_PER_WIDE_INT)
- mask = immed_double_const (((unsigned HOST_WIDE_INT) 1 << prec) - 1, 0,
- GET_MODE (exp));
- else
- mask = immed_double_const ((unsigned HOST_WIDE_INT) -1,
- ((unsigned HOST_WIDE_INT) 1
- << (prec - HOST_BITS_PER_WIDE_INT)) - 1,
- GET_MODE (exp));
+ rtx mask = immed_double_int_const (double_int_mask (prec),
+ GET_MODE (exp));
return expand_and (GET_MODE (exp), exp, mask, target);
}
else
{
- tree count = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (GET_MODE (exp)) - prec);
- exp = expand_shift (LSHIFT_EXPR, GET_MODE (exp), exp, count, target, 0);
- return expand_shift (RSHIFT_EXPR, GET_MODE (exp), exp, count, target, 0);
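+ /* Sign-extend from PREC bits: shift left so that bit PREC - 1
+ becomes the sign bit, then arithmetic-shift back down. */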
+ int count = GET_MODE_BITSIZE (GET_MODE (exp)) - prec;
+ exp = expand_shift (LSHIFT_EXPR, GET_MODE (exp),
+ exp, count, target, 0);
+ return expand_shift (RSHIFT_EXPR, GET_MODE (exp),
+ exp, count, target, 0);
}
}
\f
*ptr_offset = fold_convert (sizetype, offset);
return array;
}
- else if (TREE_CODE (array) == VAR_DECL)
+ else if (TREE_CODE (array) == VAR_DECL
+ || TREE_CODE (array) == CONST_DECL)
{
int length;
/* Variables initialized to string literals can be handled too. */
- if (DECL_INITIAL (array) == NULL_TREE
+ if (!const_value_known_p (array)
+ || !DECL_INITIAL (array)
|| TREE_CODE (DECL_INITIAL (array)) != STRING_CST)
return 0;
- /* If they are read-only, non-volatile and bind locally. */
- if (! TREE_READONLY (array)
- || TREE_SIDE_EFFECTS (array)
- || ! targetm.binds_local_p (array))
- return 0;
-
/* Avoid const char foo[4] = "abcde"; */
if (DECL_SIZE_UNIT (array) == NULL_TREE
|| TREE_CODE (DECL_SIZE_UNIT (array)) != INTEGER_CST
if ((code == NE || code == EQ)
&& TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1)
- && integer_pow2p (TREE_OPERAND (arg0, 1)))
+ && integer_pow2p (TREE_OPERAND (arg0, 1))
+ && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
{
tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
return expand_expr (fold_single_bit_test (loc,
/* Try a cstore if possible. */
return emit_store_flag_force (target, code, op0, op1,
- operand_mode, unsignedp, 1);
+ operand_mode, unsignedp,
+ (TYPE_PRECISION (ops->type) == 1
+ && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
}
\f
rtx table_label ATTRIBUTE_UNUSED, rtx default_label,
rtx fallback_label ATTRIBUTE_UNUSED)
{
+ struct expand_operand ops[5];
enum machine_mode index_mode = SImode;
int index_bits = GET_MODE_BITSIZE (index_mode);
rtx op1, op2, index;
- enum machine_mode op_mode;
if (! HAVE_casesi)
return 0;
do_pending_stack_adjust ();
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[0].mode;
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[0].predicate)
- (index, op_mode))
- index = copy_to_mode_reg (op_mode, index);
-
op1 = expand_normal (minval);
-
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[1].mode;
- op1 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (minval)),
- op1, TYPE_UNSIGNED (TREE_TYPE (minval)));
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[1].predicate)
- (op1, op_mode))
- op1 = copy_to_mode_reg (op_mode, op1);
-
op2 = expand_normal (range);
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[2].mode;
- op2 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (range)),
- op2, TYPE_UNSIGNED (TREE_TYPE (range)));
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[2].predicate)
- (op2, op_mode))
- op2 = copy_to_mode_reg (op_mode, op2);
-
- emit_jump_insn (gen_casesi (index, op1, op2,
- table_label, !default_label
- ? fallback_label : default_label));
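+ /* Operands of casesi: the index, the minimum value, the range
+ (maximum minus minimum), the jump table label, and the label for
+ an out-of-range index. The expand_operand machinery performs the
+ necessary mode conversions and predicate checks. */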
+ create_input_operand (&ops[0], index, index_mode);
+ create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
+ create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
+ create_fixed_operand (&ops[3], table_label);
+ create_fixed_operand (&ops[4], (default_label
+ ? default_label
+ : fallback_label));
+ expand_jump_insn (CODE_FOR_casesi, 5, ops);
return 1;
}
return 1;
}
-/* Nonzero if the mode is a valid vector mode for this architecture.
- This returns nonzero even if there is no hardware support for the
- vector mode, but we can emulate with narrower modes. */
-
-int
-vector_mode_valid_p (enum machine_mode mode)
-{
- enum mode_class mclass = GET_MODE_CLASS (mode);
- enum machine_mode innermode;
-
- /* Doh! What's going on? */
- if (mclass != MODE_VECTOR_INT
- && mclass != MODE_VECTOR_FLOAT
- && mclass != MODE_VECTOR_FRACT
- && mclass != MODE_VECTOR_UFRACT
- && mclass != MODE_VECTOR_ACCUM
- && mclass != MODE_VECTOR_UACCUM)
- return 0;
-
- /* Hardware support. Woo hoo! */
- if (targetm.vector_mode_supported_p (mode))
- return 1;
-
- innermode = GET_MODE_INNER (mode);
-
- /* We should probably return 1 if requesting V4DI and we have no DI,
- but we have V2DI, but this is probably very unlikely. */
-
- /* If we have support for the inner mode, we can safely emulate it.
- We may not have V2DI, but me can emulate with a pair of DIs. */
- return targetm.scalar_mode_supported_p (innermode);
-}
-
/* Return a CONST_VECTOR rtx for a VECTOR_CST tree. */
static rtx
const_vector_from_tree (tree exp)
RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
inner);
else
- RTVEC_ELT (v, i) = immed_double_const (TREE_INT_CST_LOW (elt),
- TREE_INT_CST_HIGH (elt),
- inner);
+ RTVEC_ELT (v, i) = immed_double_int_const (tree_to_double_int (elt),
+ inner);
}
/* Initialize remaining elements to 0. */
return gen_rtx_CONST_VECTOR (mode, v);
}
-
-/* Build a decl for a EH personality function named NAME. */
+/* Build a decl for a personality function given a language prefix. */
tree
-build_personality_function (const char *name)
+build_personality_function (const char *lang)
{
+ const char *unwind_and_version;
tree decl, type;
+ char *name;
+
+ switch (targetm.except_unwind_info (&global_options))
+ {
+ case UI_NONE:
+ return NULL;
+ case UI_SJLJ:
+ unwind_and_version = "_sj0";
+ break;
+ case UI_DWARF2:
+ case UI_TARGET:
+ unwind_and_version = "_v0";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
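+ /* For C++ with DWARF unwinding, for example, this produces the
+ familiar "__gxx_personality_v0". */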
+ name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
type = build_function_type_list (integer_type_node, integer_type_node,
long_long_unsigned_type_node,