/* Convert tree expression to rtl instructions, for GNU compiler.
Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
PUT_MODE (mem, srcmode);
- if ((*insn_data[ic].operand[1].predicate) (mem, srcmode))
+ if (insn_operand_matches (ic, 1, mem))
float_extend_from_mem[mode][srcmode] = true;
}
}
{
enum machine_mode intermediate;
rtx tmp;
- tree shift_amount;
+ int shift_amount;
/* Search for a mode to convert via. */
for (intermediate = from_mode; intermediate != VOIDmode;
/* No suitable intermediate mode.
Generate what we need with shifts. */
- shift_amount = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (to_mode)
- - GET_MODE_BITSIZE (from_mode));
+ shift_amount = (GET_MODE_BITSIZE (to_mode)
+ - GET_MODE_BITSIZE (from_mode));
from = gen_lowpart (to_mode, force_reg (from_mode, from));
tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
to, unsignedp);
return temp;
}
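/* Note the interface change above: expand_shift now takes a constant
   shift amount as a plain int rather than a tree, so callers no longer
   build a temporary INTEGER_CST just to shift.  Truly variable amounts
   go through expand_variable_shift (see the shift cases further
   down).  */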
\f
+/* Return the largest alignment we can use for doing a move (or store)
+   of MAX_PIECES bytes.  ALIGN is the largest alignment we could use.  */
+
+static unsigned int
+alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
+{
+ enum machine_mode tmode;
+
+ tmode = mode_for_size (max_pieces * BITS_PER_UNIT, MODE_INT, 1);
+ if (align >= GET_MODE_ALIGNMENT (tmode))
+ align = GET_MODE_ALIGNMENT (tmode);
+ else
+ {
+ enum machine_mode tmode, xmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
+ tmode != VOIDmode;
+ xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) > max_pieces
+ || SLOW_UNALIGNED_ACCESS (tmode, align))
+ break;
+
+ align = MAX (align, GET_MODE_ALIGNMENT (xmode));
+ }
+
+ return align;
+}
+
+/* Return the widest integer mode whose size is strictly less than SIZE
+   bytes (the by-pieces callers pass their maximum size plus one).  If
+   no such mode can be found, return VOIDmode.  */
+
+static enum machine_mode
+widest_int_mode_for_size (unsigned int size)
+{
+ enum machine_mode tmode, mode = VOIDmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < size)
+ mode = tmode;
+
+ return mode;
+}
+
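+/* A minimal sketch of how the two helpers above combine in the
+   by-pieces routines (mirroring move_by_pieces below): clamp the
+   alignment once up front, then walk down through successively
+   narrower integer modes until everything has been moved.
+
+     align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
+     while (max_size > 1)
+       {
+         enum machine_mode mode = widest_int_mode_for_size (max_size);
+         if (mode == VOIDmode)
+           break;
+         ... emit one batch of moves in MODE ...
+         max_size = GET_MODE_SIZE (mode);
+       }  */
+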
/* STORE_MAX_PIECES is the number of bytes at a time that we can
store efficiently. Due to internal GCC limitations, this is
MOVE_MAX_PIECES limited by the number of bytes GCC can represent
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (from));
rtx to_addr, from_addr = XEXP (from, 0);
unsigned int max_size = MOVE_MAX_PIECES + 1;
- enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
align = MIN (to ? MEM_ALIGN (to) : align, MEM_ALIGN (from));
if (!(data.autinc_from && data.autinc_to)
&& move_by_pieces_ninsns (len, align, max_size) > 2)
{
- /* Find the mode of the largest move... */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ /* Find the mode of the largest move...
+ MODE might not be used depending on the definitions of the
+ USE_* macros below. */
+ enum machine_mode mode ATTRIBUTE_UNUSED
+ = widest_int_mode_for_size (max_size);
if (USE_LOAD_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_from)
{
data.to_addr = copy_to_mode_reg (to_addr_mode, to_addr);
}
- tmode = mode_for_size (MOVE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > MOVE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
/* First move what we can in the largest integer mode, then go to
successively smaller modes. */
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ enum machine_mode mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
unsigned int max_size)
{
unsigned HOST_WIDE_INT n_insns = 0;
- enum machine_mode tmode;
- tmode = mode_for_size (MOVE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode tmode, xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > MOVE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
while (max_size > 1)
{
- enum machine_mode mode = VOIDmode;
+ enum machine_mode mode;
enum insn_code icode;
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
unsigned int expected_align, HOST_WIDE_INT expected_size)
{
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
int save_volatile_ok = volatile_ok;
enum machine_mode mode;
mode = GET_MODE_WIDER_MODE (mode))
{
enum insn_code code = direct_optab_handler (movmem_optab, mode);
- insn_operand_predicate_fn pred;
if (code != CODE_FOR_nothing
/* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
&& ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (x, BLKmode))
- && ((pred = insn_data[(int) code].operand[1].predicate) == 0
- || (*pred) (y, BLKmode))
- && ((pred = insn_data[(int) code].operand[3].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD))
{
- rtx op2;
- rtx last = get_last_insn ();
- rtx pat;
-
- op2 = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[2].predicate;
- if (pred != 0 && ! (*pred) (op2, mode))
- op2 = copy_to_mode_reg (mode, op2);
+ struct expand_operand ops[6];
+ unsigned int nops;
/* ??? When called via emit_block_move_for_call, it'd be
nice if there were some way to inform the backend, so
that it doesn't fail the expansion because it thinks
emitting the libcall would be more efficient. */
-
- if (insn_data[(int) code].n_operands == 4)
- pat = GEN_FCN ((int) code) (x, y, op2, opalign);
- else
- pat = GEN_FCN ((int) code) (x, y, op2, opalign,
- GEN_INT (expected_align
- / BITS_PER_UNIT),
- GEN_INT (expected_size));
- if (pat)
+ nops = insn_data[(int) code].n_generator_args;
+ gcc_assert (nops == 4 || nops == 6);
+
+ create_fixed_operand (&ops[0], x);
+ create_fixed_operand (&ops[1], y);
+ /* The check above guarantees that this size conversion is valid. */
+ create_convert_operand_to (&ops[2], size, mode, true);
+ create_integer_operand (&ops[3], align / BITS_PER_UNIT);
+ if (nops == 6)
+ {
+ create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
+ create_integer_operand (&ops[5], expected_size);
+ }
+ if (maybe_expand_insn (code, nops, ops))
{
- emit_insn (pat);
volatile_ok = save_volatile_ok;
return true;
}
- else
- delete_insns_since (last);
}
}
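/* The rewrite above switches to the declarative operand API: each
   operand is described by a create_*_operand call, and
   maybe_expand_insn legitimizes the operands against the pattern's
   predicates, emits the insn, and deletes any partially emitted insns
   on failure, replacing the hand-written predicate checks and the
   explicit delete_insns_since.  A minimal two-operand sketch:

     struct expand_operand ops[2];
     create_fixed_operand (&ops[0], dest);
     create_input_operand (&ops[1], src, mode);
     if (!maybe_expand_insn (icode, 2, ops))
       ... fall back to another strategy ...  */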
if (nregs == 0)
return;
- if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
x = validize_mem (force_const_mem (mode, x));
/* See if the machine can do this with a load multiple insn. */
&& (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode))
tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
(bytepos % slen0) * BITS_PER_UNIT,
- 1, NULL_RTX, mode, mode);
+ 1, false, NULL_RTX, mode, mode);
}
else
{
mem = assign_stack_temp (GET_MODE (src), slen, 0);
emit_move_insn (mem, src);
tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
- 0, 1, NULL_RTX, mode, mode);
+ 0, 1, false, NULL_RTX, mode, mode);
}
}
/* FIXME: A SIMD parallel will eventually lead to a subreg of a
tmps[i] = src;
else
tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
- bytepos * BITS_PER_UNIT, 1, NULL_RTX,
+ bytepos * BITS_PER_UNIT, 1, false, NULL_RTX,
mode, mode);
if (shift)
tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
- build_int_cst (NULL_TREE, shift), tmps[i], 0);
+ shift, tmps[i], 0);
}
}
{
int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
- build_int_cst (NULL_TREE, shift),
- tmps[i], 0);
+ shift, tmps[i], 0);
}
bytelen = adj_bytelen;
}
bitpos for the destination store (left justified). */
store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, copy_mode,
extract_bit_field (src, bitsize,
- xbitpos % BITS_PER_WORD, 1,
+ xbitpos % BITS_PER_WORD, 1, false,
NULL_RTX, copy_mode, copy_mode));
}
unsigned HOST_WIDE_INT l;
unsigned int max_size;
HOST_WIDE_INT offset = 0;
- enum machine_mode mode, tmode;
+ enum machine_mode mode;
enum insn_code icode;
int reverse;
- rtx cst;
+ /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */
+ rtx cst ATTRIBUTE_UNUSED;
if (len == 0)
return 1;
: STORE_BY_PIECES_P (len, align)))
return 0;
- tmode = mode_for_size (STORE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > STORE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
/* We would first store what we can in the largest integer mode, then go to
successively smaller modes. */
reverse++)
{
l = len;
- mode = VOIDmode;
max_size = STORE_MAX_PIECES + 1;
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
offset -= size;
cst = (*constfun) (constfundata, offset, mode);
- if (!LEGITIMATE_CONSTANT_P (cst))
+ if (!targetm.legitimate_constant_p (mode, cst))
return 0;
if (!reverse)
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (data->to));
rtx to_addr = XEXP (data->to, 0);
unsigned int max_size = STORE_MAX_PIECES + 1;
- enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
data->offset = 0;
if (!data->autinc_to
&& move_by_pieces_ninsns (data->len, align, max_size) > 2)
{
- /* Determine the main mode we'll be using. */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ /* Determine the main mode we'll be using.
+ MODE might not be used depending on the definitions of the
+ USE_* macros below. */
+ enum machine_mode mode ATTRIBUTE_UNUSED
+ = widest_int_mode_for_size (max_size);
if (USE_STORE_PRE_DECREMENT (mode) && data->reverse && ! data->autinc_to)
{
data->to_addr = copy_to_mode_reg (to_addr_mode, to_addr);
}
- tmode = mode_for_size (STORE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > STORE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
/* First store what we can in the largest integer mode, then go to
successively smaller modes. */
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ enum machine_mode mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
including more than one in the machine description unless
the more limited one has some advantage. */
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
enum machine_mode mode;
if (expected_align < align)
mode = GET_MODE_WIDER_MODE (mode))
{
enum insn_code code = direct_optab_handler (setmem_optab, mode);
- insn_operand_predicate_fn pred;
if (code != CODE_FOR_nothing
/* We don't need MODE to be narrower than
&& ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (object, BLKmode))
- && ((pred = insn_data[(int) code].operand[3].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD))
{
- rtx opsize, opchar;
- enum machine_mode char_mode;
- rtx last = get_last_insn ();
- rtx pat;
-
- opsize = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[1].predicate;
- if (pred != 0 && ! (*pred) (opsize, mode))
- opsize = copy_to_mode_reg (mode, opsize);
-
- opchar = val;
- char_mode = insn_data[(int) code].operand[2].mode;
- if (char_mode != VOIDmode)
+ struct expand_operand ops[6];
+ unsigned int nops;
+
+ nops = insn_data[(int) code].n_generator_args;
+ gcc_assert (nops == 4 || nops == 6);
+
+ create_fixed_operand (&ops[0], object);
+ /* The check above guarantees that this size conversion is valid. */
+ create_convert_operand_to (&ops[1], size, mode, true);
+ create_convert_operand_from (&ops[2], val, byte_mode, true);
+ create_integer_operand (&ops[3], align / BITS_PER_UNIT);
+ if (nops == 6)
{
- opchar = convert_to_mode (char_mode, opchar, 1);
- pred = insn_data[(int) code].operand[2].predicate;
- if (pred != 0 && ! (*pred) (opchar, char_mode))
- opchar = copy_to_mode_reg (char_mode, opchar);
+ create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
+ create_integer_operand (&ops[5], expected_size);
}
-
- if (insn_data[(int) code].n_operands == 4)
- pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign);
- else
- pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign,
- GEN_INT (expected_align
- / BITS_PER_UNIT),
- GEN_INT (expected_size));
- if (pat)
- {
- emit_insn (pat);
- return true;
- }
- else
- delete_insns_since (last);
+ if (maybe_expand_insn (code, nops, ops))
+ return true;
}
}
}
return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
- true, NULL_RTX, imode, imode);
+ true, false, NULL_RTX, imode, imode);
}
\f
/* A subroutine of emit_move_insn_1. Yet another lowpart generator.
y_cst = y;
- if (!LEGITIMATE_CONSTANT_P (y))
+ if (!targetm.legitimate_constant_p (mode, y))
{
y = force_const_mem (mode, y);
&& (set = single_set (last_insn)) != NULL_RTX
&& SET_DEST (set) == x
&& ! rtx_equal_p (y_cst, SET_SRC (set)))
- set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
+ set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
return last_insn;
}
REAL_VALUE_FROM_CONST_DOUBLE (r, y);
- if (LEGITIMATE_CONSTANT_P (y))
+ if (targetm.legitimate_constant_p (dstmode, y))
oldcost = rtx_cost (y, SET, speed);
else
oldcost = rtx_cost (force_const_mem (dstmode, y), SET, speed);
trunc_y = CONST_DOUBLE_FROM_REAL_VALUE (r, srcmode);
- if (LEGITIMATE_CONSTANT_P (trunc_y))
+ if (targetm.legitimate_constant_p (srcmode, trunc_y))
{
/* Skip if the target needs extra instructions to perform
the extension. */
- if (! (*insn_data[ic].operand[1].predicate) (trunc_y, srcmode))
+ if (!insn_operand_matches (ic, 1, trunc_y))
continue;
/* This is valid, but may not be cheaper than the original. */
newcost = rtx_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y), SET, speed);
unsigned rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
rtx dest;
enum insn_code icode;
- insn_operand_predicate_fn pred;
stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
  /* If there is a push pattern, use it.  Otherwise try the old way of throwing
icode = optab_handler (push_optab, mode);
if (icode != CODE_FOR_nothing)
{
- if (((pred = insn_data[(int) icode].operand[0].predicate)
- && !((*pred) (x, mode))))
- x = force_reg (mode, x);
- emit_insn (GEN_FCN (icode) (x));
- return;
+ struct expand_operand ops[1];
+
+ create_input_operand (&ops[0], x, mode);
+ if (maybe_expand_insn (icode, 1, ops))
+ return;
}
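  /* Note the subtle behavior change: the old code forced X into a
     register and emitted the push unconditionally, whereas
     maybe_expand_insn falls through to the generic pushing code below
     when the pattern still rejects its operand.  */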
if (GET_MODE_SIZE (mode) == rounded_size)
dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
|| align >= BIGGEST_ALIGNMENT
|| (PUSH_ROUNDING (align / BITS_PER_UNIT)
== (align / BITS_PER_UNIT)))
- && PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
+ && (HOST_WIDE_INT) PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
{
/* Push padding now if padding above and stack grows down,
or if padding below and stack grows up.
by setting SKIP to 0. */
skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
- if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
x = validize_mem (force_const_mem (mode, x));
/* If X is a hard register in a non-integer mode, copy it into a pseudo;
tree op0, op1;
rtx value, result;
optab binop;
+ gimple srcstmt;
+ enum tree_code code;
if (mode1 != VOIDmode
|| bitsize >= BITS_PER_WORD
return false;
STRIP_NOPS (src);
- if (!BINARY_CLASS_P (src)
- || TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
+ if (TREE_CODE (src) != SSA_NAME)
+ return false;
+ if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
+ return false;
+
+ srcstmt = get_gimple_for_ssa_name (src);
+ if (!srcstmt
+ || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
return false;
- op0 = TREE_OPERAND (src, 0);
- op1 = TREE_OPERAND (src, 1);
- STRIP_NOPS (op0);
+ code = gimple_assign_rhs_code (srcstmt);
+
+ op0 = gimple_assign_rhs1 (srcstmt);
+
+ /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
+ to find its initialization. Hopefully the initialization will
+ be from a bitfield load. */
+ if (TREE_CODE (op0) == SSA_NAME)
+ {
+ gimple op0stmt = get_gimple_for_ssa_name (op0);
+
+ /* We want to eventually have OP0 be the same as TO, which
+ should be a bitfield. */
+ if (!op0stmt
+ || !is_gimple_assign (op0stmt)
+ || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
+ return false;
+ op0 = gimple_assign_rhs1 (op0stmt);
+ }
+
+ op1 = gimple_assign_rhs2 (srcstmt);
if (!operand_equal_p (to, op0, 0))
return false;
if (BYTES_BIG_ENDIAN)
bitpos = str_bitsize - bitpos - bitsize;
- switch (TREE_CODE (src))
+ switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
set_mem_expr (str_rtx, 0);
}
- binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
+ binop = code == PLUS_EXPR ? add_optab : sub_optab;
if (bitsize == 1 && bitpos + bitsize != str_bitsize)
{
value = expand_and (str_mode, value, const1_rtx, NULL);
binop = xor_optab;
}
value = expand_shift (LSHIFT_EXPR, str_mode, value,
- build_int_cst (NULL_TREE, bitpos),
- NULL_RTX, 1);
+ bitpos, NULL_RTX, 1);
result = expand_binop (str_mode, binop, str_rtx,
value, str_rtx, 1, OPTAB_WIDEN);
if (result != str_rtx)
set_mem_expr (str_rtx, 0);
}
- binop = TREE_CODE (src) == BIT_IOR_EXPR ? ior_optab : xor_optab;
+ binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
if (bitpos + bitsize != GET_MODE_BITSIZE (GET_MODE (str_rtx)))
{
rtx mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << bitsize)
NULL_RTX);
}
value = expand_shift (LSHIFT_EXPR, GET_MODE (str_rtx), value,
- build_int_cst (NULL_TREE, bitpos),
- NULL_RTX, 1);
+ bitpos, NULL_RTX, 1);
result = expand_binop (GET_MODE (str_rtx), binop, str_rtx,
value, str_rtx, 1, OPTAB_WIDEN);
if (result != str_rtx)
{
rtx to_rtx = 0;
rtx result;
+ enum machine_mode mode;
+ int align;
+ enum insn_code icode;
/* Don't crash if the lhs of the assignment was erroneous. */
if (TREE_CODE (to) == ERROR_MARK)
{
- result = expand_normal (from);
+ expand_normal (from);
return;
}
if (operand_equal_p (to, from, 0))
return;
+ mode = TYPE_MODE (TREE_TYPE (to));
+ if ((TREE_CODE (to) == MEM_REF
+ || TREE_CODE (to) == TARGET_MEM_REF)
+ && mode != BLKmode
+ && ((align = MAX (TYPE_ALIGN (TREE_TYPE (to)),
+ get_object_alignment (to, BIGGEST_ALIGNMENT)))
+ < (signed) GET_MODE_ALIGNMENT (mode))
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
+ {
+ struct expand_operand ops[2];
+ enum machine_mode address_mode;
+ rtx reg, op0, mem;
+
+ reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ reg = force_not_mem (reg);
+
+ if (TREE_CODE (to) == MEM_REF)
+ {
+ addr_space_t as
+ = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (to, 1))));
+ tree base = TREE_OPERAND (to, 0);
+ address_mode = targetm.addr_space.address_mode (as);
+ op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ op0 = convert_memory_address_addr_space (address_mode, op0, as);
+ if (!integer_zerop (TREE_OPERAND (to, 1)))
+ {
+ rtx off
+ = immed_double_int_const (mem_ref_offset (to), address_mode);
+ op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
+ }
+ op0 = memory_address_addr_space (mode, op0, as);
+ mem = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (mem, to, 0);
+ set_mem_addr_space (mem, as);
+ }
+ else if (TREE_CODE (to) == TARGET_MEM_REF)
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (to));
+ struct mem_address addr;
+
+ get_address_description (to, &addr);
+ op0 = addr_for_mem_ref (&addr, as, true);
+ op0 = memory_address_addr_space (mode, op0, as);
+ mem = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (mem, to, 0);
+ set_mem_addr_space (mem, as);
+ }
+ else
+ gcc_unreachable ();
+ if (TREE_THIS_VOLATILE (to))
+ MEM_VOLATILE_P (mem) = 1;
+
+ create_fixed_operand (&ops[0], mem);
+ create_input_operand (&ops[1], reg, mode);
+ /* The movmisalign<mode> pattern cannot fail, else the assignment would
+ silently be omitted. */
+ expand_insn (icode, 2, ops);
+ return;
+ }
+
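+/* An illustrative testcase (not part of this change) of a store that
+   can reach the movmisalign path above on a strict-alignment target
+   providing movmisalign<mode>: the access type's alignment is lowered
+   below the mode's natural alignment, so the MEM_REF alignment test
+   fires.
+
+     typedef int unaligned_int __attribute__ ((aligned (1)));
+
+     void
+     store_unaligned (unaligned_int *p, int v)
+     {
+       *p = v;
+     }  */
+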
/* Assignment of a structure component needs special treatment
if the structure component's rtx is not simply a MEM.
Assignment of an array element at a constant index, and assignment of
to_rtx = expand_normal (tem);
/* If the bitfield is volatile, we want to access it in the
- field's mode, not the computed mode. */
- if (volatilep
- && GET_CODE (to_rtx) == MEM
- && flag_strict_volatile_bitfields > 0)
- to_rtx = adjust_address (to_rtx, mode1, 0);
+ field's mode, not the computed mode.
+ If a MEM has VOIDmode (external with incomplete type),
+ use BLKmode for it instead. */
+ if (MEM_P (to_rtx))
+ {
+ if (volatilep && flag_strict_volatile_bitfields > 0)
+ to_rtx = adjust_address (to_rtx, mode1, 0);
+ else if (GET_MODE (to_rtx) == VOIDmode)
+ to_rtx = adjust_address (to_rtx, BLKmode, 0);
+ }
if (offset != 0)
{
/* Handle expand_expr of a complex value returning a CONCAT. */
else if (GET_CODE (to_rtx) == CONCAT)
{
- if (COMPLEX_MODE_P (TYPE_MODE (TREE_TYPE (from))))
+ unsigned short mode_bitsize = GET_MODE_BITSIZE (GET_MODE (to_rtx));
+ if (COMPLEX_MODE_P (TYPE_MODE (TREE_TYPE (from)))
+ && bitpos == 0
+ && bitsize == mode_bitsize)
+ result = store_expr (from, to_rtx, false, nontemporal);
+ else if (bitsize == mode_bitsize / 2
+ && (bitpos == 0 || bitpos == mode_bitsize / 2))
+ result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
+ nontemporal);
+ else if (bitpos + bitsize <= mode_bitsize / 2)
+ result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
+ mode1, from, TREE_TYPE (tem),
+ get_alias_set (to), nontemporal);
+ else if (bitpos >= mode_bitsize / 2)
+ result = store_field (XEXP (to_rtx, 1), bitsize,
+ bitpos - mode_bitsize / 2, mode1, from,
+ TREE_TYPE (tem), get_alias_set (to),
+ nontemporal);
+ else if (bitpos == 0 && bitsize == mode_bitsize)
{
- gcc_assert (bitpos == 0);
- result = store_expr (from, to_rtx, false, nontemporal);
+ rtx from_rtx;
+ result = expand_normal (from);
+ from_rtx = simplify_gen_subreg (GET_MODE (to_rtx), result,
+ TYPE_MODE (TREE_TYPE (from)), 0);
+ emit_move_insn (XEXP (to_rtx, 0),
+ read_complex_part (from_rtx, false));
+ emit_move_insn (XEXP (to_rtx, 1),
+ read_complex_part (from_rtx, true));
}
else
{
- gcc_assert (bitpos == 0 || bitpos == GET_MODE_BITSIZE (mode1));
- result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
- nontemporal);
+ rtx temp = assign_stack_temp (GET_MODE (to_rtx),
+ GET_MODE_SIZE (GET_MODE (to_rtx)),
+ 0);
+ write_complex_part (temp, XEXP (to_rtx, 0), false);
+ write_complex_part (temp, XEXP (to_rtx, 1), true);
+ result = store_field (temp, bitsize, bitpos, mode1, from,
+ TREE_TYPE (tem), get_alias_set (to),
+ nontemporal);
+ emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
+ emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
}
}
else
return;
}
- else if (TREE_CODE (to) == MISALIGNED_INDIRECT_REF)
- {
- addr_space_t as = ADDR_SPACE_GENERIC;
- enum machine_mode mode, op_mode1;
- enum insn_code icode;
- rtx reg, addr, mem, insn;
-
- if (POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (to, 0))))
- as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (to, 0))));
-
- reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
- reg = force_not_mem (reg);
-
- mode = TYPE_MODE (TREE_TYPE (to));
- addr = expand_expr (TREE_OPERAND (to, 0), NULL_RTX, VOIDmode,
- EXPAND_SUM);
- addr = memory_address_addr_space (mode, addr, as);
- mem = gen_rtx_MEM (mode, addr);
-
- set_mem_attributes (mem, to, 0);
- set_mem_addr_space (mem, as);
-
- icode = optab_handler (movmisalign_optab, mode);
- gcc_assert (icode != CODE_FOR_nothing);
-
- op_mode1 = insn_data[icode].operand[1].mode;
- if (! (*insn_data[icode].operand[1].predicate) (reg, op_mode1)
- && op_mode1 != VOIDmode)
- reg = copy_to_mode_reg (op_mode1, reg);
-
- insn = GEN_FCN (icode) (mem, reg);
- emit_insn (insn);
- return;
- }
-
/* If the rhs is a function call and its value is not an aggregate,
call the function before we start to compute the lhs.
This is needed for correct code for cases such as
bool
emit_storent_insn (rtx to, rtx from)
{
- enum machine_mode mode = GET_MODE (to), imode;
+ struct expand_operand ops[2];
+ enum machine_mode mode = GET_MODE (to);
enum insn_code code = optab_handler (storent_optab, mode);
- rtx pattern;
if (code == CODE_FOR_nothing)
return false;
- imode = insn_data[code].operand[0].mode;
- if (!insn_data[code].operand[0].predicate (to, imode))
- return false;
-
- imode = insn_data[code].operand[1].mode;
- if (!insn_data[code].operand[1].predicate (from, imode))
- {
- from = copy_to_mode_reg (imode, from);
- if (!insn_data[code].operand[1].predicate (from, imode))
- return false;
- }
-
- pattern = GEN_FCN (code) (to, from);
- if (pattern == NULL_RTX)
- return false;
-
- emit_insn (pattern);
- return true;
+ create_fixed_operand (&ops[0], to);
+ create_input_operand (&ops[1], from, mode);
+ return maybe_expand_insn (code, 2, ops);
}
/* Generate code for computing expression EXP,
return NULL_RTX;
}
- else if (TREE_CODE (exp) == STRING_CST
+ else if ((TREE_CODE (exp) == STRING_CST
+ || (TREE_CODE (exp) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ == STRING_CST
+ && integer_zerop (TREE_OPERAND (exp, 1))))
&& !nontemporal && !call_param_p
- && TREE_STRING_LENGTH (exp) > 0
- && TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
+ && MEM_P (target))
{
/* Optimize initialization of an array with a STRING_CST. */
HOST_WIDE_INT exp_len, str_copy_len;
rtx dest_mem;
+ tree str = TREE_CODE (exp) == STRING_CST
+ ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
exp_len = int_expr_size (exp);
if (exp_len <= 0)
goto normal_expr;
- str_copy_len = strlen (TREE_STRING_POINTER (exp));
- if (str_copy_len < TREE_STRING_LENGTH (exp) - 1)
- goto normal_expr;
-
- str_copy_len = TREE_STRING_LENGTH (exp);
- if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
- {
- str_copy_len += STORE_MAX_PIECES - 1;
- str_copy_len &= ~(STORE_MAX_PIECES - 1);
- }
- str_copy_len = MIN (str_copy_len, exp_len);
- if (!can_store_by_pieces (str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (exp)),
- MEM_ALIGN (target), false))
- goto normal_expr;
-
- dest_mem = target;
-
- dest_mem = store_by_pieces (dest_mem,
- str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (exp)),
- MEM_ALIGN (target), false,
- exp_len > str_copy_len ? 1 : 0);
- if (exp_len > str_copy_len)
- clear_storage (adjust_address (dest_mem, BLKmode, 0),
- GEN_INT (exp_len - str_copy_len),
- BLOCK_OP_NORMAL);
- return NULL_RTX;
- }
- else if (TREE_CODE (exp) == MEM_REF
- && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) == STRING_CST
- && integer_zerop (TREE_OPERAND (exp, 1))
- && !nontemporal && !call_param_p
- && TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
- {
- /* Optimize initialization of an array with a STRING_CST. */
- HOST_WIDE_INT exp_len, str_copy_len;
- rtx dest_mem;
- tree str = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
-
- exp_len = int_expr_size (exp);
- if (exp_len <= 0)
+ if (TREE_STRING_LENGTH (str) <= 0)
goto normal_expr;
str_copy_len = strlen (TREE_STRING_POINTER (str));
goto normal_expr;
str_copy_len = TREE_STRING_LENGTH (str);
- if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
+ if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0
+ && TREE_STRING_POINTER (str)[TREE_STRING_LENGTH (str) - 1] == '\0')
{
str_copy_len += STORE_MAX_PIECES - 1;
str_copy_len &= ~(STORE_MAX_PIECES - 1);
}
str_copy_len = MIN (str_copy_len, exp_len);
if (!can_store_by_pieces (str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (str)),
+ CONST_CAST (char *, TREE_STRING_POINTER (str)),
MEM_ALIGN (target), false))
goto normal_expr;
dest_mem = store_by_pieces (dest_mem,
str_copy_len, builtin_strncpy_read_str,
- CONST_CAST(char *, TREE_STRING_POINTER (str)),
+ CONST_CAST (char *,
+ TREE_STRING_POINTER (str)),
MEM_ALIGN (target), false,
exp_len > str_copy_len ? 1 : 0);
if (exp_len > str_copy_len)
/* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
	 but TARGET is not a valid memory reference, TEMP will differ
from TARGET although it is really the same location. */
- && !(alt_rtl && rtx_equal_p (alt_rtl, target))
+ && !(alt_rtl
+ && rtx_equal_p (alt_rtl, target)
+ && !side_effects_p (alt_rtl)
+ && !side_effects_p (target))
/* If there's nothing to copy, don't bother. Don't call
expr_size unless necessary, because some front-ends (C++)
expr_size-hook must not be given objects that are not
{
int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
if (GET_MODE (target) == BLKmode
- || GET_MODE (temp) == BLKmode)
+ && GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
(call_param_p
? BLOCK_OP_CALL_PARM
: BLOCK_OP_NORMAL));
+ else if (GET_MODE (target) == BLKmode)
+ store_bit_field (target, INTVAL (expr_size (exp)) * BITS_PER_UNIT,
+ 0, GET_MODE (temp), temp);
else
convert_move (target, temp, unsignedp);
}
int n_elts_here = tree_low_cst
(int_const_binop (TRUNC_DIV_EXPR,
TYPE_SIZE (TREE_TYPE (value)),
- TYPE_SIZE (elttype), 0), 1);
+ TYPE_SIZE (elttype)), 1);
count += n_elts_here;
if (mostly_zeros_p (value))
&& bitsize < (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (temp))
&& TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp,
- size_int (GET_MODE_BITSIZE (GET_MODE (temp))
- - bitsize),
+ GET_MODE_BITSIZE (GET_MODE (temp)) - bitsize,
NULL_RTX, 1);
/* Unless MODE is VOIDmode or BLKmode, convert TEMP to
if (to_rtx == target)
to_rtx = copy_rtx (to_rtx);
- MEM_SET_IN_STRUCT_P (to_rtx, 1);
+ if (!MEM_SCALAR_P (to_rtx))
+ MEM_IN_STRUCT_P (to_rtx) = 1;
if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
set_mem_alias_set (to_rtx, alias_set);
constructor_elt *ce;
unsigned HOST_WIDE_INT idx;
- for (idx = 0;
- VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce);
- idx++)
+ FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce)
if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
|| !safe_from_p (x, ce->value, 0))
return 0;
}
break;
- case MISALIGNED_INDIRECT_REF:
- case INDIRECT_REF:
+ case MEM_REF:
if (MEM_P (x)
&& alias_sets_conflict_p (MEM_ALIAS_SET (x),
get_alias_set (exp)))
tmp = convert_memory_address_addr_space (tmode, tmp, as);
if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
- result = gen_rtx_PLUS (tmode, result, tmp);
+ result = simplify_gen_binary (PLUS, tmode, result, tmp);
else
{
subtarget = bitpos ? NULL_RTX : target;
int ignore;
bool reduce_bit_field;
location_t loc = ops->location;
- tree treeop0, treeop1;
+ tree treeop0, treeop1, treeop2;
#define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
? reduce_to_bit_field_precision ((expr), \
target, \
treeop0 = ops->op0;
treeop1 = ops->op1;
+ treeop2 = ops->op2;
/* We should be called only on simple (binary or unary) expressions,
exactly those that are valid in gimple expressions that aren't
else if (CONSTANT_P (op0))
{
tree inner_type = TREE_TYPE (treeop0);
- enum machine_mode inner_mode = TYPE_MODE (inner_type);
+ enum machine_mode inner_mode = GET_MODE (op0);
+
+ if (inner_mode == VOIDmode)
+ inner_mode = TYPE_MODE (inner_type);
if (modifier == EXPAND_INITIALIZER)
op0 = simplify_gen_subreg (mode, op0, inner_mode,
}
}
+ /* Use TER to expand pointer addition of a negated value
+ as pointer subtraction. */
+ if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
+ || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
+ && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
+ && TREE_CODE (treeop1) == SSA_NAME
+ && TYPE_MODE (TREE_TYPE (treeop0))
+ == TYPE_MODE (TREE_TYPE (treeop1)))
+ {
+ gimple def = get_def_for_expr (treeop1, NEGATE_EXPR);
+ if (def)
+ {
+ treeop1 = gimple_assign_rhs1 (def);
+ code = MINUS_EXPR;
+ goto do_minus;
+ }
+ }
+
/* No sense saving up arithmetic to be done
if it's all in the wrong mode to form part of an address.
And force_operand won't know whether to sign-extend or
return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
case MINUS_EXPR:
+ do_minus:
/* For initializers, we are allowed to return a MINUS of two
symbolic constants. Here we handle all cases when both operands
are constant. */
case WIDEN_MULT_PLUS_EXPR:
case WIDEN_MULT_MINUS_EXPR:
expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (ops->op2);
+ op2 = expand_normal (treeop2);
target = expand_widen_pattern_expr (ops, op0, op1, op2,
target, unsignedp);
return target;
if (optab_handler (this_optab, mode) != CODE_FOR_nothing)
{
if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
- expand_operands (treeop0, treeop1, subtarget, &op0, &op1,
+ expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
EXPAND_NORMAL);
else
- expand_operands (treeop0, treeop1, subtarget, &op1, &op0,
+ expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
EXPAND_NORMAL);
goto binop3;
}
optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
- if (mode == GET_MODE_2XWIDER_MODE (innermode))
+ if (mode == GET_MODE_2XWIDER_MODE (innermode)
+ && TREE_CODE (treeop0) != INTEGER_CST)
{
if (optab_handler (this_optab, mode) != CODE_FOR_nothing)
{
expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
+ case FMA_EXPR:
+ {
+ optab opt = fma_optab;
+ gimple def0, def2;
+
+	/* If there is no insn for FMA, emit it as a __builtin_fma{,f,l}
+	   call.  */
+ if (optab_handler (fma_optab, mode) == CODE_FOR_nothing)
+ {
+ tree fn = mathfn_built_in (TREE_TYPE (treeop0), BUILT_IN_FMA);
+ tree call_expr;
+
+ gcc_assert (fn != NULL_TREE);
+ call_expr = build_call_expr (fn, 3, treeop0, treeop1, treeop2);
+ return expand_builtin (call_expr, target, subtarget, mode, false);
+ }
+
+ def0 = get_def_for_expr (treeop0, NEGATE_EXPR);
+ def2 = get_def_for_expr (treeop2, NEGATE_EXPR);
+
+ op0 = op2 = NULL;
+
+ if (def0 && def2
+ && optab_handler (fnms_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fnms_optab;
+ op0 = expand_normal (gimple_assign_rhs1 (def0));
+ op2 = expand_normal (gimple_assign_rhs1 (def2));
+ }
+ else if (def0
+ && optab_handler (fnma_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fnma_optab;
+ op0 = expand_normal (gimple_assign_rhs1 (def0));
+ }
+ else if (def2
+ && optab_handler (fms_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fms_optab;
+ op2 = expand_normal (gimple_assign_rhs1 (def2));
+ }
+
+ if (op0 == NULL)
+ op0 = expand_expr (treeop0, subtarget, VOIDmode, EXPAND_NORMAL);
+ if (op2 == NULL)
+ op2 = expand_normal (treeop2);
+ op1 = expand_normal (treeop1);
+
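+	/* In summary, the NEGATE_EXPR defs recognized above select among
+	   the four fused alternatives:
+	     FMA_EXPR (a, b, c)    -> fma_optab   (a * b + c)
+	     FMA_EXPR (-a, b, c)   -> fnma_optab  (-a * b + c)
+	     FMA_EXPR (a, b, -c)   -> fms_optab   (a * b - c)
+	     FMA_EXPR (-a, b, -c)  -> fnms_optab  (-a * b - c)  */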
+ return expand_ternary_op (TYPE_MODE (type), opt,
+ op0, op1, op2, target, 0);
+ }
+
case MULT_EXPR:
/* If this is a fixed-point operation, then we cannot use the code
below because "expand_mult" doesn't support sat/no-sat fixed-point
target = 0;
op0 = expand_expr (treeop0, subtarget,
VOIDmode, EXPAND_NORMAL);
- temp = expand_shift (code, mode, op0, treeop1, target,
- unsignedp);
+ temp = expand_variable_shift (code, mode, op0, treeop1, target,
+ unsignedp);
if (code == LSHIFT_EXPR)
temp = REDUCE_BIT_FIELD (temp);
return temp;
op1 = gen_label_rtx ();
jumpifnot_1 (code, treeop0, treeop1, op1, -1);
- emit_move_insn (target, const1_rtx);
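+      /* For a signed 1-bit type the only nonzero value is -1, so "true"
+	 must be stored as all-ones rather than 1; the store-flag path
+	 further down uses the same convention.  */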
+ if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
+ emit_move_insn (target, constm1_rtx);
+ else
+ emit_move_insn (target, const1_rtx);
emit_label (op1);
return target;
case VEC_UNPACK_LO_EXPR:
{
op0 = expand_normal (treeop0);
- this_optab = optab_for_tree_code (code, type, optab_default);
temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
target, unsignedp);
gcc_assert (temp);
{
op0 = expand_normal (treeop0);
/* The signedness is determined from input operand. */
- this_optab = optab_for_tree_code (code,
- TREE_TYPE (treeop0),
- optab_default);
temp = expand_widen_pattern_expr
(ops, op0, NULL_RTX, NULL_RTX,
target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
mode = TYPE_MODE (TREE_TYPE (treeop0));
goto binop;
+ case DOT_PROD_EXPR:
+ {
+ tree oprnd0 = treeop0;
+ tree oprnd1 = treeop1;
+ tree oprnd2 = treeop2;
+ rtx op2;
+
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (oprnd2);
+ target = expand_widen_pattern_expr (ops, op0, op1, op2,
+ target, unsignedp);
+ return target;
+ }
+
+ case REALIGN_LOAD_EXPR:
+ {
+ tree oprnd0 = treeop0;
+ tree oprnd1 = treeop1;
+ tree oprnd2 = treeop2;
+ rtx op2;
+
+ this_optab = optab_for_tree_code (code, type, optab_default);
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (oprnd2);
+ temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
+ target, unsignedp);
+ gcc_assert (temp);
+ return temp;
+ }
+
default:
gcc_unreachable ();
}
int unsignedp;
enum machine_mode mode;
enum tree_code code = TREE_CODE (exp);
- optab this_optab;
rtx subtarget, original_target;
int ignore;
tree context;
{
temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
if (MEM_P (temp))
- temp = copy_to_reg (temp);
+ copy_to_reg (temp);
return const0_rtx;
}
NULL);
g = get_gimple_for_ssa_name (exp);
+ /* For EXPAND_INITIALIZER try harder to get something simpler. */
+ if (g == NULL
+ && modifier == EXPAND_INITIALIZER
+ && !SSA_NAME_IS_DEFAULT_DEF (exp)
+ && (optimize || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
+ && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
+ g = SSA_NAME_DEF_STMT (exp);
if (g)
return expand_expr_real (gimple_assign_rhs_to_tree (g), target, tmode,
modifier, NULL);
gcc_assert (decl_rtl);
decl_rtl = copy_rtx (decl_rtl);
/* Record writes to register variables. */
- if (modifier == EXPAND_WRITE && REG_P (decl_rtl)
- && REGNO (decl_rtl) < FIRST_PSEUDO_REGISTER)
- {
- int i = REGNO (decl_rtl);
- int nregs = hard_regno_nregs[i][GET_MODE (decl_rtl)];
- while (nregs)
- {
- SET_HARD_REG_BIT (crtl->asm_clobbers, i);
- i++;
- nregs--;
- }
- }
+ if (modifier == EXPAND_WRITE
+ && REG_P (decl_rtl)
+ && HARD_REGISTER_P (decl_rtl))
+ add_to_hard_reg_set (&crtl->asm_clobbers,
+ GET_MODE (decl_rtl), REGNO (decl_rtl));
      /* Ensure the variable is marked as used even if it doesn't go through
	 a parser.  If it hasn't been used yet, write out an external
gcc_assert (!context
|| context == current_function_decl
|| TREE_STATIC (exp)
+ || DECL_EXTERNAL (exp)
/* ??? C++ creates functions that are not TREE_STATIC. */
|| TREE_CODE (exp) == FUNCTION_DECL);
if (code == SSA_NAME
&& (g = SSA_NAME_DEF_STMT (ssa_name))
&& gimple_code (g) == GIMPLE_CALL)
- pmode = promote_function_mode (type, mode, &unsignedp,
- TREE_TYPE
- (TREE_TYPE (gimple_call_fn (g))),
- 2);
+ {
+ gcc_assert (!gimple_call_internal_p (g));
+ pmode = promote_function_mode (type, mode, &unsignedp,
+ gimple_call_fntype (g),
+ 2);
+ }
else
pmode = promote_decl_mode (exp, &unsignedp);
gcc_assert (GET_MODE (decl_rtl) == pmode);
return expand_constructor (exp, target, modifier, false);
- case MISALIGNED_INDIRECT_REF:
- case INDIRECT_REF:
+ case TARGET_MEM_REF:
{
- tree exp1 = treeop0;
- addr_space_t as = ADDR_SPACE_GENERIC;
-
- if (modifier != EXPAND_WRITE)
- {
- tree t;
-
- t = fold_read_from_constant_string (exp);
- if (t)
- return expand_expr (t, target, tmode, modifier);
- }
-
- if (POINTER_TYPE_P (TREE_TYPE (exp1)))
- as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp1)));
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
+ struct mem_address addr;
+ int icode, align;
- op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
+ get_address_description (exp, &addr);
+ op0 = addr_for_mem_ref (&addr, as, true);
op0 = memory_address_addr_space (mode, op0, as);
-
temp = gen_rtx_MEM (mode, op0);
-
set_mem_attributes (temp, exp, 0);
set_mem_addr_space (temp, as);
-
- /* Resolve the misalignment now, so that we don't have to remember
- to resolve it later. Of course, this only works for reads. */
- if (code == MISALIGNED_INDIRECT_REF)
+ align = MAX (TYPE_ALIGN (TREE_TYPE (exp)),
+ get_object_alignment (exp, BIGGEST_ALIGNMENT));
+ if (mode != BLKmode
+ && (unsigned) align < GET_MODE_ALIGNMENT (mode)
+	     /* If the target does not have special handling for unaligned
+		loads of MODE, it can use regular moves for them.  */
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
{
- int icode;
rtx reg, insn;
- gcc_assert (modifier == EXPAND_NORMAL
- || modifier == EXPAND_STACK_PARM);
-
- /* The vectorizer should have already checked the mode. */
- icode = optab_handler (movmisalign_optab, mode);
- gcc_assert (icode != CODE_FOR_nothing);
-
/* We've already validated the memory, and we're creating a
new pseudo destination. The predicates really can't fail. */
reg = gen_reg_rtx (mode);
/* Nor can the insn generator. */
insn = GEN_FCN (icode) (reg, temp);
+ gcc_assert (insn != NULL_RTX);
emit_insn (insn);
return reg;
}
-
return temp;
}
- case TARGET_MEM_REF:
- {
- addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
- struct mem_address addr;
- tree base;
-
- get_address_description (exp, &addr);
- op0 = addr_for_mem_ref (&addr, as, true);
- op0 = memory_address_addr_space (mode, op0, as);
- temp = gen_rtx_MEM (mode, op0);
- set_mem_attributes (temp, TMR_ORIGINAL (exp), 0);
- set_mem_addr_space (temp, as);
- base = get_base_address (TMR_ORIGINAL (exp));
- if (base
- && INDIRECT_REF_P (base)
- && TMR_BASE (exp)
- && TREE_CODE (TMR_BASE (exp)) == SSA_NAME
- && POINTER_TYPE_P (TREE_TYPE (TMR_BASE (exp))))
- {
- set_mem_expr (temp, build1 (INDIRECT_REF,
- TREE_TYPE (exp), TMR_BASE (exp)));
- set_mem_offset (temp, NULL_RTX);
- }
- }
- return temp;
-
case MEM_REF:
{
addr_space_t as
enum machine_mode address_mode;
tree base = TREE_OPERAND (exp, 0);
gimple def_stmt;
+ int icode, align;
/* Handle expansion of non-aliased memory with non-BLKmode. That
might end up in a register. */
if (TREE_CODE (base) == ADDR_EXPR)
address_mode = targetm.addr_space.address_mode (as);
base = TREE_OPERAND (exp, 0);
if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
- base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
- gimple_assign_rhs1 (def_stmt),
- gimple_assign_rhs2 (def_stmt));
- op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_NORMAL);
- op0 = convert_memory_address_addr_space (address_mode, op0, as);
+ {
+ tree mask = gimple_assign_rhs2 (def_stmt);
+ base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
+ gimple_assign_rhs1 (def_stmt), mask);
+ TREE_OPERAND (exp, 0) = base;
+ }
+ align = MAX (TYPE_ALIGN (TREE_TYPE (exp)),
+ get_object_alignment (exp, BIGGEST_ALIGNMENT));
+ op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
+ op0 = memory_address_addr_space (address_mode, op0, as);
if (!integer_zerop (TREE_OPERAND (exp, 1)))
{
rtx off
set_mem_addr_space (temp, as);
if (TREE_THIS_VOLATILE (exp))
MEM_VOLATILE_P (temp) = 1;
+ if (mode != BLKmode
+ && (unsigned) align < GET_MODE_ALIGNMENT (mode)
+	       /* If the target does not have special handling for unaligned
+		  loads of MODE, it can use regular moves for them.  */
+ && ((icode = optab_handler (movmisalign_optab, mode))
+ != CODE_FOR_nothing))
+ {
+ rtx reg, insn;
+
+ /* We've already validated the memory, and we're creating a
+ new pseudo destination. The predicates really can't fail. */
+ reg = gen_reg_rtx (mode);
+
+ /* Nor can the insn generator. */
+ insn = GEN_FCN (icode) (reg, temp);
+ emit_insn (insn);
+
+ return reg;
+ }
return temp;
}
&& TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
&& TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array)
&& TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK
- && targetm.binds_local_p (array))
+ && const_value_known_p (array))
{
if (TREE_CODE (index) == INTEGER_CST)
{
}
else
{
- tree count
- = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (imode) - bitsize);
+ int count = GET_MODE_BITSIZE (imode) - bitsize;
op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
target, 0);
HOST_WIDE_INT bitsize, bitpos;
tree offset;
int volatilep = 0, must_force_mem;
+ bool packedp = false;
tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
&mode1, &unsignedp, &volatilep, true);
rtx orig_op0, memloc;
infinitely recurse. */
gcc_assert (tem != exp);
+ if (TYPE_PACKED (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ || (TREE_CODE (TREE_OPERAND (exp, 1)) == FIELD_DECL
+ && DECL_PACKED (TREE_OPERAND (exp, 1))))
+ packedp = true;
+
/* If TEM's type is a union of variable size, pass TARGET to the inner
computation, since it will need a temporary and TARGET is known
	 to suffice.  This occurs in unchecked conversion in Ada.  */
/* If the bitfield is volatile, we want to access it in the
- field's mode, not the computed mode. */
- if (volatilep
- && GET_CODE (op0) == MEM
- && flag_strict_volatile_bitfields > 0)
- op0 = adjust_address (op0, mode1, 0);
+ field's mode, not the computed mode.
+ If a MEM has VOIDmode (external with incomplete type),
+ use BLKmode for it instead. */
+ if (MEM_P (op0))
+ {
+ if (volatilep && flag_strict_volatile_bitfields > 0)
+ op0 = adjust_address (op0, mode1, 0);
+ else if (GET_MODE (op0) == VOIDmode)
+ op0 = adjust_address (op0, BLKmode, 0);
+ }
mode2
= CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
constant and we don't need a memory reference. */
if (CONSTANT_P (op0)
&& mode2 != BLKmode
- && LEGITIMATE_CONSTANT_P (op0)
+ && targetm.legitimate_constant_p (mode2, op0)
&& !must_force_mem)
op0 = force_reg (mode2, op0);
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER)
/* If the field is volatile, we always want an aligned
- access. */
- || (volatilep && flag_strict_volatile_bitfields > 0)
+ access. Only do this if the access is not already naturally
+ aligned, otherwise "normal" (non-bitfield) volatile fields
+ become non-addressable. */
+ || (volatilep && flag_strict_volatile_bitfields > 0
+ && (bitpos % GET_MODE_ALIGNMENT (mode) != 0))
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
|| (mode1 != BLKmode
if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
- op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
+ op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp, packedp,
(modifier == EXPAND_STACK_PARM
? NULL_RTX : target),
ext_mode, ext_mode);
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
&& bitsize < (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (op0)))
op0 = expand_shift (LSHIFT_EXPR, GET_MODE (op0), op0,
- size_int (GET_MODE_BITSIZE (GET_MODE (op0))
- - bitsize),
- op0, 1);
+ GET_MODE_BITSIZE (GET_MODE (op0))
+ - bitsize, op0, 1);
/* If the result type is BLKmode, store the data into a temporary
of the appropriate type, but with the mode corresponding to the
{
if (GET_CODE (op0) == SUBREG)
op0 = force_reg (GET_MODE (op0), op0);
- op0 = gen_lowpart (mode, op0);
+ temp = gen_lowpart_common (mode, op0);
+ if (temp)
+ op0 = temp;
+ else
+ {
+ if (!REG_P (op0) && !MEM_P (op0))
+ op0 = force_reg (GET_MODE (op0), op0);
+ op0 = gen_lowpart (mode, op0);
+ }
}
/* If both types are integral, convert from one mode to the other. */
else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
return expand_expr_real (treeop0, original_target, tmode,
modifier, alt_rtl);
- case REALIGN_LOAD_EXPR:
- {
- tree oprnd0 = treeop0;
- tree oprnd1 = treeop1;
- tree oprnd2 = treeop2;
- rtx op2;
-
- this_optab = optab_for_tree_code (code, type, optab_default);
- expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (oprnd2);
- temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
- target, unsignedp);
- gcc_assert (temp);
- return temp;
- }
-
- case DOT_PROD_EXPR:
- {
- tree oprnd0 = treeop0;
- tree oprnd1 = treeop1;
- tree oprnd2 = treeop2;
- rtx op2;
-
- expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (oprnd2);
- target = expand_widen_pattern_expr (&ops, op0, op1, op2,
- target, unsignedp);
- return target;
- }
-
case COMPOUND_LITERAL_EXPR:
{
/* Initialize the anonymous variable declared in the compound
}
else
{
- tree count = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (GET_MODE (exp)) - prec);
- exp = expand_shift (LSHIFT_EXPR, GET_MODE (exp), exp, count, target, 0);
- return expand_shift (RSHIFT_EXPR, GET_MODE (exp), exp, count, target, 0);
+ int count = GET_MODE_BITSIZE (GET_MODE (exp)) - prec;
+ exp = expand_shift (LSHIFT_EXPR, GET_MODE (exp),
+ exp, count, target, 0);
+ return expand_shift (RSHIFT_EXPR, GET_MODE (exp),
+ exp, count, target, 0);
}
}
\f
*ptr_offset = fold_convert (sizetype, offset);
return array;
}
- else if (TREE_CODE (array) == VAR_DECL)
+ else if (TREE_CODE (array) == VAR_DECL
+ || TREE_CODE (array) == CONST_DECL)
{
int length;
/* Variables initialized to string literals can be handled too. */
- if (DECL_INITIAL (array) == NULL_TREE
+ if (!const_value_known_p (array)
+ || !DECL_INITIAL (array)
|| TREE_CODE (DECL_INITIAL (array)) != STRING_CST)
return 0;
- /* If they are read-only, non-volatile and bind locally. */
- if (! TREE_READONLY (array)
- || TREE_SIDE_EFFECTS (array)
- || ! targetm.binds_local_p (array))
- return 0;
-
/* Avoid const char foo[4] = "abcde"; */
if (DECL_SIZE_UNIT (array) == NULL_TREE
|| TREE_CODE (DECL_SIZE_UNIT (array)) != INTEGER_CST
if ((code == NE || code == EQ)
&& TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1)
- && integer_pow2p (TREE_OPERAND (arg0, 1)))
+ && integer_pow2p (TREE_OPERAND (arg0, 1))
+ && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
{
tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
return expand_expr (fold_single_bit_test (loc,
/* Try a cstore if possible. */
return emit_store_flag_force (target, code, op0, op1,
- operand_mode, unsignedp, 1);
+ operand_mode, unsignedp,
+ (TYPE_PRECISION (ops->type) == 1
+ && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
}
\f
rtx table_label ATTRIBUTE_UNUSED, rtx default_label,
rtx fallback_label ATTRIBUTE_UNUSED)
{
+ struct expand_operand ops[5];
enum machine_mode index_mode = SImode;
int index_bits = GET_MODE_BITSIZE (index_mode);
rtx op1, op2, index;
- enum machine_mode op_mode;
if (! HAVE_casesi)
return 0;
do_pending_stack_adjust ();
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[0].mode;
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[0].predicate)
- (index, op_mode))
- index = copy_to_mode_reg (op_mode, index);
-
op1 = expand_normal (minval);
-
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[1].mode;
- op1 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (minval)),
- op1, TYPE_UNSIGNED (TREE_TYPE (minval)));
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[1].predicate)
- (op1, op_mode))
- op1 = copy_to_mode_reg (op_mode, op1);
-
op2 = expand_normal (range);
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[2].mode;
- op2 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (range)),
- op2, TYPE_UNSIGNED (TREE_TYPE (range)));
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[2].predicate)
- (op2, op_mode))
- op2 = copy_to_mode_reg (op_mode, op2);
-
- emit_jump_insn (gen_casesi (index, op1, op2,
- table_label, !default_label
- ? fallback_label : default_label));
+ create_input_operand (&ops[0], index, index_mode);
+ create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
+ create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
+ create_fixed_operand (&ops[3], table_label);
+ create_fixed_operand (&ops[4], (default_label
+ ? default_label
+ : fallback_label));
+ expand_jump_insn (CODE_FOR_casesi, 5, ops);
return 1;
}
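/* create_convert_operand_from_type records the signedness of the tree
   type along with the value, so the operand machinery widens or
   truncates OP1 and OP2 to whatever modes the casesi pattern demands;
   the explicit convert_modes/copy_to_mode_reg sequences above become
   unnecessary.  */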
return gen_rtx_CONST_VECTOR (mode, v);
}
-
-/* Build a decl for a EH personality function named NAME. */
+/* Build a decl for a personality function given a language prefix. */
tree
-build_personality_function (const char *name)
+build_personality_function (const char *lang)
{
+ const char *unwind_and_version;
tree decl, type;
+ char *name;
+
+ switch (targetm.except_unwind_info (&global_options))
+ {
+ case UI_NONE:
+ return NULL;
+ case UI_SJLJ:
+ unwind_and_version = "_sj0";
+ break;
+ case UI_DWARF2:
+ case UI_TARGET:
+ unwind_and_version = "_v0";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
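+  /* For example, the C++ front end passes "gxx", yielding
+     "__gxx_personality_v0" for DWARF-2 unwinding or
+     "__gxx_personality_sj0" for SJLJ.  */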
type = build_function_type_list (integer_type_node, integer_type_node,
long_long_unsigned_type_node,