tree, tree, int, int);
static void store_constructor (tree, rtx, int, HOST_WIDE_INT);
static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode,
- tree, enum machine_mode, int, tree, int);
+ tree, tree, int);
static unsigned HOST_WIDE_INT highest_pow2_factor (tree);
static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (tree, tree);
#endif
static void do_tablejump (rtx, enum machine_mode, rtx, rtx, rtx);
static rtx const_vector_from_tree (tree);
+static void write_complex_part (rtx, rtx, bool);
/* Record for each mode whether we can move a register directly to or
from an object of that mode in memory. If we can't, we won't try
rtx tmp = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
if (!tmp || !REG_P (tmp))
return false;
- if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode,
- NULL_TREE, 1))
+ if (targetm.calls.arg_partial_bytes (&args_so_far, mode, NULL, 1))
return false;
FUNCTION_ARG_ADVANCE (args_so_far, mode, NULL_TREE, 1);
}
return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
}
-/* Emit code to move a block ORIG_SRC of type TYPE to a block DST,
- where DST is non-consecutive registers represented by a PARALLEL.
- SSIZE represents the total size of block ORIG_SRC in bytes, or -1
- if not known. */
+/* A subroutine of emit_group_load. Arguments as for emit_group_load,
+ except that values are placed in TMPS[i], and must later be moved
+   into the corresponding XEXP (XVECEXP (DST, 0, i), 0) element.  */
-void
-emit_group_load (rtx dst, rtx orig_src, tree type ATTRIBUTE_UNUSED, int ssize)
+static void
+emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
{
- rtx *tmps, src;
+ rtx src;
int start, i;
+ enum machine_mode m = GET_MODE (orig_src);
gcc_assert (GET_CODE (dst) == PARALLEL);
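+
+  /* If the source is not a MEM, a CONCAT, or a scalar-int register (e.g.
+     it is a vector or float register), first copy it bit-for-bit into an
+     integer-mode pseudo, or into a stack temporary when no integer mode
+     of that size exists, so the pieces can be extracted from the copy.  */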
+ if (m != VOIDmode
+ && !SCALAR_INT_MODE_P (m)
+ && !MEM_P (orig_src)
+ && GET_CODE (orig_src) != CONCAT)
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (orig_src));
+ if (imode == BLKmode)
+ src = assign_stack_temp (GET_MODE (orig_src), ssize, 0);
+ else
+ src = gen_reg_rtx (imode);
+ if (imode != BLKmode)
+ src = gen_lowpart (GET_MODE (orig_src), src);
+ emit_move_insn (src, orig_src);
+ /* ...and back again. */
+ if (imode != BLKmode)
+ src = gen_lowpart (imode, src);
+ emit_group_load_1 (tmps, dst, src, type, ssize);
+ return;
+ }
+
/* Check for a NULL entry, used to indicate that the parameter goes
both on the stack and in registers. */
if (XEXP (XVECEXP (dst, 0, 0), 0))
else
start = 1;
- tmps = alloca (sizeof (rtx) * XVECLEN (dst, 0));
-
/* Process the pieces. */
for (i = start; i < XVECLEN (dst, 0); i++)
{
tmps[i] = gen_reg_rtx (mode);
emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
}
+ else if (COMPLEX_MODE_P (mode)
+ && GET_MODE (src) == mode
+ && bytelen == GET_MODE_SIZE (mode))
+ /* Let emit_move_complex do the bulk of the work. */
+ tmps[i] = src;
else if (GET_CODE (src) == CONCAT)
{
unsigned int slen = GET_MODE_SIZE (GET_MODE (src));
else
{
rtx mem;
-
+
gcc_assert (!bytepos);
mem = assign_stack_temp (GET_MODE (src), slen, 0);
emit_move_insn (mem, src);
- tmps[i] = adjust_address (mem, mode, 0);
+ tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
+ 0, 1, NULL_RTX, mode, mode);
}
}
/* FIXME: A SIMD parallel will eventually lead to a subreg of a
tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
build_int_cst (NULL_TREE, shift), tmps[i], 0);
}
+}
+
+/* Emit code to move a block SRC of type TYPE to a block DST,
+ where DST is non-consecutive registers represented by a PARALLEL.
+   SSIZE represents the total size of block SRC in bytes, or -1
+ if not known. */
+
+void
+emit_group_load (rtx dst, rtx src, tree type, int ssize)
+{
+ rtx *tmps;
+ int i;
+
+ tmps = alloca (sizeof (rtx) * XVECLEN (dst, 0));
+ emit_group_load_1 (tmps, dst, src, type, ssize);
/* Copy the extracted pieces into the proper (probable) hard regs. */
- for (i = start; i < XVECLEN (dst, 0); i++)
- emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0), tmps[i]);
+ for (i = 0; i < XVECLEN (dst, 0); i++)
+ {
+ rtx d = XEXP (XVECEXP (dst, 0, i), 0);
+ if (d == NULL)
+ continue;
+ emit_move_insn (d, tmps[i]);
+ }
+}
+
+/* Similar, but load SRC into new pseudos in a format that looks like
+ PARALLEL. This can later be fed to emit_group_move to get things
+ in the right place. */
+
+rtx
+emit_group_load_into_temps (rtx parallel, rtx src, tree type, int ssize)
+{
+ rtvec vec;
+ int i;
+
+ vec = rtvec_alloc (XVECLEN (parallel, 0));
+ emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);
+
+ /* Convert the vector to look just like the original PARALLEL, except
+ with the computed values. */
+ for (i = 0; i < XVECLEN (parallel, 0); i++)
+ {
+ rtx e = XVECEXP (parallel, 0, i);
+ rtx d = XEXP (e, 0);
+
+ if (d)
+ {
+ d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
+ e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
+ }
+ RTVEC_ELT (vec, i) = e;
+ }
+
+ return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
}
/* Emit code to move a block SRC to block DST, where SRC and DST are
XEXP (XVECEXP (src, 0, i), 0));
}
+/* Move a group of registers represented by a PARALLEL into pseudos. */
+
+rtx
+emit_group_move_into_temps (rtx src)
+{
+ rtvec vec = rtvec_alloc (XVECLEN (src, 0));
+ int i;
+
+ for (i = 0; i < XVECLEN (src, 0); i++)
+ {
+ rtx e = XVECEXP (src, 0, i);
+ rtx d = XEXP (e, 0);
+
+ if (d)
+ e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
+ RTVEC_ELT (vec, i) = e;
+ }
+
+ return gen_rtx_PARALLEL (GET_MODE (src), vec);
+}
+
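+/* A sketch of the intended use (locals here are hypothetical): load the
+   value into pseudos first, and move them into the hard registers only
+   once they may safely be clobbered:
+
+     temps = emit_group_load_into_temps (real_dst, mem, type, ssize);
+     ...
+     emit_group_move (real_dst, temps);  */
+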
/* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
where SRC is non-consecutive registers represented by a PARALLEL.
SSIZE represents the total size of block ORIG_DST, or -1 if not
{
rtx *tmps, dst;
int start, i;
+ enum machine_mode m = GET_MODE (orig_dst);
gcc_assert (GET_CODE (src) == PARALLEL);
+ if (!SCALAR_INT_MODE_P (m)
+ && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (orig_dst));
+ if (imode == BLKmode)
+ dst = assign_stack_temp (GET_MODE (orig_dst), ssize, 0);
+ else
+ dst = gen_reg_rtx (imode);
+ emit_group_store (dst, src, type, ssize);
+ if (imode != BLKmode)
+ dst = gen_lowpart (GET_MODE (orig_dst), dst);
+ emit_move_insn (orig_dst, dst);
+ return;
+ }
+
/* Check for a NULL entry, used to indicate that the parameter goes
both on the stack and in registers. */
if (XEXP (XVECEXP (src, 0, 0), 0))
rtx
clear_storage (rtx object, rtx size)
{
- rtx retval = 0;
- unsigned int align = (MEM_P (object) ? MEM_ALIGN (object)
- : GET_MODE_ALIGNMENT (GET_MODE (object)));
+ enum machine_mode mode = GET_MODE (object);
+ unsigned int align;
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
just move a zero. Otherwise, do this a piece at a time. */
- if (GET_MODE (object) != BLKmode
+ if (mode != BLKmode
&& GET_CODE (size) == CONST_INT
- && INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (object)))
- emit_move_insn (object, CONST0_RTX (GET_MODE (object)));
- else
+ && INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (mode))
{
- if (size == const0_rtx)
- ;
- else if (GET_CODE (size) == CONST_INT
- && CLEAR_BY_PIECES_P (INTVAL (size), align))
- clear_by_pieces (object, INTVAL (size), align);
- else if (clear_storage_via_clrmem (object, size, align))
- ;
- else
- retval = clear_storage_via_libcall (object, size);
+ rtx zero = CONST0_RTX (mode);
+ if (zero != NULL)
+ {
+ emit_move_insn (object, zero);
+ return NULL;
+ }
+
+ if (COMPLEX_MODE_P (mode))
+ {
+ zero = CONST0_RTX (GET_MODE_INNER (mode));
+ if (zero != NULL)
+ {
+ write_complex_part (object, zero, 0);
+ write_complex_part (object, zero, 1);
+ return NULL;
+ }
+ }
}
- return retval;
+ if (size == const0_rtx)
+ return NULL;
+
+ align = MEM_ALIGN (object);
+
+ if (GET_CODE (size) == CONST_INT
+ && CLEAR_BY_PIECES_P (INTVAL (size), align))
+ clear_by_pieces (object, INTVAL (size), align);
+ else if (clear_storage_via_clrmem (object, size, align))
+ ;
+ else
+ return clear_storage_via_libcall (object, size);
+
+ return NULL;
}
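+
+/* For instance, when OBJECT has a complex mode for which CONST0_RTX (mode)
+   is not available as a single constant, clear_storage above still avoids
+   a block clear: it stores CONST0_RTX of the component mode into the real
+   and imaginary parts via write_complex_part.  */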
/* A subroutine of clear_storage. Expand a clrmem pattern;
return block_clear_fn;
}
\f
-/* Generate code to copy Y into X.
- Both Y and X must have the same mode, except that
- Y can be a constant with VOIDmode.
- This mode cannot be BLKmode; use emit_block_move for that.
+/* Write to one of the components of the complex value CPLX. Write VAL to
+   the real part if IMAG_P is false, and the imaginary part if it's true.  */
- Return the last instruction emitted. */
+static void
+write_complex_part (rtx cplx, rtx val, bool imag_p)
+{
+ enum machine_mode cmode;
+ enum machine_mode imode;
+ unsigned ibitsize;
-rtx
-emit_move_insn (rtx x, rtx y)
+ if (GET_CODE (cplx) == CONCAT)
+ {
+ emit_move_insn (XEXP (cplx, imag_p), val);
+ return;
+ }
+
+ cmode = GET_MODE (cplx);
+ imode = GET_MODE_INNER (cmode);
+ ibitsize = GET_MODE_BITSIZE (imode);
+
+ /* If the sub-object is at least word sized, then we know that subregging
+ will work. This special case is important, since store_bit_field
+ wants to operate on integer modes, and there's rarely an OImode to
+ correspond to TCmode. */
+ if (ibitsize >= BITS_PER_WORD
+ /* For hard regs we have exact predicates. Assume we can split
+ the original object if it spans an even number of hard regs.
+ This special case is important for SCmode on 64-bit platforms
+ where the natural size of floating-point regs is 32-bit. */
+ || (GET_CODE (cplx) == REG
+ && REGNO (cplx) < FIRST_PSEUDO_REGISTER
+ && hard_regno_nregs[REGNO (cplx)][cmode] % 2 == 0)
+ /* For MEMs we always try to make a "subreg", that is to adjust
+ the MEM, because store_bit_field may generate overly
+ convoluted RTL for sub-word fields. */
+ || MEM_P (cplx))
+ {
+ rtx part = simplify_gen_subreg (imode, cplx, cmode,
+ imag_p ? GET_MODE_SIZE (imode) : 0);
+ if (part)
+ {
+ emit_move_insn (part, val);
+ return;
+ }
+ else
+ /* simplify_gen_subreg may fail for sub-word MEMs. */
+ gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
+ }
+
+ store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, imode, val);
+}
+
+/* Extract one of the components of the complex value CPLX. Extract the
+ real part if IMAG_P is false, and the imaginary part if it's true. */
+
+static rtx
+read_complex_part (rtx cplx, bool imag_p)
{
- enum machine_mode mode = GET_MODE (x);
- rtx y_cst = NULL_RTX;
- rtx last_insn, set;
+ enum machine_mode cmode, imode;
+ unsigned ibitsize;
- gcc_assert (mode != BLKmode
- && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
+ if (GET_CODE (cplx) == CONCAT)
+ return XEXP (cplx, imag_p);
- if (CONSTANT_P (y))
+ cmode = GET_MODE (cplx);
+ imode = GET_MODE_INNER (cmode);
+ ibitsize = GET_MODE_BITSIZE (imode);
+
+ /* Special case reads from complex constants that got spilled to memory. */
+ if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
{
- if (optimize
- && SCALAR_FLOAT_MODE_P (GET_MODE (x))
- && (last_insn = compress_float_constant (x, y)))
- return last_insn;
+ tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
+ if (decl && TREE_CODE (decl) == COMPLEX_CST)
+ {
+ tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
+ if (CONSTANT_CLASS_P (part))
+ return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
+ }
+ }
- y_cst = y;
+ /* If the sub-object is at least word sized, then we know that subregging
+ will work. This special case is important, since extract_bit_field
+ wants to operate on integer modes, and there's rarely an OImode to
+ correspond to TCmode. */
+ if (ibitsize >= BITS_PER_WORD
+ /* For hard regs we have exact predicates. Assume we can split
+ the original object if it spans an even number of hard regs.
+ This special case is important for SCmode on 64-bit platforms
+ where the natural size of floating-point regs is 32-bit. */
+ || (GET_CODE (cplx) == REG
+ && REGNO (cplx) < FIRST_PSEUDO_REGISTER
+ && hard_regno_nregs[REGNO (cplx)][cmode] % 2 == 0)
+ /* For MEMs we always try to make a "subreg", that is to adjust
+ the MEM, because extract_bit_field may generate overly
+ convoluted RTL for sub-word fields. */
+ || MEM_P (cplx))
+ {
+ rtx ret = simplify_gen_subreg (imode, cplx, cmode,
+ imag_p ? GET_MODE_SIZE (imode) : 0);
+ if (ret)
+ return ret;
+ else
+ /* simplify_gen_subreg may fail for sub-word MEMs. */
+ gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
+ }
- if (!LEGITIMATE_CONSTANT_P (y))
- {
- y = force_const_mem (mode, y);
+ return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
+ true, NULL_RTX, imode, imode);
+}
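+
+/* Together, read_complex_part and write_complex_part allow a complex
+   copy to be emitted one component at a time, as emit_move_complex
+   does below:
+
+     write_complex_part (x, read_complex_part (y, false), false);
+     write_complex_part (x, read_complex_part (y, true), true);
+
+   whatever the representation of X and Y (CONCAT, register, or MEM).  */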
+\f
+/* A subroutine of emit_move_via_alt_mode. Yet another lowpart generator.
+ NEW_MODE and OLD_MODE are the same size. Return NULL if X cannot be
+ represented in NEW_MODE. */
- /* If the target's cannot_force_const_mem prevented the spill,
- assume that the target's move expanders will also take care
- of the non-legitimate constant. */
- if (!y)
- y = y_cst;
- }
+static rtx
+emit_move_change_mode (enum machine_mode new_mode,
+ enum machine_mode old_mode, rtx x)
+{
+ rtx ret;
+
+ if (reload_in_progress && MEM_P (x))
+ {
+ /* We can't use gen_lowpart here because it may call change_address
+ which is not appropriate if we were called when a reload was in
+ progress. We don't have to worry about changing the address since
+ the size in bytes is supposed to be the same. Copy the MEM to
+ change the mode and move any substitutions from the old MEM to
+ the new one. */
+
+ ret = adjust_address_nv (x, new_mode, 0);
+ copy_replacements (x, ret);
+ }
+ else
+ {
+ /* Note that we do want simplify_subreg's behaviour of validating
+ that the new mode is ok for a hard register. If we were to use
+ simplify_gen_subreg, we would create the subreg, but would
+ probably run into the target not being able to implement it. */
+ ret = simplify_subreg (new_mode, x, old_mode, 0);
}
- /* If X or Y are memory references, verify that their addresses are valid
- for the machine. */
- if (MEM_P (x)
- && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
- && ! push_operand (x, GET_MODE (x)))
- || (flag_force_addr
- && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
- x = validize_mem (x);
+ return ret;
+}
- if (MEM_P (y)
- && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
- || (flag_force_addr
- && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
- y = validize_mem (y);
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X using
+ ALT_MODE instead of the operand's natural mode, MODE. CODE is the insn
+ code for the move in ALT_MODE, and is known to be valid. Returns the
+ instruction emitted, or NULL if X or Y cannot be represented in ALT_MODE. */
- gcc_assert (mode != BLKmode);
+static rtx
+emit_move_via_alt_mode (enum machine_mode alt_mode, enum machine_mode mode,
+ enum insn_code code, rtx x, rtx y)
+{
+ x = emit_move_change_mode (alt_mode, mode, x);
+ if (x == NULL_RTX)
+ return NULL_RTX;
+ y = emit_move_change_mode (alt_mode, mode, y);
+ if (y == NULL_RTX)
+ return NULL_RTX;
+ return emit_insn (GEN_FCN (code) (x, y));
+}
- last_insn = emit_move_insn_1 (x, y);
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X using
+ an integer mode of the same size as MODE. Returns the instruction
+ emitted, or NULL if such a move could not be generated. */
- if (y_cst && REG_P (x)
- && (set = single_set (last_insn)) != NULL_RTX
- && SET_DEST (set) == x
- && ! rtx_equal_p (y_cst, SET_SRC (set)))
- set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
+static rtx
+emit_move_via_integer (enum machine_mode mode, rtx x, rtx y)
+{
+ enum machine_mode imode;
+ enum insn_code code;
- return last_insn;
+ /* There must exist a mode of the exact size we require. */
+ imode = int_mode_for_mode (mode);
+ if (imode == BLKmode)
+ return NULL_RTX;
+
+ /* The target must support moves in this mode. */
+ code = mov_optab->handlers[imode].insn_code;
+ if (code == CODE_FOR_nothing)
+ return NULL_RTX;
+
+ return emit_move_via_alt_mode (imode, mode, code, x, y);
}
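+
+/* For example, a target that defines movsi but not movsf can still move
+   SFmode values here: SImode has the same size, so once both operands
+   are changed to SImode the move goes through the movsi pattern.  */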
-/* Low level part of emit_move_insn.
- Called just like emit_move_insn, but assumes X and Y
- are basically valid. */
+/* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
+ Return an equivalent MEM that does not use an auto-increment. */
-rtx
-emit_move_insn_1 (rtx x, rtx y)
+static rtx
+emit_move_resolve_push (enum machine_mode mode, rtx x)
{
- enum machine_mode mode = GET_MODE (x);
- enum machine_mode submode;
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum rtx_code code = GET_CODE (XEXP (x, 0));
+ HOST_WIDE_INT adjust;
+ rtx temp;
- gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
+ adjust = GET_MODE_SIZE (mode);
+#ifdef PUSH_ROUNDING
+ adjust = PUSH_ROUNDING (adjust);
+#endif
+ if (code == PRE_DEC || code == POST_DEC)
+ adjust = -adjust;
- if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
- return
- emit_insn (GEN_FCN (mov_optab->handlers[(int) mode].insn_code) (x, y));
+ /* Do not use anti_adjust_stack, since we don't want to update
+ stack_pointer_delta. */
+ temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
+ GEN_INT (adjust), stack_pointer_rtx,
+ 0, OPTAB_LIB_WIDEN);
+ if (temp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, temp);
- /* Expand complex moves by moving real part and imag part, if possible. */
- else if ((class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
- && BLKmode != (submode = GET_MODE_INNER (mode))
- && (mov_optab->handlers[(int) submode].insn_code
- != CODE_FOR_nothing))
+ switch (code)
{
- /* Don't split destination if it is a stack push. */
- int stack = push_operand (x, GET_MODE (x));
+ case PRE_INC:
+ case PRE_DEC:
+ temp = stack_pointer_rtx;
+ break;
+ case POST_INC:
+ temp = plus_constant (stack_pointer_rtx, -GET_MODE_SIZE (mode));
+ break;
+ case POST_DEC:
+ temp = plus_constant (stack_pointer_rtx, GET_MODE_SIZE (mode));
+ break;
+ default:
+ gcc_unreachable ();
+ }
-#ifdef PUSH_ROUNDING
- /* In case we output to the stack, but the size is smaller than the
- machine can push exactly, we need to use move instructions. */
- if (stack
- && (PUSH_ROUNDING (GET_MODE_SIZE (submode))
- != GET_MODE_SIZE (submode)))
- {
- rtx temp;
- HOST_WIDE_INT offset1, offset2;
+ return replace_equiv_address (x, temp);
+}
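+
+/* For example, (mem:SI (pre_dec (reg sp))) is rewritten as an explicit
+   sp = sp - 4 (rounded by PUSH_ROUNDING), followed by a plain
+   (mem:SI (reg sp)) that the caller can store into normally.  */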
- /* Do not use anti_adjust_stack, since we don't want to update
- stack_pointer_delta. */
- temp = expand_binop (Pmode,
-#ifdef STACK_GROWS_DOWNWARD
- sub_optab,
-#else
- add_optab,
-#endif
- stack_pointer_rtx,
- GEN_INT
- (PUSH_ROUNDING
- (GET_MODE_SIZE (GET_MODE (x)))),
- stack_pointer_rtx, 0, OPTAB_LIB_WIDEN);
+/* A subroutine of emit_move_complex. Generate a move from Y into X.
+ X is known to satisfy push_operand, and MODE is known to be complex.
+ Returns the last instruction emitted. */
- if (temp != stack_pointer_rtx)
- emit_move_insn (stack_pointer_rtx, temp);
+static rtx
+emit_move_complex_push (enum machine_mode mode, rtx x, rtx y)
+{
+ enum machine_mode submode = GET_MODE_INNER (mode);
+ bool imag_first;
-#ifdef STACK_GROWS_DOWNWARD
- offset1 = 0;
- offset2 = GET_MODE_SIZE (submode);
-#else
- offset1 = -PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x)));
- offset2 = (-PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x)))
- + GET_MODE_SIZE (submode));
-#endif
+#ifdef PUSH_ROUNDING
+ unsigned int submodesize = GET_MODE_SIZE (submode);
- emit_move_insn (change_address (x, submode,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (offset1))),
- gen_realpart (submode, y));
- emit_move_insn (change_address (x, submode,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (offset2))),
- gen_imagpart (submode, y));
- }
- else
+ /* In case we output to the stack, but the size is smaller than the
+ machine can push exactly, we need to use move instructions. */
+ if (PUSH_ROUNDING (submodesize) != submodesize)
+ {
+ x = emit_move_resolve_push (mode, x);
+ return emit_move_insn (x, y);
+ }
#endif
- /* If this is a stack, push the highpart first, so it
- will be in the argument order.
- In that case, change_address is used only to convert
- the mode, not to change the address. */
- if (stack)
- {
- /* Note that the real part always precedes the imag part in memory
- regardless of machine's endianness. */
-#ifdef STACK_GROWS_DOWNWARD
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_imagpart (submode, y));
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_realpart (submode, y));
-#else
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_realpart (submode, y));
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- gen_imagpart (submode, y));
-#endif
- }
- else
- {
- rtx realpart_x, realpart_y;
- rtx imagpart_x, imagpart_y;
-
- /* If this is a complex value with each part being smaller than a
- word, the usual calling sequence will likely pack the pieces into
- a single register. Unfortunately, SUBREG of hard registers only
- deals in terms of words, so we have a problem converting input
- arguments to the CONCAT of two registers that is used elsewhere
- for complex values. If this is before reload, we can copy it into
- memory and reload. FIXME, we should see about using extract and
- insert on integer registers, but complex short and complex char
- variables should be rarely used. */
- if (GET_MODE_BITSIZE (mode) < 2 * BITS_PER_WORD
- && (reload_in_progress | reload_completed) == 0)
- {
- int packed_dest_p
- = (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER);
- int packed_src_p
- = (REG_P (y) && REGNO (y) < FIRST_PSEUDO_REGISTER);
+ /* Note that the real part always precedes the imag part in memory
+ regardless of machine's endianness. */
+ switch (GET_CODE (XEXP (x, 0)))
+ {
+ case PRE_DEC:
+ case POST_DEC:
+ imag_first = true;
+ break;
+ case PRE_INC:
+ case POST_INC:
+ imag_first = false;
+ break;
+ default:
+ gcc_unreachable ();
+ }
- if (packed_dest_p || packed_src_p)
- {
- enum mode_class reg_class = ((class == MODE_COMPLEX_FLOAT)
- ? MODE_FLOAT : MODE_INT);
-
- enum machine_mode reg_mode
- = mode_for_size (GET_MODE_BITSIZE (mode), reg_class, 1);
-
- if (reg_mode != BLKmode)
- {
- rtx mem = assign_stack_temp (reg_mode,
- GET_MODE_SIZE (mode), 0);
- rtx cmem = adjust_address (mem, mode, 0);
-
- if (packed_dest_p)
- {
- rtx sreg = gen_rtx_SUBREG (reg_mode, x, 0);
-
- emit_move_insn_1 (cmem, y);
- return emit_move_insn_1 (sreg, mem);
- }
- else
- {
- rtx sreg = gen_rtx_SUBREG (reg_mode, y, 0);
-
- emit_move_insn_1 (mem, sreg);
- return emit_move_insn_1 (x, cmem);
- }
- }
- }
- }
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ read_complex_part (y, imag_first));
+ return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ read_complex_part (y, !imag_first));
+}
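+
+/* Note that for a pre- or post-decrement push the imaginary part is
+   pushed first, so that the real part still ends up at the lower
+   address, preserving the in-memory layout of complex values.  */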
- realpart_x = gen_realpart (submode, x);
- realpart_y = gen_realpart (submode, y);
- imagpart_x = gen_imagpart (submode, x);
- imagpart_y = gen_imagpart (submode, y);
-
- /* Show the output dies here. This is necessary for SUBREGs
- of pseudos since we cannot track their lifetimes correctly;
- hard regs shouldn't appear here except as return values.
- We never want to emit such a clobber after reload. */
- if (x != y
- && ! (reload_in_progress || reload_completed)
- && (GET_CODE (realpart_x) == SUBREG
- || GET_CODE (imagpart_x) == SUBREG))
- emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
-
- emit_move_insn (realpart_x, realpart_y);
- emit_move_insn (imagpart_x, imagpart_y);
- }
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
+ MODE is known to be complex. Returns the last instruction emitted. */
+static rtx
+emit_move_complex (enum machine_mode mode, rtx x, rtx y)
+{
+ bool try_int;
+
+ /* Need to take special care for pushes, to maintain proper ordering
+ of the data, and possibly extra padding. */
+ if (push_operand (x, mode))
+ return emit_move_complex_push (mode, x, y);
+
+  /* For memory to memory moves, optimal behaviour can be had with the
+ existing block move logic. */
+ if (MEM_P (x) && MEM_P (y))
+ {
+ emit_block_move (x, y, GEN_INT (GET_MODE_SIZE (mode)),
+ BLOCK_OP_NO_LIBCALL);
return get_last_insn ();
}
- /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
- find a mode to do it in. If we have a movcc, use it. Otherwise,
- find the MODE_INT mode of the same width. */
- else if (GET_MODE_CLASS (mode) == MODE_CC
- && mov_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
+ /* See if we can coerce the target into moving both values at once. */
+
+ /* Not possible if the values are inherently not adjacent. */
+ if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
+ try_int = false;
+ /* Is possible if both are registers (or subregs of registers). */
+ else if (register_operand (x, mode) && register_operand (y, mode))
+ try_int = true;
+ /* If one of the operands is a memory, and alignment constraints
+ are friendly enough, we may be able to do combined memory operations.
+ We do not attempt this if Y is a constant because that combination is
+ usually better with the by-parts thing below. */
+ else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
+ && (!STRICT_ALIGNMENT
+ || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
+ try_int = true;
+ else
+ try_int = false;
+
+ if (try_int)
{
- enum insn_code insn_code;
- enum machine_mode tmode = VOIDmode;
- rtx x1 = x, y1 = y;
+ rtx ret = emit_move_via_integer (mode, x, y);
+ if (ret)
+ return ret;
+ }
- if (mode != CCmode
- && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing)
- tmode = CCmode;
- else
- for (tmode = QImode; tmode != VOIDmode;
- tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode))
- break;
+ /* Show the output dies here. This is necessary for SUBREGs
+ of pseudos since we cannot track their lifetimes correctly;
+ hard regs shouldn't appear here except as return values. */
+ if (!reload_completed && !reload_in_progress
+ && REG_P (x) && !reg_overlap_mentioned_p (x, y))
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
- gcc_assert (tmode != VOIDmode);
+ write_complex_part (x, read_complex_part (y, false), false);
+ write_complex_part (x, read_complex_part (y, true), true);
+ return get_last_insn ();
+}
- /* Get X and Y in TMODE. We can't use gen_lowpart here because it
- may call change_address which is not appropriate if we were
- called when a reload was in progress. We don't have to worry
- about changing the address since the size in bytes is supposed to
- be the same. Copy the MEM to change the mode and move any
- substitutions from the old MEM to the new one. */
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
+ MODE is known to be MODE_CC. Returns the last instruction emitted. */
- if (reload_in_progress)
- {
- x = gen_lowpart_common (tmode, x1);
- if (x == 0 && MEM_P (x1))
- {
- x = adjust_address_nv (x1, tmode, 0);
- copy_replacements (x1, x);
- }
+static rtx
+emit_move_ccmode (enum machine_mode mode, rtx x, rtx y)
+{
+ rtx ret;
- y = gen_lowpart_common (tmode, y1);
- if (y == 0 && MEM_P (y1))
- {
- y = adjust_address_nv (y1, tmode, 0);
- copy_replacements (y1, y);
- }
- }
- else
+ /* Assume all MODE_CC modes are equivalent; if we have movcc, use it. */
+ if (mode != CCmode)
+ {
+ enum insn_code code = mov_optab->handlers[CCmode].insn_code;
+ if (code != CODE_FOR_nothing)
+ return emit_move_via_alt_mode (CCmode, mode, code, x, y);
+ }
+
+ /* Otherwise, find the MODE_INT mode of the same width. */
+ ret = emit_move_via_integer (mode, x, y);
+ gcc_assert (ret != NULL);
+ return ret;
+}
+
+/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
+ MODE is any multi-word or full-word mode that lacks a move_insn
+ pattern. Note that you will get better code if you define such
+ patterns, even if they must turn into multiple assembler instructions. */
+
+static rtx
+emit_move_multi_word (enum machine_mode mode, rtx x, rtx y)
+{
+ rtx last_insn = 0;
+ rtx seq, inner;
+ bool need_clobber;
+ int i;
+
+ gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD);
+
+ /* If X is a push on the stack, do the push now and replace
+ X with a reference to the stack pointer. */
+ if (push_operand (x, mode))
+ x = emit_move_resolve_push (mode, x);
+
+ /* If we are in reload, see if either operand is a MEM whose address
+ is scheduled for replacement. */
+ if (reload_in_progress && MEM_P (x)
+ && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
+ x = replace_equiv_address_nv (x, inner);
+ if (reload_in_progress && MEM_P (y)
+ && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
+ y = replace_equiv_address_nv (y, inner);
+
+ start_sequence ();
+
+ need_clobber = false;
+ for (i = 0;
+ i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+ i++)
+ {
+ rtx xpart = operand_subword (x, i, 1, mode);
+ rtx ypart = operand_subword (y, i, 1, mode);
+
+ /* If we can't get a part of Y, put Y into memory if it is a
+ constant. Otherwise, force it into a register. If we still
+ can't get a part of Y, abort. */
+ if (ypart == 0 && CONSTANT_P (y))
{
- x = gen_lowpart (tmode, x);
- y = gen_lowpart (tmode, y);
+ y = force_const_mem (mode, y);
+ ypart = operand_subword (y, i, 1, mode);
}
+ else if (ypart == 0)
+ ypart = operand_subword_force (y, i, mode);
- insn_code = mov_optab->handlers[(int) tmode].insn_code;
- return emit_insn (GEN_FCN (insn_code) (x, y));
+ gcc_assert (xpart && ypart);
+
+ need_clobber |= (GET_CODE (xpart) == SUBREG);
+
+ last_insn = emit_move_insn (xpart, ypart);
}
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Show the output dies here. This is necessary for SUBREGs
+ of pseudos since we cannot track their lifetimes correctly;
+ hard regs shouldn't appear here except as return values.
+ We never want to emit such a clobber after reload. */
+ if (x != y
+ && ! (reload_in_progress || reload_completed)
+ && need_clobber != 0)
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+
+ emit_insn (seq);
+
+ return last_insn;
+}
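+
+/* For example, a TImode move on a 32-bit target with no movti pattern
+   is emitted here as four word-sized moves via operand_subword, with a
+   CLOBBER of X first when any destination word turned out to be a
+   SUBREG of a pseudo.  */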
+
+/* Low level part of emit_move_insn.
+ Called just like emit_move_insn, but assumes X and Y
+ are basically valid. */
+
+rtx
+emit_move_insn_1 (rtx x, rtx y)
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum insn_code code;
+
+ gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
+
+ code = mov_optab->handlers[mode].insn_code;
+ if (code != CODE_FOR_nothing)
+ return emit_insn (GEN_FCN (code) (x, y));
+
+ /* Expand complex moves by moving real part and imag part. */
+ if (COMPLEX_MODE_P (mode))
+ return emit_move_complex (mode, x, y);
+
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ return emit_move_ccmode (mode, x, y);
+
/* Try using a move pattern for the corresponding integer mode. This is
only safe when simplify_subreg can convert MODE constants into integer
constants. At present, it can only do this reliably if the value
fits within a HOST_WIDE_INT. */
- else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (submode = int_mode_for_mode (mode)) != BLKmode
- && mov_optab->handlers[submode].insn_code != CODE_FOR_nothing)
- return emit_insn (GEN_FCN (mov_optab->handlers[submode].insn_code)
- (simplify_gen_subreg (submode, x, mode, 0),
- simplify_gen_subreg (submode, y, mode, 0)));
-
- /* This will handle any multi-word or full-word mode that lacks a move_insn
- pattern. However, you will get better code if you define such patterns,
- even if they must turn into multiple assembler instructions. */
- else
+ if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
- rtx last_insn = 0;
- rtx seq, inner;
- int need_clobber;
- int i;
-
- gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD);
-
-#ifdef PUSH_ROUNDING
-
- /* If X is a push on the stack, do the push now and replace
- X with a reference to the stack pointer. */
- if (push_operand (x, GET_MODE (x)))
- {
- rtx temp;
- enum rtx_code code;
+ rtx ret = emit_move_via_integer (mode, x, y);
+ if (ret)
+ return ret;
+ }
- /* Do not use anti_adjust_stack, since we don't want to update
- stack_pointer_delta. */
- temp = expand_binop (Pmode,
-#ifdef STACK_GROWS_DOWNWARD
- sub_optab,
-#else
- add_optab,
-#endif
- stack_pointer_rtx,
- GEN_INT
- (PUSH_ROUNDING
- (GET_MODE_SIZE (GET_MODE (x)))),
- stack_pointer_rtx, 0, OPTAB_LIB_WIDEN);
-
- if (temp != stack_pointer_rtx)
- emit_move_insn (stack_pointer_rtx, temp);
-
- code = GET_CODE (XEXP (x, 0));
-
- /* Just hope that small offsets off SP are OK. */
- if (code == POST_INC)
- temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- GEN_INT (-((HOST_WIDE_INT)
- GET_MODE_SIZE (GET_MODE (x)))));
- else if (code == POST_DEC)
- temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- GEN_INT (GET_MODE_SIZE (GET_MODE (x))));
- else
- temp = stack_pointer_rtx;
+ return emit_move_multi_word (mode, x, y);
+}
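+
+/* The cases above are tried in order: the target's mov<mode> pattern,
+   the complex and MODE_CC special cases, a same-sized integer mode,
+   and finally the word-at-a-time fallback in emit_move_multi_word.  */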
- x = change_address (x, VOIDmode, temp);
- }
-#endif
+/* Generate code to copy Y into X.
+ Both Y and X must have the same mode, except that
+ Y can be a constant with VOIDmode.
+ This mode cannot be BLKmode; use emit_block_move for that.
- /* If we are in reload, see if either operand is a MEM whose address
- is scheduled for replacement. */
- if (reload_in_progress && MEM_P (x)
- && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
- x = replace_equiv_address_nv (x, inner);
- if (reload_in_progress && MEM_P (y)
- && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
- y = replace_equiv_address_nv (y, inner);
+ Return the last instruction emitted. */
- start_sequence ();
+rtx
+emit_move_insn (rtx x, rtx y)
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx y_cst = NULL_RTX;
+ rtx last_insn, set;
- need_clobber = 0;
- for (i = 0;
- i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
- i++)
- {
- rtx xpart = operand_subword (x, i, 1, mode);
- rtx ypart = operand_subword (y, i, 1, mode);
+ gcc_assert (mode != BLKmode
+ && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
- /* If we can't get a part of Y, put Y into memory if it is a
- constant. Otherwise, force it into a register. If we still
- can't get a part of Y, abort. */
- if (ypart == 0 && CONSTANT_P (y))
- {
- y = force_const_mem (mode, y);
- ypart = operand_subword (y, i, 1, mode);
- }
- else if (ypart == 0)
- ypart = operand_subword_force (y, i, mode);
+ if (CONSTANT_P (y))
+ {
+ if (optimize
+ && SCALAR_FLOAT_MODE_P (GET_MODE (x))
+ && (last_insn = compress_float_constant (x, y)))
+ return last_insn;
- gcc_assert (xpart && ypart);
+ y_cst = y;
- need_clobber |= (GET_CODE (xpart) == SUBREG);
+ if (!LEGITIMATE_CONSTANT_P (y))
+ {
+ y = force_const_mem (mode, y);
- last_insn = emit_move_insn (xpart, ypart);
+ /* If the target's cannot_force_const_mem prevented the spill,
+ assume that the target's move expanders will also take care
+ of the non-legitimate constant. */
+ if (!y)
+ y = y_cst;
}
+ }
- seq = get_insns ();
- end_sequence ();
+ /* If X or Y are memory references, verify that their addresses are valid
+ for the machine. */
+ if (MEM_P (x)
+ && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
+ && ! push_operand (x, GET_MODE (x)))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
+ x = validize_mem (x);
+
+ if (MEM_P (y)
+ && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
+ y = validize_mem (y);
- /* Show the output dies here. This is necessary for SUBREGs
- of pseudos since we cannot track their lifetimes correctly;
- hard regs shouldn't appear here except as return values.
- We never want to emit such a clobber after reload. */
- if (x != y
- && ! (reload_in_progress || reload_completed)
- && need_clobber != 0)
- emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+ gcc_assert (mode != BLKmode);
- emit_insn (seq);
+ last_insn = emit_move_insn_1 (x, y);
+
+ if (y_cst && REG_P (x)
+ && (set = single_set (last_insn)) != NULL_RTX
+ && SET_DEST (set) == x
+ && ! rtx_equal_p (y_cst, SET_SRC (set)))
+ set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
- return last_insn;
- }
+ return last_insn;
}
/* If Y is representable exactly in a narrower mode, and the target can
ALIGN (in bits) is maximum alignment we can assume.
If PARTIAL and REG are both nonzero, then copy that many of the first
- words of X into registers starting with REG, and push the rest of X.
- The amount of space pushed is decreased by PARTIAL words,
- rounded *down* to a multiple of PARM_BOUNDARY.
+ bytes of X into registers starting with REG, and push the rest of X.
+ The amount of space pushed is decreased by PARTIAL bytes.
REG must be a hard register in this case.
   If REG is zero but PARTIAL is not, take all other actions for an
argument partially in registers, but do not actually load any
/* Copy a block into the stack, entirely or partially. */
rtx temp;
- int used = partial * UNITS_PER_WORD;
+ int used;
int offset;
int skip;
- if (reg && GET_CODE (reg) == PARALLEL)
- {
- /* Use the size of the elt to compute offset. */
- rtx elt = XEXP (XVECEXP (reg, 0, 0), 0);
- used = partial * GET_MODE_SIZE (GET_MODE (elt));
- offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
- }
- else
- offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
+ offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
+ used = partial - offset;
gcc_assert (size);
- used -= offset;
-
/* USED is now the # of bytes we need not copy to the stack
because registers will take care of them. */
int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
int i;
int not_stack;
- /* # words of start of argument
+ /* # bytes of start of argument
that we must make space for but need not store. */
-      int offset = partial % (PARM_BOUNDARY / BITS_PER_WORD);
+      int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
int args_offset = INTVAL (args_so_far);
/* Now NOT_STACK gets the number of words that we don't need to
allocate on the stack. */
- not_stack = partial - offset;
+ not_stack = (partial - offset) / UNITS_PER_WORD;
/* If the partial register-part of the arg counts in its stack size,
skip the part of stack space corresponding to the registers.
if (GET_CODE (reg) == PARALLEL)
emit_group_load (reg, x, type, -1);
else
- move_block_to_reg (REGNO (reg), x, partial, mode);
+ {
+ gcc_assert (partial % UNITS_PER_WORD == 0);
+ move_block_to_reg (REGNO (reg), x, partial / UNITS_PER_WORD, mode);
+ }
}
if (extra && args_addr == 0 && where_pad == stack_direction)
? 0 : x);
}
-/* Expand an assignment that stores the value of FROM into TO.
- If WANT_VALUE is nonzero, return an rtx for the value of TO.
- (If the value is constant, this rtx is a constant.)
- Otherwise, the returned value is NULL_RTX. */
+/* A subroutine of expand_assignment. Optimize FIELD op= VAL, where
+ FIELD is a bitfield. Returns true if the optimization was successful,
+ and there's nothing else to do. */
-rtx
-expand_assignment (tree to, tree from, int want_value)
+static bool
+optimize_bitfield_assignment_op (unsigned HOST_WIDE_INT bitsize,
+ unsigned HOST_WIDE_INT bitpos,
+ enum machine_mode mode1, rtx str_rtx,
+ tree to, tree src)
+{
+ enum machine_mode str_mode = GET_MODE (str_rtx);
+ unsigned int str_bitsize = GET_MODE_BITSIZE (str_mode);
+ tree op0, op1;
+ rtx value, result;
+ optab binop;
+
+ if (mode1 != VOIDmode
+ || bitsize >= BITS_PER_WORD
+ || str_bitsize > BITS_PER_WORD
+ || TREE_SIDE_EFFECTS (to)
+ || TREE_THIS_VOLATILE (to))
+ return false;
+
+ STRIP_NOPS (src);
+ if (!BINARY_CLASS_P (src)
+ || TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
+ return false;
+
+ op0 = TREE_OPERAND (src, 0);
+ op1 = TREE_OPERAND (src, 1);
+ STRIP_NOPS (op0);
+
+ if (!operand_equal_p (to, op0, 0))
+ return false;
+
+ if (MEM_P (str_rtx))
+ {
+ unsigned HOST_WIDE_INT offset1;
+
+ if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
+ str_mode = word_mode;
+ str_mode = get_best_mode (bitsize, bitpos,
+ MEM_ALIGN (str_rtx), str_mode, 0);
+ if (str_mode == VOIDmode)
+ return false;
+ str_bitsize = GET_MODE_BITSIZE (str_mode);
+
+ offset1 = bitpos;
+ bitpos %= str_bitsize;
+ offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
+ str_rtx = adjust_address (str_rtx, str_mode, offset1);
+ }
+ else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
+ return false;
+
+ /* If the bit field covers the whole REG/MEM, store_field
+ will likely generate better code. */
+ if (bitsize >= str_bitsize)
+ return false;
+
+ /* We can't handle fields split across multiple entities. */
+ if (bitpos + bitsize > str_bitsize)
+ return false;
+
+ if (BYTES_BIG_ENDIAN)
+ bitpos = str_bitsize - bitpos - bitsize;
+
+ switch (TREE_CODE (src))
+ {
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ /* For now, just optimize the case of the topmost bitfield
+ where we don't need to do any masking and also
+ 1 bit bitfields where xor can be used.
+ We might win by one instruction for the other bitfields
+ too if insv/extv instructions aren't used, so that
+ can be added later. */
+ if (bitpos + bitsize != str_bitsize
+ && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
+ break;
+
+ value = expand_expr (op1, NULL_RTX, str_mode, 0);
+ value = convert_modes (str_mode,
+ TYPE_MODE (TREE_TYPE (op1)), value,
+ TYPE_UNSIGNED (TREE_TYPE (op1)));
+
+ /* We may be accessing data outside the field, which means
+ we can alias adjacent data. */
+ if (MEM_P (str_rtx))
+ {
+ str_rtx = shallow_copy_rtx (str_rtx);
+ set_mem_alias_set (str_rtx, 0);
+ set_mem_expr (str_rtx, 0);
+ }
+
+ binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
+ if (bitsize == 1 && bitpos + bitsize != str_bitsize)
+ {
+ value = expand_and (str_mode, value, const1_rtx, NULL);
+ binop = xor_optab;
+ }
+ value = expand_shift (LSHIFT_EXPR, str_mode, value,
+ build_int_cst (NULL_TREE, bitpos),
+ NULL_RTX, 1);
+ result = expand_binop (str_mode, binop, str_rtx,
+ value, str_rtx, 1, OPTAB_WIDEN);
+ if (result != str_rtx)
+ emit_move_insn (str_rtx, result);
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
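+/* For example, on a little-endian target, given
+   struct { unsigned lo : 3, hi : 29; } s, the store s.hi += 1 hits the
+   "topmost bitfield" case above: the addend is shifted left by 3 and a
+   single add updates the containing word, any carry falling off the top.
+   A 1-bit field's += of a constant likewise reduces to an xor.  */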
+
+/* Expand an assignment that stores the value of FROM into TO. */
+
+void
+expand_assignment (tree to, tree from)
{
rtx to_rtx = 0;
rtx result;
if (TREE_CODE (to) == ERROR_MARK)
{
result = expand_expr (from, NULL_RTX, VOIDmode, 0);
- return want_value ? result : NULL_RTX;
+ return;
}
/* Assignment of a structure component needs special treatment
Assignment of an array element at a constant index, and assignment of
an array element in an unaligned packed structure field, has the same
problem. */
-
- if (TREE_CODE (to) == COMPONENT_REF || TREE_CODE (to) == BIT_FIELD_REF
- || TREE_CODE (to) == ARRAY_REF || TREE_CODE (to) == ARRAY_RANGE_REF
+ if (handled_component_p (to)
|| TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
{
enum machine_mode mode1;
push_temp_slots ();
tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
- &unsignedp, &volatilep);
+ &unsignedp, &volatilep, true);
/* If we are going to use store_bit_field and extract_bit_field,
make sure to_rtx will be safe for multiple use. */
- if (mode1 == VOIDmode && want_value)
- tem = stabilize_reference (tem);
-
orig_to_rtx = to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, 0);
if (offset != 0)
offset));
}
- if (MEM_P (to_rtx))
- {
- /* If the field is at offset zero, we could have been given the
- DECL_RTX of the parent struct. Don't munge it. */
- to_rtx = shallow_copy_rtx (to_rtx);
-
- set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
- }
-
- /* Deal with volatile and readonly fields. The former is only done
- for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
- if (volatilep && MEM_P (to_rtx))
- {
- if (to_rtx == orig_to_rtx)
- to_rtx = copy_rtx (to_rtx);
- MEM_VOLATILE_P (to_rtx) = 1;
- }
-
- if (MEM_P (to_rtx) && ! can_address_p (to))
+ /* Handle expand_expr of a complex value returning a CONCAT. */
+ if (GET_CODE (to_rtx) == CONCAT)
{
- if (to_rtx == orig_to_rtx)
- to_rtx = copy_rtx (to_rtx);
- MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
+ if (TREE_CODE (TREE_TYPE (from)) == COMPLEX_TYPE)
+ {
+ gcc_assert (bitpos == 0);
+ result = store_expr (from, to_rtx, false);
+ }
+ else
+ {
+ gcc_assert (bitpos == 0 || bitpos == GET_MODE_BITSIZE (mode1));
+ result = store_expr (from, XEXP (to_rtx, bitpos != 0), false);
+ }
}
-
- /* Optimize bitfld op= val in certain cases. */
- while (mode1 == VOIDmode && !want_value
- && bitsize > 0 && bitsize < BITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (to_rtx)) <= BITS_PER_WORD
- && !TREE_SIDE_EFFECTS (to)
- && !TREE_THIS_VOLATILE (to))
+ else
{
- tree src, op0, op1;
- rtx value, str_rtx = to_rtx;
- HOST_WIDE_INT bitpos1 = bitpos;
- optab binop;
-
- src = from;
- STRIP_NOPS (src);
- if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE
- || !BINARY_CLASS_P (src))
- break;
-
- op0 = TREE_OPERAND (src, 0);
- op1 = TREE_OPERAND (src, 1);
- STRIP_NOPS (op0);
-
- if (! operand_equal_p (to, op0, 0))
- break;
-
- if (MEM_P (str_rtx))
+ if (MEM_P (to_rtx))
{
- enum machine_mode mode = GET_MODE (str_rtx);
- HOST_WIDE_INT offset1;
-
- if (GET_MODE_BITSIZE (mode) == 0
- || GET_MODE_BITSIZE (mode) > BITS_PER_WORD)
- mode = word_mode;
- mode = get_best_mode (bitsize, bitpos1, MEM_ALIGN (str_rtx),
- mode, 0);
- if (mode == VOIDmode)
- break;
-
- offset1 = bitpos1;
- bitpos1 %= GET_MODE_BITSIZE (mode);
- offset1 = (offset1 - bitpos1) / BITS_PER_UNIT;
- str_rtx = adjust_address (str_rtx, mode, offset1);
- }
- else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
- break;
-
- /* If the bit field covers the whole REG/MEM, store_field
- will likely generate better code. */
- if (bitsize >= GET_MODE_BITSIZE (GET_MODE (str_rtx)))
- break;
+ /* If the field is at offset zero, we could have been given the
+ DECL_RTX of the parent struct. Don't munge it. */
+ to_rtx = shallow_copy_rtx (to_rtx);
- /* We can't handle fields split across multiple entities. */
- if (bitpos1 + bitsize > GET_MODE_BITSIZE (GET_MODE (str_rtx)))
- break;
-
- if (BYTES_BIG_ENDIAN)
- bitpos1 = GET_MODE_BITSIZE (GET_MODE (str_rtx)) - bitpos1
- - bitsize;
-
- /* Special case some bitfield op= exp. */
- switch (TREE_CODE (src))
- {
- case PLUS_EXPR:
- case MINUS_EXPR:
- /* For now, just optimize the case of the topmost bitfield
- where we don't need to do any masking and also
- 1 bit bitfields where xor can be used.
- We might win by one instruction for the other bitfields
- too if insv/extv instructions aren't used, so that
- can be added later. */
- if (bitpos1 + bitsize != GET_MODE_BITSIZE (GET_MODE (str_rtx))
- && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
- break;
- value = expand_expr (op1, NULL_RTX, GET_MODE (str_rtx), 0);
- value = convert_modes (GET_MODE (str_rtx),
- TYPE_MODE (TREE_TYPE (op1)), value,
- TYPE_UNSIGNED (TREE_TYPE (op1)));
-
- /* We may be accessing data outside the field, which means
- we can alias adjacent data. */
- if (MEM_P (str_rtx))
- {
- str_rtx = shallow_copy_rtx (str_rtx);
- set_mem_alias_set (str_rtx, 0);
- set_mem_expr (str_rtx, 0);
- }
+ set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
- binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
- if (bitsize == 1
- && bitpos1 + bitsize != GET_MODE_BITSIZE (GET_MODE (str_rtx)))
- {
- value = expand_and (GET_MODE (str_rtx), value, const1_rtx,
- NULL_RTX);
- binop = xor_optab;
- }
- value = expand_shift (LSHIFT_EXPR, GET_MODE (str_rtx), value,
- build_int_cst (NULL_TREE, bitpos1),
- NULL_RTX, 1);
- result = expand_binop (GET_MODE (str_rtx), binop, str_rtx,
- value, str_rtx, 1, OPTAB_WIDEN);
- if (result != str_rtx)
- emit_move_insn (str_rtx, result);
- free_temp_slots ();
- pop_temp_slots ();
- return NULL_RTX;
-
- default:
- break;
+ /* Deal with volatile and readonly fields. The former is only
+ done for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
+ if (volatilep)
+ MEM_VOLATILE_P (to_rtx) = 1;
+ if (component_uses_parent_alias_set (to))
+ MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
}
- break;
+ if (optimize_bitfield_assignment_op (bitsize, bitpos, mode1,
+ to_rtx, to, from))
+ result = NULL;
+ else
+ result = store_field (to_rtx, bitsize, bitpos, mode1, from,
+ TREE_TYPE (tem), get_alias_set (to));
}
- result = store_field (to_rtx, bitsize, bitpos, mode1, from,
- (want_value
- /* Spurious cast for HPUX compiler. */
- ? ((enum machine_mode)
- TYPE_MODE (TREE_TYPE (to)))
- : VOIDmode),
- unsignedp, TREE_TYPE (tem), get_alias_set (to));
-
- preserve_temp_slots (result);
+ if (result)
+ preserve_temp_slots (result);
free_temp_slots ();
pop_temp_slots ();
-
- /* If the value is meaningful, convert RESULT to the proper mode.
- Otherwise, return nothing. */
- return (want_value ? convert_modes (TYPE_MODE (TREE_TYPE (to)),
- TYPE_MODE (TREE_TYPE (from)),
- result,
- TYPE_UNSIGNED (TREE_TYPE (to)))
- : NULL_RTX);
+ return;
}
/* If the rhs is a function call and its value is not an aggregate,
preserve_temp_slots (to_rtx);
free_temp_slots ();
pop_temp_slots ();
- return want_value ? to_rtx : NULL_RTX;
+ return;
}
/* Ordinary treatment. Expand TO to get a REG or MEM rtx.
preserve_temp_slots (to_rtx);
free_temp_slots ();
pop_temp_slots ();
- return want_value ? to_rtx : NULL_RTX;
+ return;
}
/* In case we are returning the contents of an object which overlaps
preserve_temp_slots (to_rtx);
free_temp_slots ();
pop_temp_slots ();
- return want_value ? to_rtx : NULL_RTX;
+ return;
}
/* Compute FROM and store the value in the rtx we got. */
push_temp_slots ();
- result = store_expr (from, to_rtx, want_value);
+ result = store_expr (from, to_rtx, 0);
preserve_temp_slots (result);
free_temp_slots ();
pop_temp_slots ();
- return want_value ? result : NULL_RTX;
+ return;
}
/* Generate code for computing expression EXP,
and storing the value into TARGET.
- If WANT_VALUE & 1 is nonzero, return a copy of the value
- not in TARGET, so that we can be sure to use the proper
- value in a containing expression even if TARGET has something
- else stored in it. If possible, we copy the value through a pseudo
- and return that pseudo. Or, if the value is constant, we try to
- return the constant. In some cases, we return a pseudo
- copied *from* TARGET.
-
If the mode is BLKmode then we may return TARGET itself.
   It turns out that in BLKmode it doesn't cause a problem,
because C has no operators that could combine two different
with no sequence point. Will other languages need this to
be more thorough?
- If WANT_VALUE & 1 is 0, we return NULL, to make sure
- to catch quickly any cases where the caller uses the value
- and fails to set WANT_VALUE.
-
- If WANT_VALUE & 2 is set, this is a store into a call param on the
+ If CALL_PARAM_P is nonzero, this is a store into a call param on the
stack, and block moves may need to be treated specially. */
rtx
-store_expr (tree exp, rtx target, int want_value)
+store_expr (tree exp, rtx target, int call_param_p)
{
rtx temp;
rtx alt_rtl = NULL_RTX;
int dont_return_target = 0;
- int dont_store_target = 0;
if (VOID_TYPE_P (TREE_TYPE (exp)))
{
/* C++ can generate ?: expressions with a throw expression in one
branch and an rvalue in the other. Here, we resolve attempts to
store the throw expression's nonexistent result. */
- gcc_assert (!want_value);
+ gcc_assert (!call_param_p);
expand_expr (exp, const0_rtx, VOIDmode, 0);
return NULL_RTX;
}
/* Perform first part of compound expression, then assign from second
part. */
expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
- want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL);
- return store_expr (TREE_OPERAND (exp, 1), target, want_value);
+ call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
+ return store_expr (TREE_OPERAND (exp, 1), target, call_param_p);
}
else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
{
do_pending_stack_adjust ();
NO_DEFER_POP;
jumpifnot (TREE_OPERAND (exp, 0), lab1);
- store_expr (TREE_OPERAND (exp, 1), target, want_value & 2);
+ store_expr (TREE_OPERAND (exp, 1), target, call_param_p);
emit_jump_insn (gen_jump (lab2));
emit_barrier ();
emit_label (lab1);
- store_expr (TREE_OPERAND (exp, 2), target, want_value & 2);
+ store_expr (TREE_OPERAND (exp, 2), target, call_param_p);
emit_label (lab2);
OK_DEFER_POP;
- return want_value & 1 ? target : NULL_RTX;
- }
- else if ((want_value & 1) != 0
- && MEM_P (target)
- && ! MEM_VOLATILE_P (target)
- && GET_MODE (target) != BLKmode)
- /* If target is in memory and caller wants value in a register instead,
- arrange that. Pass TARGET as target for expand_expr so that,
- if EXP is another assignment, WANT_VALUE will be nonzero for it.
- We know expand_expr will not use the target in that case.
- Don't do this if TARGET is volatile because we are supposed
- to write it and then read it. */
- {
- temp = expand_expr (exp, target, GET_MODE (target),
- want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL);
- if (GET_MODE (temp) != BLKmode && GET_MODE (temp) != VOIDmode)
- {
- /* If TEMP is already in the desired TARGET, only copy it from
- memory and don't store it there again. */
- if (temp == target
- || (rtx_equal_p (temp, target)
- && ! side_effects_p (temp) && ! side_effects_p (target)))
- dont_store_target = 1;
- temp = copy_to_reg (temp);
- }
- dont_return_target = 1;
+ return NULL_RTX;
}
else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
/* If this is a scalar in a register that is stored in a wider mode
{
rtx inner_target = 0;
- /* If we don't want a value, we can do the conversion inside EXP,
- which will often result in some optimizations. Do the conversion
- in two steps: first change the signedness, if needed, then
- the extend. But don't do this if the type of EXP is a subtype
- of something else since then the conversion might involve
- more than just converting modes. */
- if ((want_value & 1) == 0
- && INTEGRAL_TYPE_P (TREE_TYPE (exp))
+ /* We can do the conversion inside EXP, which will often result
+ in some optimizations. Do the conversion in two steps: first
+ change the signedness, if needed, then the extend. But don't
+ do this if the type of EXP is a subtype of something else
+ since then the conversion might involve more than just
+ converting modes. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
&& TREE_TYPE (TREE_TYPE (exp)) == 0
&& (!lang_hooks.reduce_bit_field_operations
|| (GET_MODE_PRECISION (GET_MODE (target))
}
temp = expand_expr (exp, inner_target, VOIDmode,
- want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL);
-
- /* If TEMP is a MEM and we want a result value, make the access
- now so it gets done only once. Strictly speaking, this is
- only necessary if the MEM is volatile, or if the address
- overlaps TARGET. But not performing the load twice also
- reduces the amount of rtl we generate and then have to CSE. */
- if (MEM_P (temp) && (want_value & 1) != 0)
- temp = copy_to_reg (temp);
+ call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
/* If TEMP is a VOIDmode constant, use convert_modes to make
sure that we properly convert it. */
convert_move (SUBREG_REG (target), temp,
SUBREG_PROMOTED_UNSIGNED_P (target));
- /* If we promoted a constant, change the mode back down to match
- target. Otherwise, the caller might get confused by a result whose
- mode is larger than expected. */
-
- if ((want_value & 1) != 0 && GET_MODE (temp) != GET_MODE (target))
- {
- if (GET_MODE (temp) != VOIDmode)
- {
- temp = gen_lowpart_SUBREG (GET_MODE (target), temp);
- SUBREG_PROMOTED_VAR_P (temp) = 1;
- SUBREG_PROMOTED_UNSIGNED_SET (temp,
- SUBREG_PROMOTED_UNSIGNED_P (target));
- }
- else
- temp = convert_modes (GET_MODE (target),
- GET_MODE (SUBREG_REG (target)),
- temp, SUBREG_PROMOTED_UNSIGNED_P (target));
- }
-
- return want_value & 1 ? temp : NULL_RTX;
+ return NULL_RTX;
}
else
{
temp = expand_expr_real (exp, target, GET_MODE (target),
- (want_value & 2
+ (call_param_p
? EXPAND_STACK_PARM : EXPAND_NORMAL),
&alt_rtl);
/* Return TARGET if it's a specified hardware register.
&& REGNO (target) < FIRST_PSEUDO_REGISTER)
&& !(MEM_P (target) && MEM_VOLATILE_P (target))
&& ! rtx_equal_p (temp, target)
- && (CONSTANT_P (temp) || (want_value & 1) != 0))
+ && CONSTANT_P (temp))
dont_return_target = 1;
}
|| (temp != target && (side_effects_p (temp)
|| side_effects_p (target))))
&& TREE_CODE (exp) != ERROR_MARK
- && ! dont_store_target
/* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
but TARGET is not valid memory reference, TEMP will differ
from TARGET although it is really the same location. */
if (GET_CODE (size) == CONST_INT
&& INTVAL (size) < TREE_STRING_LENGTH (exp))
emit_block_move (target, temp, size,
- (want_value & 2
+ (call_param_p
? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
else
{
size_int (TREE_STRING_LENGTH (exp)));
rtx copy_size_rtx
= expand_expr (copy_size, NULL_RTX, VOIDmode,
- (want_value & 2
+ (call_param_p
? EXPAND_STACK_PARM : EXPAND_NORMAL));
rtx label = 0;
copy_size_rtx = convert_to_mode (ptr_mode, copy_size_rtx,
TYPE_UNSIGNED (sizetype));
emit_block_move (target, temp, copy_size_rtx,
- (want_value & 2
+ (call_param_p
? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
/* Figure out how much is left in TARGET that we have to clear.
int_size_in_bytes (TREE_TYPE (exp)));
else if (GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
- (want_value & 2
+ (call_param_p
? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
else
{
}
}
- /* If we don't want a value, return NULL_RTX. */
- if ((want_value & 1) == 0)
- return NULL_RTX;
-
- /* If we are supposed to return TEMP, do so as long as it isn't a MEM.
- ??? The latter test doesn't seem to make sense. */
- else if (dont_return_target && !MEM_P (temp))
- return temp;
-
- /* Return TARGET itself if it is a hard register. */
- else if ((want_value & 1) != 0
- && GET_MODE (target) != BLKmode
- && ! (REG_P (target)
- && REGNO (target) < FIRST_PSEUDO_REGISTER))
- return copy_to_reg (target);
-
- else
- return target;
+ return NULL_RTX;
}
\f
-/* Examine CTOR. Discover how many scalar fields are set to nonzero
- values and place it in *P_NZ_ELTS. Discover how many scalar fields
- are set to non-constant values and place it in *P_NC_ELTS. */
+/* Examine CTOR to discover:
+ * how many scalar fields are set to nonzero values,
+ and place it in *P_NZ_ELTS;
+ * how many scalar fields are set to non-constant values,
+ and place it in *P_NC_ELTS; and
+ * how many scalar fields in total are in CTOR,
+ and place it in *P_ELT_COUNT. */
static void
categorize_ctor_elements_1 (tree ctor, HOST_WIDE_INT *p_nz_elts,
- HOST_WIDE_INT *p_nc_elts)
+ HOST_WIDE_INT *p_nc_elts,
+ HOST_WIDE_INT *p_elt_count)
{
- HOST_WIDE_INT nz_elts, nc_elts;
+ HOST_WIDE_INT nz_elts, nc_elts, elt_count;
tree list;
nz_elts = 0;
nc_elts = 0;
+ elt_count = 0;
for (list = CONSTRUCTOR_ELTS (ctor); list; list = TREE_CHAIN (list))
{
{
case CONSTRUCTOR:
{
- HOST_WIDE_INT nz = 0, nc = 0;
- categorize_ctor_elements_1 (value, &nz, &nc);
+ HOST_WIDE_INT nz = 0, nc = 0, count = 0;
+ categorize_ctor_elements_1 (value, &nz, &nc, &count);
nz_elts += mult * nz;
nc_elts += mult * nc;
+ elt_count += mult * count;
}
break;
case REAL_CST:
if (!initializer_zerop (value))
nz_elts += mult;
+ elt_count += mult;
+ break;
+
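+ /* Treat every character of the string, including any embedded
+ zeros, as a nonzero scalar element; overestimating *P_NZ_ELTS
+ is the safe direction here. */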
+ case STRING_CST:
+ nz_elts += mult * TREE_STRING_LENGTH (value);
+ elt_count += mult * TREE_STRING_LENGTH (value);
break;
+
case COMPLEX_CST:
if (!initializer_zerop (TREE_REALPART (value)))
nz_elts += mult;
if (!initializer_zerop (TREE_IMAGPART (value)))
nz_elts += mult;
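+ /* Each part is tested for zeroness separately, but the complex
+ value contributes just one element to the total count. */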
+ elt_count += mult;
break;
+
case VECTOR_CST:
{
tree v;
for (v = TREE_VECTOR_CST_ELTS (value); v; v = TREE_CHAIN (v))
- if (!initializer_zerop (TREE_VALUE (v)))
- nz_elts += mult;
+ {
+ if (!initializer_zerop (TREE_VALUE (v)))
+ nz_elts += mult;
+ elt_count += mult;
+ }
}
break;
default:
nz_elts += mult;
+ elt_count += mult;
if (!initializer_constant_valid_p (value, TREE_TYPE (value)))
nc_elts += mult;
break;
*p_nz_elts += nz_elts;
*p_nc_elts += nc_elts;
+ *p_elt_count += elt_count;
}
void
categorize_ctor_elements (tree ctor, HOST_WIDE_INT *p_nz_elts,
- HOST_WIDE_INT *p_nc_elts)
+ HOST_WIDE_INT *p_nc_elts,
+ HOST_WIDE_INT *p_elt_count)
{
*p_nz_elts = 0;
*p_nc_elts = 0;
- categorize_ctor_elements_1 (ctor, p_nz_elts, p_nc_elts);
+ *p_elt_count = 0;
+ categorize_ctor_elements_1 (ctor, p_nz_elts, p_nc_elts, p_elt_count);
}
/* Count the number of scalars in TYPE. Return -1 on overflow or
case VOID_TYPE:
case METHOD_TYPE:
case FILE_TYPE:
- case SET_TYPE:
case FUNCTION_TYPE:
case LANG_TYPE:
default:
/* Return 1 if EXP contains mostly (more than 3/4) zeros. */
-int
+static int
mostly_zeros_p (tree exp)
{
if (TREE_CODE (exp) == CONSTRUCTOR)
{
- HOST_WIDE_INT nz_elts, nc_elts, elts;
+ HOST_WIDE_INT nz_elts, nc_elts, count, elts;
- /* If there are no ranges of true bits, it is all zero. */
- if (TREE_TYPE (exp) && TREE_CODE (TREE_TYPE (exp)) == SET_TYPE)
- return CONSTRUCTOR_ELTS (exp) == NULL_TREE;
-
- categorize_ctor_elements (exp, &nz_elts, &nc_elts);
+ categorize_ctor_elements (exp, &nz_elts, &nc_elts, &count);
elts = count_type_elements (TREE_TYPE (exp));
return nz_elts < elts / 4;
store_constructor (exp, target, cleared, bitsize / BITS_PER_UNIT);
}
else
- store_field (target, bitsize, bitpos, mode, exp, VOIDmode, 0, type,
- alias_set);
+ store_field (target, bitsize, bitpos, mode, exp, type, alias_set);
}
/* Store the value of constructor EXP into the rtx TARGET.
the loop. */
expand_assignment (index,
build2 (PLUS_EXPR, TREE_TYPE (index),
- index, integer_one_node), 0);
+ index, integer_one_node));
emit_jump (loop_start);
gen_rtvec_v (n_elts, vector))));
break;
}
-
- /* Set constructor assignments. */
- case SET_TYPE:
- {
- tree elt = CONSTRUCTOR_ELTS (exp);
- unsigned HOST_WIDE_INT nbytes = int_size_in_bytes (type), nbits;
- tree domain = TYPE_DOMAIN (type);
- tree domain_min, domain_max, bitlength;
-
- /* The default implementation strategy is to extract the
- constant parts of the constructor, use that to initialize
- the target, and then "or" in whatever non-constant ranges
- we need in addition.
-
- If a large set is all zero or all ones, it is probably
- better to set it using memset. Also, if a large set has
- just a single range, it may be better to first clear the
- whole set (using memset), and then set the bits we want. */
-
- /* Check for all zeros. */
- if (elt == NULL_TREE && size > 0)
- {
- if (!cleared)
- clear_storage (target, GEN_INT (size));
- return;
- }
-
- domain_min = convert (sizetype, TYPE_MIN_VALUE (domain));
- domain_max = convert (sizetype, TYPE_MAX_VALUE (domain));
- bitlength = size_binop (PLUS_EXPR,
- size_diffop (domain_max, domain_min),
- ssize_int (1));
-
- nbits = tree_low_cst (bitlength, 1);
-
- /* For "small" sets, or "medium-sized" (up to 32 bytes) sets
- that are "complicated" (more than one range), initialize
- (the constant parts) by copying from a constant. */
- if (GET_MODE (target) != BLKmode || nbits <= 2 * BITS_PER_WORD
- || (nbytes <= 32 && TREE_CHAIN (elt) != NULL_TREE))
- {
- unsigned int set_word_size = TYPE_ALIGN (TREE_TYPE (exp));
- enum machine_mode mode = mode_for_size (set_word_size, MODE_INT, 1);
- char *bit_buffer = alloca (nbits);
- HOST_WIDE_INT word = 0;
- unsigned int bit_pos = 0;
- unsigned int ibit = 0;
- unsigned int offset = 0; /* In bytes from beginning of set. */
-
- elt = get_set_constructor_bits (exp, bit_buffer, nbits);
- for (;;)
- {
- if (bit_buffer[ibit])
- {
- if (BYTES_BIG_ENDIAN)
- word |= (1 << (set_word_size - 1 - bit_pos));
- else
- word |= 1 << bit_pos;
- }
-
- bit_pos++; ibit++;
- if (bit_pos >= set_word_size || ibit == nbits)
- {
- if (word != 0 || ! cleared)
- {
- rtx datum = gen_int_mode (word, mode);
- rtx to_rtx;
-
- /* The assumption here is that it is safe to
- use XEXP if the set is multi-word, but not
- if it's single-word. */
- if (MEM_P (target))
- to_rtx = adjust_address (target, mode, offset);
- else
- {
- gcc_assert (!offset);
- to_rtx = target;
- }
- emit_move_insn (to_rtx, datum);
- }
-
- if (ibit == nbits)
- break;
- word = 0;
- bit_pos = 0;
- offset += set_word_size / BITS_PER_UNIT;
- }
- }
- }
- else if (!cleared)
- /* Don't bother clearing storage if the set is all ones. */
- if (TREE_CHAIN (elt) != NULL_TREE
- || (TREE_PURPOSE (elt) == NULL_TREE
- ? nbits != 1
- : ( ! host_integerp (TREE_VALUE (elt), 0)
- || ! host_integerp (TREE_PURPOSE (elt), 0)
- || (tree_low_cst (TREE_VALUE (elt), 0)
- - tree_low_cst (TREE_PURPOSE (elt), 0) + 1
- != (HOST_WIDE_INT) nbits))))
- clear_storage (target, expr_size (exp));
-
- for (; elt != NULL_TREE; elt = TREE_CHAIN (elt))
- {
- /* Start of range of element or NULL. */
- tree startbit = TREE_PURPOSE (elt);
- /* End of range of element, or element value. */
- tree endbit = TREE_VALUE (elt);
- HOST_WIDE_INT startb, endb;
- rtx bitlength_rtx, startbit_rtx, endbit_rtx, targetx;
-
- bitlength_rtx = expand_expr (bitlength,
- NULL_RTX, MEM, EXPAND_CONST_ADDRESS);
-
- /* Handle non-range tuple element like [ expr ]. */
- if (startbit == NULL_TREE)
- {
- startbit = save_expr (endbit);
- endbit = startbit;
- }
-
- startbit = convert (sizetype, startbit);
- endbit = convert (sizetype, endbit);
- if (! integer_zerop (domain_min))
- {
- startbit = size_binop (MINUS_EXPR, startbit, domain_min);
- endbit = size_binop (MINUS_EXPR, endbit, domain_min);
- }
- startbit_rtx = expand_expr (startbit, NULL_RTX, MEM,
- EXPAND_CONST_ADDRESS);
- endbit_rtx = expand_expr (endbit, NULL_RTX, MEM,
- EXPAND_CONST_ADDRESS);
-
- if (REG_P (target))
- {
- targetx
- = assign_temp
- ((build_qualified_type (lang_hooks.types.type_for_mode
- (GET_MODE (target), 0),
- TYPE_QUAL_CONST)),
- 0, 1, 1);
- emit_move_insn (targetx, target);
- }
-
- else
- {
- gcc_assert (MEM_P (target));
- targetx = target;
- }
-
- /* Optimization: If startbit and endbit are constants divisible
- by BITS_PER_UNIT, call memset instead. */
- if (TREE_CODE (startbit) == INTEGER_CST
- && TREE_CODE (endbit) == INTEGER_CST
- && (startb = TREE_INT_CST_LOW (startbit)) % BITS_PER_UNIT == 0
- && (endb = TREE_INT_CST_LOW (endbit) + 1) % BITS_PER_UNIT == 0)
- {
- emit_library_call (memset_libfunc, LCT_NORMAL,
- VOIDmode, 3,
- plus_constant (XEXP (targetx, 0),
- startb / BITS_PER_UNIT),
- Pmode,
- constm1_rtx, TYPE_MODE (integer_type_node),
- GEN_INT ((endb - startb) / BITS_PER_UNIT),
- TYPE_MODE (sizetype));
- }
- else
- emit_library_call (setbits_libfunc, LCT_NORMAL,
- VOIDmode, 4, XEXP (targetx, 0),
- Pmode, bitlength_rtx, TYPE_MODE (sizetype),
- startbit_rtx, TYPE_MODE (sizetype),
- endbit_rtx, TYPE_MODE (sizetype));
-
- if (REG_P (target))
- emit_move_insn (target, targetx);
- }
- break;
- }
+
default:
gcc_unreachable ();
}
BITSIZE bits, starting BITPOS bits from the start of TARGET.
If MODE is VOIDmode, it means that we are storing into a bit-field.
- If VALUE_MODE is VOIDmode, return nothing in particular.
- UNSIGNEDP is not used in this case.
-
- Otherwise, return an rtx for the value stored. This rtx
- has mode VALUE_MODE if that is convenient to do.
- In this case, UNSIGNEDP must be nonzero if the value is an unsigned type.
+ Always return const0_rtx unless we have something particular to
+ return.
TYPE is the type of the underlying object,
static rtx
store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
- enum machine_mode mode, tree exp, enum machine_mode value_mode,
- int unsignedp, tree type, int alias_set)
+ enum machine_mode mode, tree exp, tree type, int alias_set)
{
HOST_WIDE_INT width_mask = 0;
if (bitsize != (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (target)))
emit_move_insn (object, target);
- store_field (blk_object, bitsize, bitpos, mode, exp, VOIDmode, 0, type,
- alias_set);
+ store_field (blk_object, bitsize, bitpos, mode, exp, type, alias_set);
emit_move_insn (target, object);
/* We're storing into a struct containing a single __complex. */
gcc_assert (!bitpos);
- return store_expr (exp, target, value_mode != VOIDmode);
+ return store_expr (exp, target, 0);
}
/* If the structure is in a register or if the component
/ BITS_PER_UNIT),
BLOCK_OP_NORMAL);
- return value_mode == VOIDmode ? const0_rtx : target;
+ return const0_rtx;
}
/* Store the value in the bitfield. */
store_bit_field (target, bitsize, bitpos, mode, temp);
- if (value_mode != VOIDmode)
- {
- /* The caller wants an rtx for the value.
- If possible, avoid refetching from the bitfield itself. */
- if (width_mask != 0
- && ! (MEM_P (target) && MEM_VOLATILE_P (target)))
- {
- tree count;
- enum machine_mode tmode;
-
- tmode = GET_MODE (temp);
- if (tmode == VOIDmode)
- tmode = value_mode;
-
- if (unsignedp)
- return expand_and (tmode, temp,
- gen_int_mode (width_mask, tmode),
- NULL_RTX);
-
- count = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (tmode) - bitsize);
- temp = expand_shift (LSHIFT_EXPR, tmode, temp, count, 0, 0);
- return expand_shift (RSHIFT_EXPR, tmode, temp, count, 0, 0);
- }
-
- return extract_bit_field (target, bitsize, bitpos, unsignedp,
- NULL_RTX, value_mode, VOIDmode);
- }
return const0_rtx;
}
else
{
- rtx addr = XEXP (target, 0);
- rtx to_rtx = target;
-
- /* If a value is wanted, it must be the lhs;
- so make the address stable for multiple use. */
-
- if (value_mode != VOIDmode && !REG_P (addr)
- && ! CONSTANT_ADDRESS_P (addr)
- /* A frame-pointer reference is already stable. */
- && ! (GET_CODE (addr) == PLUS
- && GET_CODE (XEXP (addr, 1)) == CONST_INT
- && (XEXP (addr, 0) == virtual_incoming_args_rtx
- || XEXP (addr, 0) == virtual_stack_vars_rtx)))
- to_rtx = replace_equiv_address (to_rtx, copy_to_reg (addr));
-
/* Now build a reference to just the desired component. */
-
- to_rtx = adjust_address (target, mode, bitpos / BITS_PER_UNIT);
+ rtx to_rtx = adjust_address (target, mode, bitpos / BITS_PER_UNIT);
if (to_rtx == target)
to_rtx = copy_rtx (to_rtx);
if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
set_mem_alias_set (to_rtx, alias_set);
- return store_expr (exp, to_rtx, value_mode != VOIDmode);
+ return store_expr (exp, to_rtx, 0);
}
}
\f
If the field describes a variable-sized object, *PMODE is set to
VOIDmode and *PBITSIZE is set to -1. An access cannot be made in
- this case, but the address of the object can be found. */
+ this case, but the address of the object can be found.
+
+ If KEEP_ALIGNING is true and the target is STRICT_ALIGNMENT, we don't
+ look through nodes that serve as markers of a greater alignment than
+ the one that can be deduced from the expression. These nodes make it
+ possible for front-ends to prevent temporaries from being created by
+ the middle-end on alignment considerations. For that purpose, the
+ normal operating mode at high-level is to always pass FALSE so that
+ the ultimate containing object is really returned; moreover, the
+ associated predicate handled_component_p will always return TRUE
+ on these nodes, thus indicating that they are essentially handled
+ by get_inner_reference. TRUE should only be passed when the caller
+ is scanning the expression in order to build another representation
+ and specifically knows how to handle these nodes; as such, this is
+ the normal operating mode in the RTL expanders. */
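+
+/* For instance (a sketch, not part of this patch): a caller that
+ wants the ultimate containing object passes FALSE for
+ KEEP_ALIGNING, e.g.
+
+ tree base = get_inner_reference (exp, &bitsize, &bitpos, &offset,
+ &mode1, &unsignedp, &volatilep, false); */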
tree
get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
HOST_WIDE_INT *pbitpos, tree *poffset,
enum machine_mode *pmode, int *punsignedp,
- int *pvolatilep)
+ int *pvolatilep, bool keep_aligning)
{
tree size_tree = 0;
enum machine_mode mode = VOIDmode;
and find the ultimate containing object. */
while (1)
{
- if (TREE_CODE (exp) == BIT_FIELD_REF)
- bit_offset = size_binop (PLUS_EXPR, bit_offset, TREE_OPERAND (exp, 2));
- else if (TREE_CODE (exp) == COMPONENT_REF)
+ switch (TREE_CODE (exp))
{
- tree field = TREE_OPERAND (exp, 1);
- tree this_offset = component_ref_field_offset (exp);
+ case BIT_FIELD_REF:
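+ /* Operand 2 of a BIT_FIELD_REF is the position of the field
+ in bits, so it simply accumulates into BIT_OFFSET. */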
+ bit_offset = size_binop (PLUS_EXPR, bit_offset,
+ TREE_OPERAND (exp, 2));
+ break;
- /* If this field hasn't been filled in yet, don't go
- past it. This should only happen when folding expressions
- made during type construction. */
- if (this_offset == 0)
- break;
+ case COMPONENT_REF:
+ {
+ tree field = TREE_OPERAND (exp, 1);
+ tree this_offset = component_ref_field_offset (exp);
+
+ /* If this field hasn't been filled in yet, don't go past it.
+ This should only happen when folding expressions made during
+ type construction. */
+ if (this_offset == 0)
+ break;
+
+ offset = size_binop (PLUS_EXPR, offset, this_offset);
+ bit_offset = size_binop (PLUS_EXPR, bit_offset,
+ DECL_FIELD_BIT_OFFSET (field));
+
+ /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
+ }
+ break;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ {
+ tree index = TREE_OPERAND (exp, 1);
+ tree low_bound = array_ref_low_bound (exp);
+ tree unit_size = array_ref_element_size (exp);
+
+ /* We assume all arrays have sizes that are a multiple of a byte.
+ First subtract the lower bound, if any, in the type of the
+ index, then convert to sizetype and multiply by the size of
+ the array element. */
+ if (! integer_zerop (low_bound))
+ index = fold (build2 (MINUS_EXPR, TREE_TYPE (index),
+ index, low_bound));
+
+ offset = size_binop (PLUS_EXPR, offset,
+ size_binop (MULT_EXPR,
+ convert (sizetype, index),
+ unit_size));
+ }
+ break;
+
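+ /* The real part is at the start of the complex value, so no
+ offset adjustment is needed. */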
+ case REALPART_EXPR:
+ break;
- offset = size_binop (PLUS_EXPR, offset, this_offset);
+ case IMAGPART_EXPR:
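+ /* The imaginary part follows the real part, so skip over the
+ real part by adding the access size (in *PBITSIZE) to the
+ bit offset. */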
bit_offset = size_binop (PLUS_EXPR, bit_offset,
- DECL_FIELD_BIT_OFFSET (field));
+ bitsize_int (*pbitsize));
+ break;
- /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
- }
+ case VIEW_CONVERT_EXPR:
+ if (keep_aligning && STRICT_ALIGNMENT
+ && (TYPE_ALIGN (TREE_TYPE (exp))
+ > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ < BIGGEST_ALIGNMENT)
+ && (TYPE_ALIGN_OK (TREE_TYPE (exp))
+ || TYPE_ALIGN_OK (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ goto done;
+ break;
- else if (TREE_CODE (exp) == ARRAY_REF
- || TREE_CODE (exp) == ARRAY_RANGE_REF)
- {
- tree index = TREE_OPERAND (exp, 1);
- tree low_bound = array_ref_low_bound (exp);
- tree unit_size = array_ref_element_size (exp);
-
- /* We assume all arrays have sizes that are a multiple of a byte.
- First subtract the lower bound, if any, in the type of the
- index, then convert to sizetype and multiply by the size of the
- array element. */
- if (! integer_zerop (low_bound))
- index = fold (build2 (MINUS_EXPR, TREE_TYPE (index),
- index, low_bound));
-
- offset = size_binop (PLUS_EXPR, offset,
- size_binop (MULT_EXPR,
- convert (sizetype, index),
- unit_size));
+ default:
+ goto done;
}
- /* We can go inside most conversions: all NON_VALUE_EXPRs, all normal
- conversions that don't change the mode, and all view conversions
- except those that need to "step up" the alignment. */
- else if (TREE_CODE (exp) != NON_LVALUE_EXPR
- && ! (TREE_CODE (exp) == VIEW_CONVERT_EXPR
- && ! ((TYPE_ALIGN (TREE_TYPE (exp))
- > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))))
- && STRICT_ALIGNMENT
- && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
- < BIGGEST_ALIGNMENT)
- && (TYPE_ALIGN_OK (TREE_TYPE (exp))
- || TYPE_ALIGN_OK (TREE_TYPE
- (TREE_OPERAND (exp, 0))))))
- && ! ((TREE_CODE (exp) == NOP_EXPR
- || TREE_CODE (exp) == CONVERT_EXPR)
- && (TYPE_MODE (TREE_TYPE (exp))
- == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))))
- break;
-
/* If any reference in the chain is volatile, the effect is volatile. */
if (TREE_THIS_VOLATILE (exp))
*pvolatilep = 1;
exp = TREE_OPERAND (exp, 0);
}
+ done:
/* If OFFSET is constant, see if we can return the whole thing as a
constant bit position. Otherwise, split it up. */
case COMPONENT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
- case NON_LVALUE_EXPR:
case VIEW_CONVERT_EXPR:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
return 1;
- /* ??? Sure they are handled, but get_inner_reference may return
- a different PBITSIZE, depending upon whether the expression is
- wrapped up in a NOP_EXPR or not, e.g. for bitfields. */
- case NOP_EXPR:
- case CONVERT_EXPR:
- return (TYPE_MODE (TREE_TYPE (t))
- == TYPE_MODE (TREE_TYPE (TREE_OPERAND (t, 0))));
-
default:
return 0;
}
if (exp_rtl)
break;
- nops = first_rtl_op (TREE_CODE (exp));
+ nops = TREE_CODE_LENGTH (TREE_CODE (exp));
for (i = 0; i < nops; i++)
if (TREE_OPERAND (exp, i) != 0
&& ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
return result;
}
+ /* Pass FALSE as the last argument to get_inner_reference although
+ we are expanding to RTL. The rationale is that we know how to
+ handle "aligning nodes" here: we can just bypass them because
+ they won't change the final object whose address will be returned
+ (they actually exist only for that purpose). */
inner = get_inner_reference (exp, &bitsize, &bitpos, &offset,
- &mode1, &unsignedp, &volatilep);
+ &mode1, &unsignedp, &volatilep, false);
break;
}
expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, modifier);
return const0_rtx;
}
- else if ((code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)
- && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
- /* If the second operand has no side effects, just evaluate
- the first. */
- return expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
- modifier);
else if (code == BIT_FIELD_REF)
{
expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier);
tree exp1 = TREE_OPERAND (exp, 0);
tree orig;
- if (code == MISALIGNED_INDIRECT_REF
- && !targetm.vectorize.misaligned_mem_ok (mode))
- abort ();
-
if (modifier != EXPAND_WRITE)
{
tree t;
orig = exp;
set_mem_attributes (temp, orig, 0);
+ /* Resolve the misalignment now, so that we don't have to remember
+ to resolve it later. Of course, this only works for reads. */
+ /* ??? When we get around to supporting writes, we'll have to handle
+ this in store_expr directly. The vectorizer isn't generating
+ those yet, however. */
+ if (code == MISALIGNED_INDIRECT_REF)
+ {
+ int icode;
+ rtx reg, insn;
+
+ gcc_assert (modifier == EXPAND_NORMAL);
+
+ /* The vectorizer should have already checked the mode. */
+ icode = movmisalign_optab->handlers[mode].insn_code;
+ gcc_assert (icode != CODE_FOR_nothing);
+
+ /* We've already validated the memory, and we're creating a
+ new pseudo destination. The predicates really can't fail. */
+ reg = gen_reg_rtx (mode);
+
+ /* Nor can the insn generator. */
+ insn = GEN_FCN (icode) (reg, temp);
+ emit_insn (insn);
+
+ return reg;
+ }
+
return temp;
}
{
tree array = TREE_OPERAND (exp, 0);
- tree low_bound = array_ref_low_bound (exp);
- tree index = convert (sizetype, TREE_OPERAND (exp, 1));
- HOST_WIDE_INT i;
-
- gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
-
- /* Optimize the special-case of a zero lower bound.
-
- We convert the low_bound to sizetype to avoid some problems
- with constant folding. (E.g. suppose the lower bound is 1,
- and its mode is QI. Without the conversion, (ARRAY
- +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
- +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
-
- if (! integer_zerop (low_bound))
- index = size_diffop (index, convert (sizetype, low_bound));
+ tree index = TREE_OPERAND (exp, 1);
/* Fold an expression like: "foo"[2].
This is not done in fold so it won't happen inside &.
&& modifier != EXPAND_MEMORY
&& TREE_CODE (array) == CONSTRUCTOR
&& ! TREE_SIDE_EFFECTS (array)
- && TREE_CODE (index) == INTEGER_CST
- && 0 > compare_tree_int (index,
- list_length (CONSTRUCTOR_ELTS
- (TREE_OPERAND (exp, 0)))))
+ && TREE_CODE (index) == INTEGER_CST)
{
tree elem;
- for (elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)),
- i = TREE_INT_CST_LOW (index);
- elem != 0 && i != 0; i--, elem = TREE_CHAIN (elem))
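+ /* Search for the element whose index matches; array
+ constructors may be sparse, so positions cannot simply
+ be counted. */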
+ for (elem = CONSTRUCTOR_ELTS (array);
+ (elem && !tree_int_cst_equal (TREE_PURPOSE (elem), index));
+ elem = TREE_CHAIN (elem))
;
- if (elem)
+ if (elem && !TREE_SIDE_EFFECTS (TREE_VALUE (elem)))
return expand_expr (fold (TREE_VALUE (elem)), target, tmode,
modifier);
}
tree offset;
int volatilep = 0;
tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
- &mode1, &unsignedp, &volatilep);
+ &mode1, &unsignedp, &volatilep, true);
rtx orig_op0;
/* If we got back the original object, something is wrong. Perhaps
/* Store data into beginning of memory target. */
store_expr (TREE_OPERAND (exp, 0),
adjust_address (target, TYPE_MODE (valtype), 0),
- modifier == EXPAND_STACK_PARM ? 2 : 0);
+ modifier == EXPAND_STACK_PARM);
else
{
* BITS_PER_UNIT),
(HOST_WIDE_INT) GET_MODE_BITSIZE (mode)),
0, TYPE_MODE (valtype), TREE_OPERAND (exp, 0),
- VOIDmode, 0, type, 0);
+ type, 0);
}
/* Return the entire union. */
}
op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier);
- op0 = REDUCE_BIT_FIELD (op0);
if (GET_MODE (op0) == mode)
- return op0;
+ ;
/* If OP0 is a constant, just convert it into the proper mode. */
- if (CONSTANT_P (op0))
+ else if (CONSTANT_P (op0))
{
tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
enum machine_mode inner_mode = TYPE_MODE (inner_type);
if (modifier == EXPAND_INITIALIZER)
- return simplify_gen_subreg (mode, op0, inner_mode,
- subreg_lowpart_offset (mode,
- inner_mode));
+ op0 = simplify_gen_subreg (mode, op0, inner_mode,
+ subreg_lowpart_offset (mode,
+ inner_mode));
else
- return convert_modes (mode, inner_mode, op0,
- TYPE_UNSIGNED (inner_type));
+ op0 = convert_modes (mode, inner_mode, op0,
+ TYPE_UNSIGNED (inner_type));
}
- if (modifier == EXPAND_INITIALIZER)
- return gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
+ else if (modifier == EXPAND_INITIALIZER)
+ op0 = gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
- if (target == 0)
- return
- convert_to_mode (mode, op0,
- TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ else if (target == 0)
+ op0 = convert_to_mode (mode, op0,
+ TYPE_UNSIGNED (TREE_TYPE
+ (TREE_OPERAND (exp, 0))));
else
- convert_move (target, op0,
- TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
- return target;
+ {
+ convert_move (target, op0,
+ TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ op0 = target;
+ }
+
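+ /* All successful paths funnel through here so that the
+ bit-field reduction is applied to the result exactly once. */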
+ return REDUCE_BIT_FIELD (op0);
case VIEW_CONVERT_EXPR:
op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier);
/* At this point, a MEM target is no longer useful; we will get better
code without it. */
- if (MEM_P (target))
+ if (! REG_P (target))
target = gen_reg_rtx (mode);
/* If op1 was placed in target, swap op0 and op1. */
op1 = tem;
}
+ /* We generate better code and avoid problems with op1 mentioning
+ target by forcing op1 into a pseudo if it isn't a constant. */
+ if (! CONSTANT_P (op1))
+ op1 = force_reg (mode, op1);
+
if (target != op0)
emit_move_insn (target, op0);
return const0_rtx;
case COND_EXPR:
- /* If it's void, we don't need to worry about computing a value. */
- if (VOID_TYPE_P (TREE_TYPE (exp)))
- {
- tree pred = TREE_OPERAND (exp, 0);
- tree then_ = TREE_OPERAND (exp, 1);
- tree else_ = TREE_OPERAND (exp, 2);
-
- gcc_assert (TREE_CODE (then_) == GOTO_EXPR
- && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL
- && TREE_CODE (else_) == GOTO_EXPR
- && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL);
-
- jumpif (pred, label_rtx (GOTO_DESTINATION (then_)));
- return expand_expr (else_, const0_rtx, VOIDmode, 0);
- }
+ /* A COND_EXPR with its type being VOID_TYPE represents a
+ conditional jump and is handled in
+ expand_gimple_cond_expr. */
+ gcc_assert (!VOID_TYPE_P (TREE_TYPE (exp)));
/* Note that COND_EXPRs whose type is a structure or union
are required to be constructed to contain assignments of
op1 = gen_label_rtx ();
jumpifnot (TREE_OPERAND (exp, 0), op0);
store_expr (TREE_OPERAND (exp, 1), temp,
- modifier == EXPAND_STACK_PARM ? 2 : 0);
+ modifier == EXPAND_STACK_PARM);
emit_jump_insn (gen_jump (op1));
emit_barrier ();
emit_label (op0);
store_expr (TREE_OPERAND (exp, 2), temp,
- modifier == EXPAND_STACK_PARM ? 2 : 0);
+ modifier == EXPAND_STACK_PARM);
emit_label (op1);
OK_DEFER_POP;
expand_assignment (lhs, convert (TREE_TYPE (rhs),
(TREE_CODE (rhs) == BIT_IOR_EXPR
? integer_one_node
- : integer_zero_node)),
- 0);
+ : integer_zero_node)));
do_pending_stack_adjust ();
emit_label (label);
return const0_rtx;
}
- expand_assignment (lhs, rhs, 0);
+ expand_assignment (lhs, rhs);
return const0_rtx;
}
case ADDR_EXPR:
return expand_expr_addr_expr (exp, target, tmode, modifier);
- /* COMPLEX type for Extended Pascal & Fortran */
case COMPLEX_EXPR:
- {
- enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
- rtx insns;
-
- /* Get the rtx code of the operands. */
- op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
-
- if (! target)
- target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
-
- start_sequence ();
-
- /* Move the real (op0) and imaginary (op1) parts to their location. */
- emit_move_insn (gen_realpart (mode, target), op0);
- emit_move_insn (gen_imagpart (mode, target), op1);
+ /* Get the rtx code of the operands. */
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
- insns = get_insns ();
- end_sequence ();
+ if (!target)
+ target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
- /* Complex construction should appear as a single unit. */
- /* If TARGET is a CONCAT, we got insns like RD = RS, ID = IS,
- each with a separate pseudo as destination.
- It's not correct for flow to treat them as a unit. */
- if (GET_CODE (target) != CONCAT)
- emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
- else
- emit_insn (insns);
+ /* Move the real (op0) and imaginary (op1) parts to their location. */
+ write_complex_part (target, op0, false);
+ write_complex_part (target, op1, true);
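+ /* Each part is stored directly by write_complex_part, so the
+ old start_sequence/emit_no_conflict_block grouping is no
+ longer needed, even when TARGET is a CONCAT. */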
- return target;
- }
+ return target;
case REALPART_EXPR:
op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- return gen_realpart (mode, op0);
+ return read_complex_part (op0, false);
case IMAGPART_EXPR:
op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- return gen_imagpart (mode, op0);
+ return read_complex_part (op0, true);
case RESX_EXPR:
expand_resx_expr (exp);
case POSTDECREMENT_EXPR:
case LOOP_EXPR:
case EXIT_EXPR:
- case LABELED_BLOCK_EXPR:
- case EXIT_BLOCK_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
/* Lowered by gimplify.c. */