struct move_by_pieces *);
static bool block_move_libcall_safe_for_call_parm (void);
static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned);
-static rtx emit_block_move_via_libcall (rtx, rtx, rtx);
+static rtx emit_block_move_via_libcall (rtx, rtx, rtx, bool);
static tree emit_block_move_libcall_fn (int);
static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
static rtx clear_by_pieces_1 (void *, HOST_WIDE_INT, enum machine_mode);
static void store_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode,
struct store_by_pieces *);
static bool clear_storage_via_clrmem (rtx, rtx, unsigned);
-static rtx clear_storage_via_libcall (rtx, rtx);
+static rtx clear_storage_via_libcall (rtx, rtx, bool);
static tree clear_storage_libcall_fn (int);
static rtx compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
}
if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
{
+ rtx new_from;
enum machine_mode full_mode
= smallest_mode_for_size (GET_MODE_BITSIZE (from_mode), MODE_INT);
gcc_assert (sext_optab->handlers[full_mode][from_mode].insn_code
!= CODE_FOR_nothing);
- emit_unop_insn (sext_optab->handlers[full_mode][from_mode].insn_code,
- to, from, UNKNOWN);
if (to_mode == full_mode)
- return;
+ {
+ emit_unop_insn (sext_optab->handlers[full_mode][from_mode].insn_code,
+ to, from, UNKNOWN);
+ return;
+ }
+
+ new_from = gen_reg_rtx (full_mode);
+ emit_unop_insn (sext_optab->handlers[full_mode][from_mode].insn_code,
+ new_from, from, UNKNOWN);
/* else proceed to integer conversions below. */
from_mode = full_mode;
+ from = new_from;
}
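Note on the convert_move hunk above: previously the partial-int extension was emitted straight into TO even when TO's mode was not the full integer mode, and the code then fell through to the integer conversions with FROM still in its original mode. The change extends into a fresh full-mode pseudo first and continues the conversion from there. A rough source-level analogy of the two-step widening (not GCC internals; names made up):

long long
widen_in_two_steps (short narrow)
{
  int full = narrow;         /* extend to the intermediate (full) mode first */
  return (long long) full;   /* then the ordinary integer widening below     */
}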
/* Now both modes are integers. */
switch (method)
{
case BLOCK_OP_NORMAL:
+ case BLOCK_OP_TAILCALL:
may_use_call = true;
break;
else if (emit_block_move_via_movmem (x, y, size, align))
;
else if (may_use_call)
- retval = emit_block_move_via_libcall (x, y, size);
+ retval = emit_block_move_via_libcall (x, y, size,
+ method == BLOCK_OP_TAILCALL);
else
emit_block_move_via_loop (x, y, size, align);
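The new BLOCK_OP_TAILCALL method behaves like BLOCK_OP_NORMAL, except that a memcpy libcall emitted on its behalf is marked as a candidate for sibling-call (tail-call) emission. A minimal source-level illustration, not part of the patch and with a made-up function name, assuming the expansion falls back to the library call:

void *
copy_buf (void *dst, const void *src, unsigned long n)
{
  /* In tail position; with BLOCK_OP_TAILCALL the resulting memcpy
     libcall may be emitted as a sibling call on targets that allow it.  */
  return __builtin_memcpy (dst, src, n);
}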
Return the return value from memcpy, 0 otherwise. */
static rtx
-emit_block_move_via_libcall (rtx dst, rtx src, rtx size)
+emit_block_move_via_libcall (rtx dst, rtx src, rtx size, bool tailcall)
{
rtx dst_addr, src_addr;
tree call_expr, arg_list, fn, src_tree, dst_tree, size_tree;
call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
call_expr = build3 (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)),
call_expr, arg_list, NULL_TREE);
+ CALL_EXPR_TAILCALL (call_expr) = tailcall;
retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0);
its length in bytes. */
rtx
-clear_storage (rtx object, rtx size)
+clear_storage (rtx object, rtx size, enum block_op_methods method)
{
enum machine_mode mode = GET_MODE (object);
unsigned int align;
+ gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);
+
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
just move a zero. Otherwise, do this a piece at a time. */
if (mode != BLKmode
else if (clear_storage_via_clrmem (object, size, align))
;
else
- return clear_storage_via_libcall (object, size);
+ return clear_storage_via_libcall (object, size,
+ method == BLOCK_OP_TAILCALL);
return NULL;
}
Return the return value of memset, 0 otherwise. */
static rtx
-clear_storage_via_libcall (rtx object, rtx size)
+clear_storage_via_libcall (rtx object, rtx size, bool tailcall)
{
tree call_expr, arg_list, fn, object_tree, size_tree;
enum machine_mode size_mode;
call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
call_expr = build3 (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)),
call_expr, arg_list, NULL_TREE);
+ CALL_EXPR_TAILCALL (call_expr) = tailcall;
retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0);
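clear_storage_via_libcall gets the same treatment for memset: the CALL_EXPR it builds now carries the tailcall flag, so a fallback memset in tail position can likewise become a sibling call. A made-up source-level example, for illustration only:

void *
zero_buf (void *p, unsigned long n)
{
  return __builtin_memset (p, 0, n);   /* tail position */
}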
the original object if it spans an even number of hard regs.
This special case is important for SCmode on 64-bit platforms
where the natural size of floating-point regs is 32-bit. */
- || (GET_CODE (cplx) == REG
+ || (REG_P (cplx)
&& REGNO (cplx) < FIRST_PSEUDO_REGISTER
&& hard_regno_nregs[REGNO (cplx)][cmode] % 2 == 0)
/* For MEMs we always try to make a "subreg", that is to adjust
the original object if it spans an even number of hard regs.
This special case is important for SCmode on 64-bit platforms
where the natural size of floating-point regs is 32-bit. */
- || (GET_CODE (cplx) == REG
+ || (REG_P (cplx)
&& REGNO (cplx) < FIRST_PSEUDO_REGISTER
&& hard_regno_nregs[REGNO (cplx)][cmode] % 2 == 0)
/* For MEMs we always try to make a "subreg", that is to adjust
}
if (size != const0_rtx)
- clear_storage (target, size);
+ clear_storage (target, size, BLOCK_OP_NORMAL);
if (label)
emit_label (label);
&& ! CONSTRUCTOR_ELTS (exp))
/* If the constructor is empty, clear the union. */
{
- clear_storage (target, expr_size (exp));
+ clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
cleared = 1;
}
|| ((HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (target))
== size)))
{
- clear_storage (target, GEN_INT (size));
+ clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
cleared = 1;
}
if (REG_P (target))
emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
else
- clear_storage (target, GEN_INT (size));
+ clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
cleared = 1;
}
if (REG_P (target))
emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
else
- clear_storage (target, GEN_INT (size));
+ clear_storage (target, GEN_INT (size), BLOCK_OP_NORMAL);
cleared = 1;
}
? !TREE_ASM_WRITTEN (var)
: !DECL_RTL_SET_P (var))
{
- if (TREE_CODE (var) == VAR_DECL && DECL_VALUE_EXPR (var))
+ if (TREE_CODE (var) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (var))
/* Should be ignored. */;
else if (lang_hooks.expand_decl (var))
/* OK. */;
/* If the DECL isn't in memory, then the DECL wasn't properly
marked TREE_ADDRESSABLE, which will be either a front-end
or a tree optimizer bug. */
- gcc_assert (GET_CODE (result) == MEM);
+ gcc_assert (MEM_P (result));
result = XEXP (result, 0);
/* ??? Is this needed anymore? */
case INDIRECT_REF:
{
tree exp1 = TREE_OPERAND (exp, 0);
- tree orig;
if (modifier != EXPAND_WRITE)
{
temp = gen_rtx_MEM (mode, op0);
- orig = REF_ORIGINAL (exp);
- if (!orig)
- orig = exp;
- set_mem_attributes (temp, orig, 0);
+ set_mem_attributes (temp, exp, 0);
/* Resolve the misalignment now, so that we don't have to remember
to resolve it later. Of course, this only works for reads. */
return temp;
}
+ case TARGET_MEM_REF:
+ {
+ struct mem_address addr;
+
+ get_address_description (exp, &addr);
+ op0 = addr_for_mem_ref (&addr, true);
+ op0 = memory_address (mode, op0);
+ temp = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (temp, TMR_ORIGINAL (exp), 0);
+ }
+ return temp;
+
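The new TARGET_MEM_REF case expands the addresses produced by the tree-level induction variable optimizations: get_address_description decomposes the reference, addr_for_mem_ref rebuilds it as an RTL address, and memory_address legitimizes it for MODE. A made-up source example of code that typically reaches this path after ivopts at -O2:

void
clear_ints (int *a, int n)
{
  int i;
  /* After ivopts the store is commonly a TARGET_MEM_REF of the form
     base + index * step + offset, which this case expands directly.  */
  for (i = 0; i < n; i++)
    a[i] = 0;
}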
case ARRAY_REF:
{
optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
- if (mode == GET_MODE_WIDER_MODE (innermode))
+ if (mode == GET_MODE_2XWIDER_MODE (innermode))
{
if (this_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
{
return expand_divmod (0, code, mode, op0, op1, target, unsignedp);
case RDIV_EXPR:
- /* Emit a/b as a*(1/b). Later we may manage CSE the reciprocal saving
- expensive divide. If not, combine will rebuild the original
- computation. */
- if (flag_unsafe_math_optimizations && optimize && !optimize_size
- && TREE_CODE (type) == REAL_TYPE
- && !real_onep (TREE_OPERAND (exp, 0)))
- return expand_expr (build2 (MULT_EXPR, type, TREE_OPERAND (exp, 0),
- build2 (RDIV_EXPR, type,
- build_real (type, dconst1),
- TREE_OPERAND (exp, 1))),
- target, tmode, modifier);
-
goto binop;
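The RTL-expansion copy of the a/b -> a*(1/b) rewrite is removed; its purpose was to let CSE reuse one reciprocal across several divisions, which is presumably now handled before expansion. A made-up source-level example of where that rewrite pays off under -funsafe-math-optimizations:

double
scale_sum (double a, double b, double d)
{
  /* As a*(1/d) + b*(1/d): one divide and two multiplies instead of
     two divides, once the single 1/d is CSEd.  */
  return a / d + b / d;
}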
case TRUNC_MOD_EXPR:
if ((code == LT && integer_zerop (arg1))
|| (! only_cheap && code == GE && integer_zerop (arg1)))
;
- else if (BRANCH_COST >= 0
- && ! only_cheap && (code == NE || code == EQ)
+ else if (! only_cheap && (code == NE || code == EQ)
&& TREE_CODE (type) != REAL_TYPE
&& ((abs_optab->handlers[(int) operand_mode].insn_code
!= CODE_FOR_nothing)