rtx *cc_use;
/* (set (pc) (return)) gets written as (return). */
- if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
+ if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
return src;
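/* Editorial note: ANY_RETURN_P widens the old GET_CODE (src) == RETURN
   test so the simplification also fires for the SIMPLE_RETURN rtx used
   by shrink-wrapped epilogues.  A sketch of the macro as defined in
   rtl.h (shown for reference, not part of this patch):

     #define ANY_RETURN_P(X) \
       (GET_CODE (X) == RETURN || GET_CODE (X) == SIMPLE_RETURN)  */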
/* Now that we know for sure which bits of SRC we are using, see if we can
rtx temp2 = expand_compound_operation (temp);
/* Make sure this is a profitable operation. */
- if (rtx_cost (x, SET, optimize_this_for_speed_p)
- > rtx_cost (temp2, SET, optimize_this_for_speed_p))
+ if (set_src_cost (x, optimize_this_for_speed_p)
+ > set_src_cost (temp2, optimize_this_for_speed_p))
return temp2;
- else if (rtx_cost (x, SET, optimize_this_for_speed_p)
- > rtx_cost (temp, SET, optimize_this_for_speed_p))
+ else if (set_src_cost (x, optimize_this_for_speed_p)
+ > set_src_cost (temp, optimize_this_for_speed_p))
return temp;
else
return x;
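/* Editorial note: the hunks above and below all perform the same
   conversion, rtx_cost (X, SET, speed) becoming set_src_cost (X, speed).
   A sketch of the helper, assuming the rtl.h wrappers introduced
   alongside this change (paraphrased, not part of this patch):

     static inline int
     set_src_cost (rtx x, bool speed_p)
     {
       /* Cost X as operand 1 of a SET, i.e. as a SET_SRC.  */
       return rtx_cost (x, SET, 1, speed_p);
     }

   The conversion is effectively mechanical, so every cost comparison
   in this file keeps its old meaning.  */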
/* Prefer ZERO_EXTENSION, since it gives more information to
backends. */
- if (rtx_cost (temp, SET, optimize_this_for_speed_p)
- <= rtx_cost (temp1, SET, optimize_this_for_speed_p))
+ if (set_src_cost (temp, optimize_this_for_speed_p)
+ <= set_src_cost (temp1, optimize_this_for_speed_p))
return temp;
return temp1;
}
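/* Editorial gloss on the tie-break above (and the identical one in the
   next hunk): at equal cost, ZERO_EXTEND is the better canonical form
   because later simplifications can see through it.  For example,
   nonzero_bits () on (zero_extend:SI (reg:QI X)) is 0xff, letting a
   subsequent (and:SI ... (const_int 255)) be deleted, whereas the
   upper bits of a SIGN_EXTEND of an unknown register stay unknown.  */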
/* Prefer ZERO_EXTENSION, since it gives more information to
backends. */
- if (rtx_cost (temp1, SET, optimize_this_for_speed_p)
- < rtx_cost (temp, SET, optimize_this_for_speed_p))
+ if (set_src_cost (temp1, optimize_this_for_speed_p)
+ < set_src_cost (temp, optimize_this_for_speed_p))
temp = temp1;
}
pos_rtx = temp;
y = simplify_gen_binary (AND, GET_MODE (x),
XEXP (x, 0), GEN_INT (cval));
- if (rtx_cost (y, SET, optimize_this_for_speed_p)
- < rtx_cost (x, SET, optimize_this_for_speed_p))
+ if (set_src_cost (y, optimize_this_for_speed_p)
+ < set_src_cost (x, optimize_this_for_speed_p))
x = y;
}
tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
new_op0, new_op1));
if (GET_CODE (tmp) != outer_code
- && rtx_cost (tmp, SET, optimize_this_for_speed_p)
- < rtx_cost (x, SET, optimize_this_for_speed_p))
+ && (set_src_cost (tmp, optimize_this_for_speed_p)
+ < set_src_cost (x, optimize_this_for_speed_p)))
return tmp;
return NULL_RTX;
later on, and then we wouldn't know whether to sign- or
zero-extend. */
mode = GET_MODE (XEXP (op0, 0));
- if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ if (GET_MODE_CLASS (mode) == MODE_INT
&& ! unsigned_comparison_p
- && val_signbit_known_clear_p (mode, const_op)
+ && HWI_COMPUTABLE_MODE_P (mode)
+ && trunc_int_for_mode (const_op, mode) == const_op
&& have_insn_for (COMPARE, mode))
{
op0 = XEXP (op0, 0);
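/* Editorial note on the rewritten guards: GET_MODE_CLASS (mode) ==
   MODE_INT already excludes VOIDmode, so the explicit VOIDmode test
   was redundant.  The trunc_int_for_mode test states the real
   requirement directly: CONST_OP must be exactly representable as a
   sign-extended value of MODE.  For QImode, for instance:

     trunc_int_for_mode (-56, QImode) == -56    narrowing allowed
     trunc_int_for_mode (200, QImode) == -56    comparison left alone

   The old val_signbit_known_clear_p test rejected every constant with
   the mode's sign bit set, so in-range negative constants such as -56
   were never narrowed.  HWI_COMPUTABLE_MODE_P guarantees the mode's
   precision fits in a HOST_WIDE_INT, keeping the truncation test
   meaningful.  */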
case ZERO_EXTEND:
mode = GET_MODE (XEXP (op0, 0));
- if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ if (GET_MODE_CLASS (mode) == MODE_INT
&& (unsigned_comparison_p || equality_comparison_p)
&& HWI_COMPUTABLE_MODE_P (mode)
- && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode))
+ && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
+ && const_op >= 0
&& have_insn_for (COMPARE, mode))
{
op0 = XEXP (op0, 0);
break;
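/* Editorial note: for ZERO_EXTEND the constant must lie in
   [0, GET_MODE_MASK (mode)].  The bound is now inclusive, so e.g.
   (eq (zero_extend:SI (reg:QI X)) (const_int 255)) can be narrowed
   even though 255 == GET_MODE_MASK (QImode); the old strict '<'
   rejected it.  The separate const_op >= 0 test matters when MODE is
   as wide as HOST_WIDE_INT: GET_MODE_MASK (mode) is then all ones,
   and the unsigned comparison alone would let negative constants
   through.  */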
case REG_ARGS_SIZE:
- {
- /* ??? How to distribute between i3-i1. Assume i3 contains the
- entire adjustment. Assert i3 contains at least some adjust. */
- int old_size, args_size = INTVAL (XEXP (note, 0));
- old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
- gcc_assert (old_size != args_size);
- }
+ /* ??? How to distribute between i3-i1. Assume i3 contains the
+ entire adjustment. Assert i3 contains at least some adjust. */
+ if (!noop_move_p (i3))
+ {
+ int old_size, args_size = INTVAL (XEXP (note, 0));
+ old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
+ gcc_assert (old_size != args_size);
+ }
break;
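/* Editorial note: the new noop_move_p guard presumably exists because
   a no-op I3 (e.g. a combined result of the hypothetical shape
   (set (reg:SI 0) (reg:SI 0))) performs no stack adjustment, so
   fixup_args_size_notes would find nothing to annotate, old_size
   would equal args_size, and the assert would trip.  A no-op move is
   deleted later anyway, taking the note with it.  */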
case REG_NORETURN:
case REG_SETJMP:
+ case REG_TM:
/* These notes must remain with the call. It should not be
possible for both I2 and I3 to be a call. */
if (CALL_P (i3))