      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
        SUBST (XEXP (x, 1),
               force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
-                             targetm.shift_truncation_mask (GET_MODE (x)),
+                             ((unsigned HOST_WIDE_INT) 1
+                              << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
+                             - 1,
                              0));
      break;
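
For a power-of-two width the new mask reduces to GET_MODE_BITSIZE (GET_MODE (x)) - 1, i.e. exactly the modulo-width truncation that SHIFT_COUNT_TRUNCATED promises. A minimal standalone C sketch (not part of the patch, with a stand-in for GCC's exact_log2) checking the arithmetic:

    /* Sketch only: verify ((uhwi) 1 << exact_log2 (bitsize)) - 1 == bitsize - 1.  */
    #include <assert.h>
    #include <stdint.h>

    static int
    exact_log2 (uint64_t x)            /* stand-in for GCC's exact_log2 */
    {
      int log = 0;
      if (x == 0 || (x & (x - 1)) != 0)
        return -1;                     /* not an exact power of two */
      while ((x >>= 1) != 0)
        log++;
      return log;
    }

    int
    main (void)
    {
      assert ((((uint64_t) 1 << exact_log2 (32)) - 1) == 31);  /* 32-bit mode */
      assert ((((uint64_t) 1 << exact_log2 (64)) - 1) == 63);  /* 64-bit mode */
      return 0;
    }
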
  rtx *cc_use;

  /* (set (pc) (return)) gets written as (return).  */
- if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
+ if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;
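
ANY_RETURN_P also matches simple_return, which the plain GET_CODE (src) == RETURN test missed; rtl.h defines the macro along these lines:

    /* rtl.h (paraphrased): true for both flavors of return.  */
    #define ANY_RETURN_P(X) \
      (GET_CODE (X) == RETURN || GET_CODE (X) == SIMPLE_RETURN)
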
  /* Now that we know for sure which bits of SRC we are using, see if we can

      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
-     if (rtx_cost (x, SET, optimize_this_for_speed_p)
-         > rtx_cost (temp2, SET, optimize_this_for_speed_p))
+     if (set_src_cost (x, optimize_this_for_speed_p)
+         > set_src_cost (temp2, optimize_this_for_speed_p))
        return temp2;
-     else if (rtx_cost (x, SET, optimize_this_for_speed_p)
-              > rtx_cost (temp, SET, optimize_this_for_speed_p))
+     else if (set_src_cost (x, optimize_this_for_speed_p)
+              > set_src_cost (temp, optimize_this_for_speed_p))
        return temp;
      else
        return x;
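
These rewrites are behavior-preserving: set_src_cost is a thin wrapper around rtx_cost with a SET outer code. Its rtl.h definition is approximately:

    /* rtl.h (approximate): cost of computing X as the source of a SET.  */
    static inline int
    set_src_cost (rtx x, bool speed_p)
    {
      return rtx_cost (x, SET, /* opno = */ 1, speed_p);
    }
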
          /* Prefer ZERO_EXTENSION, since it gives more information to
             backends.  */
-         if (rtx_cost (temp, SET, optimize_this_for_speed_p)
-             <= rtx_cost (temp1, SET, optimize_this_for_speed_p))
+         if (set_src_cost (temp, optimize_this_for_speed_p)
+             <= set_src_cost (temp1, optimize_this_for_speed_p))
            return temp;
          return temp1;
        }
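
A concrete example of that preference (illustrative RTL, not from the patch): with zero_extend, nonzero_bits can see that the high bits are zero, so a later pass may drop a redundant mask; sign_extend promises nothing about them.

    /* (and:SI (zero_extend:SI (reg:QI 100)) (const_int 255))
         --> (zero_extend:SI (reg:QI 100))     ; mask provably redundant
       No such simplification is safe for sign_extend.  */
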
          /* Prefer ZERO_EXTENSION, since it gives more information to
             backends.  */
-         if (rtx_cost (temp1, SET, optimize_this_for_speed_p)
-             < rtx_cost (temp, SET, optimize_this_for_speed_p))
+         if (set_src_cost (temp1, optimize_this_for_speed_p)
+             < set_src_cost (temp, optimize_this_for_speed_p))
            temp = temp1;
        }
      pos_rtx = temp;
           && GET_CODE (lhs) == ASHIFT
           && CONST_INT_P (XEXP (lhs, 1))
           && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
+          && INTVAL (XEXP (lhs, 1)) >= 0
           && INTVAL (rhs) < mode_width)
         {
           new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
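
The added test rejects a negative inner shift count, which the existing comparison lets through. For instance (illustrative values):

    /* With lhs = (ashift X (const_int -1)) and rhs = (const_int 3),
       INTVAL (rhs) >= INTVAL (XEXP (lhs, 1)) holds (3 >= -1), so without
       the new test an extraction would be built with the bogus position
       INTVAL (rhs) - INTVAL (XEXP (lhs, 1)) = 4, from a shift whose
       negative count is undefined to begin with.  */
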
      y = simplify_gen_binary (AND, GET_MODE (x),
                               XEXP (x, 0), GEN_INT (cval));
-     if (rtx_cost (y, SET, optimize_this_for_speed_p)
-         < rtx_cost (x, SET, optimize_this_for_speed_p))
+     if (set_src_cost (y, optimize_this_for_speed_p)
+         < set_src_cost (x, optimize_this_for_speed_p))
        x = y;
    }
  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
                                                     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
-     && rtx_cost (tmp, SET, optimize_this_for_speed_p)
-        < rtx_cost (x, SET, optimize_this_for_speed_p))
+     && (set_src_cost (tmp, optimize_this_for_speed_p)
+         < set_src_cost (x, optimize_this_for_speed_p)))
    return tmp;
  return NULL_RTX;
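
For context (illustrative, not part of the patch): apply_distributive_law attempts factorings such as the one below, and the set_src_cost comparison keeps the result only when it is genuinely cheaper and has actually changed shape.

    /* (ior (and (reg A) (const_int C)) (and (reg B) (const_int C)))
         --> (and (ior (reg A) (reg B)) (const_int C))
       One AND instead of two, when the target's costs agree.  */
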
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
-   orig_count &= targetm.shift_truncation_mask (mode);
+   orig_count &= GET_MODE_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */
    }
  break;
+       case REG_ARGS_SIZE:
+         {
+           /* ??? How to distribute between i3-i1.  Assume i3 contains the
+              entire adjustment.  Assert i3 contains at least some adjust.  */
+           int old_size, args_size = INTVAL (XEXP (note, 0));
+           old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
+           gcc_assert (old_size != args_size);
+         }
+         break;
+
        case REG_NORETURN:
        case REG_SETJMP:
          /* These notes must remain with the call.  It should not be
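
A note on the new REG_ARGS_SIZE case, assuming the era's fixup_args_size_notes interface from expr.c:

    /* int fixup_args_size_notes (rtx prev, rtx last, int end_args_size);
       Re-annotates every stack adjustment in (PREV, LAST] with a
       REG_ARGS_SIZE note and returns the args size in effect before the
       sequence.  Had i3 contained no adjustment at all, it would return
       END_ARGS_SIZE unchanged, which is exactly the case that
       gcc_assert (old_size != args_size) rules out.  */
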