* config/i386/i386.md (unnamed inc/dec peephole): Use
optimize_insn_for_size_p instead of optimize_size.
* config/i386/predicates.md (incdec_operand): Likewise.
(aligned_operand): Likewise.
* config/i386/sse.md (divv8sf3): Likewise.
(sqrtv8sf2): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@147178 138bc75d-0d04-0410-961f-82ee72b054a4
2009-05-06 H.J. Lu <hongjiu.lu@intel.com>
+ * config/i386/i386.md (unnamed inc/dec peephole): Use
+ optimize_insn_for_size_p instead of optimize_size.
+ * config/i386/predicates.md (incdec_operand): Likewise.
+ (aligned_operand): Likewise.
+ * config/i386/sse.md (divv8sf3): Likewise.
+ (sqrtv8sf2): Likewise.
+
+2009-05-06 H.J. Lu <hongjiu.lu@intel.com>
+
* config/i386/i386.c (ix86_build_signbit_mask): Make it static.
* config/i386/i386-protos.h (ix86_build_signbit_mask): Removed.
(match_operator 1 "compare_operator"
[(match_operand 2 "register_operand" "")
(match_operand 3 "const_int_operand" "")]))]
- "(((!TARGET_FUSE_CMP_AND_BRANCH || optimize_size)
+ "(((!TARGET_FUSE_CMP_AND_BRANCH || optimize_insn_for_size_p ())
&& incdec_operand (operands[3], GET_MODE (operands[3])))
|| (!TARGET_FUSE_CMP_AND_BRANCH
&& INTVAL (operands[3]) == 128))
{
/* On Pentium4, the inc and dec operations causes extra dependency on flag
registers, since carry flag is not set. */
- if (!TARGET_USE_INCDEC && !optimize_size)
+ if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
return 0;
return op == const1_rtx || op == constm1_rtx;
})
/* All patterns using aligned_operand on memory operands ends up
in promoting memory operand to 64bit and thus causing memory mismatch. */
- if (TARGET_MEMORY_MISMATCH_STALL && !optimize_size)
+ if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
return 0;
/* Don't even try to do any aligned optimizations with volatiles. */
{
ix86_fixup_binary_operands_no_copy (DIV, V8SFmode, operands);
- if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
+ if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
(sqrt:V8SF (match_operand:V8SF 1 "nonimmediate_operand" "")))]
"TARGET_AVX"
{
- if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
+ if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{