OSDN Git Service

2009-05-06 H.J. Lu <hongjiu.lu@intel.com>
authorhjl <hjl@138bc75d-0d04-0410-961f-82ee72b054a4>
Wed, 6 May 2009 13:50:04 +0000 (13:50 +0000)
committerhjl <hjl@138bc75d-0d04-0410-961f-82ee72b054a4>
Wed, 6 May 2009 13:50:04 +0000 (13:50 +0000)
* config/i386/i386.md (unnamed inc/dec peephole): Use
optimize_insn_for_size_p instead of optimize_size.
* config/i386/predicates.md (incdec_operand): Likewise.
(aligned_operand): Likewise.
* config/i386/sse.md (divv8sf3): Likewise.
(sqrtv8sf2): Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@147178 138bc75d-0d04-0410-961f-82ee72b054a4

gcc/ChangeLog
gcc/config/i386/i386.md
gcc/config/i386/predicates.md
gcc/config/i386/sse.md

index 02af9fe..0594686 100644 (file)
@@ -1,5 +1,15 @@
 2009-05-06  H.J. Lu  <hongjiu.lu@intel.com>
 
+       * config/i386/i386.md (unnamed inc/dec peephole): Use
+       optimize_insn_for_size_p instead of optimize_size.
+       * config/i386/predicates.md (incdec_operand): Likewise.
+       (aligned_operand): Likewise.
+       * config/i386/sse.md (divv8sf3): Likewise.
+       (sqrtv8sf2): Likewise.
+
+2009-05-06  H.J. Lu  <hongjiu.lu@intel.com>
+
        * config/i386/i386.c (ix86_build_signbit_mask): Make it static.
 
        * config/i386/i386-protos.h (ix86_build_signbit_mask): Removed.
index 2979431..1bb96fd 100644 (file)
        (match_operator 1 "compare_operator"
          [(match_operand 2 "register_operand" "")
           (match_operand 3 "const_int_operand" "")]))]
-  "(((!TARGET_FUSE_CMP_AND_BRANCH || optimize_size)
+  "(((!TARGET_FUSE_CMP_AND_BRANCH || optimize_insn_for_size_p ())
      && incdec_operand (operands[3], GET_MODE (operands[3])))
     || (!TARGET_FUSE_CMP_AND_BRANCH
        && INTVAL (operands[3]) == 128))
index f1c7103..785ff5d 100644 (file)
 {
   /* On Pentium4, the inc and dec operations causes extra dependency on flag
      registers, since carry flag is not set.  */
-  if (!TARGET_USE_INCDEC && !optimize_size)
+  if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
     return 0;
   return op == const1_rtx || op == constm1_rtx;
 })
 
   /* All patterns using aligned_operand on memory operands ends up
      in promoting memory operand to 64bit and thus causing memory mismatch.  */
-  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_size)
+  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
     return 0;
 
   /* Don't even try to do any aligned optimizations with volatiles.  */
index ece1b43..ae23746 100644 (file)
 {
   ix86_fixup_binary_operands_no_copy (DIV, V8SFmode, operands);
 
-  if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
+  if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
       && flag_finite_math_only && !flag_trapping_math
       && flag_unsafe_math_optimizations)
     {
        (sqrt:V8SF (match_operand:V8SF 1 "nonimmediate_operand" "")))]
   "TARGET_AVX"
 {
-  if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
+  if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
       && flag_finite_math_only && !flag_trapping_math
       && flag_unsafe_math_optimizations)
     {