* gcc-interface/utils.c (MAX_FIXED_MODE_SIZE): Delete.
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index 3bf2dc7..47828bf 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
 #include "tree-pass.h"
 #include "df.h"
 
-#ifndef HAVE_conditional_execution
-#define HAVE_conditional_execution 0
-#endif
-
 /* The number of rounds.  In most cases there will only be 4 rounds, but
    when partitioning hot and cold basic blocks into separate sections of
    the .o file there will be an extra round.*/
@@ -1420,7 +1416,7 @@ fix_up_fall_thru_edges (void)
 
                      fall_thru_label = block_label (fall_thru->dest);
 
-                     if (old_jump && fall_thru_label)
+                     if (old_jump && JUMP_P (old_jump) && fall_thru_label)
                        invert_worked = invert_jump (old_jump,
                                                     fall_thru_label,0);
                      if (invert_worked)
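
The added JUMP_P check matters because old_jump is taken from the end of the source block, and the last insn of a block that merely falls through need not be a jump at all, while invert_jump is only meaningful on a JUMP_INSN.  A minimal sketch of the guarded pattern, using GCC's internal RTL API with variable names modeled on the surrounding function (cur_bb and fall_thru are assumed to be the block and edge being fixed up; this is illustrative, not code from the patch):

  /* Sketch only: skip blocks whose final insn is not a jump instead of
     handing a non-jump insn to invert_jump.  */
  rtx last_insn = BB_END (cur_bb);                  /* last insn of the block */
  rtx fall_thru_label = block_label (fall_thru->dest);

  if (last_insn && JUMP_P (last_insn) && fall_thru_label)
    {
      if (invert_jump (last_insn, fall_thru_label, 0))
        {
          /* The inverted jump now targets the old fall-through
             destination; edge redirection follows (omitted here).  */
        }
    }
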
@@ -2177,7 +2173,6 @@ struct rtl_opt_pass pass_duplicate_computed_gotos =
 static void
 partition_hot_cold_basic_blocks (void)
 {
-  basic_block cur_bb;
   edge *crossing_edges;
   int n_crossing_edges;
   int max_edges = 2 * last_basic_block;
@@ -2187,13 +2182,6 @@ partition_hot_cold_basic_blocks (void)
 
   crossing_edges = XCNEWVEC (edge, max_edges);
 
-  cfg_layout_initialize (0);
-
-  FOR_EACH_BB (cur_bb)
-    if (cur_bb->index >= NUM_FIXED_BLOCKS
-       && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
-      cur_bb->aux = cur_bb->next_bb;
-
   find_rarely_executed_basic_blocks_and_crossing_edges (&crossing_edges,
                                                        &n_crossing_edges,
                                                        &max_edges);
@@ -2202,8 +2190,6 @@ partition_hot_cold_basic_blocks (void)
     fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);
 
   free (crossing_edges);
-
-  cfg_layout_finalize ();
 }
 \f
 static bool
@@ -2225,7 +2211,15 @@ rest_of_handle_reorder_blocks (void)
      splitting possibly introduced more crossjumping opportunities.  */
   cfg_layout_initialize (CLEANUP_EXPENSIVE);
 
-  if (flag_reorder_blocks || flag_reorder_blocks_and_partition)
+  if ((flag_reorder_blocks || flag_reorder_blocks_and_partition)
+      /* Don't reorder blocks when optimizing for size because extra jump insns may
+        be created; also barrier may create extra padding.
+
+        More correctly we should have a block reordering mode that tried to
+        minimize the combined size of all the jumps.  This would more or less
+        automatically remove extra jumps, but would also try to use more short
+        jumps instead of long jumps.  */
+      && optimize_function_for_speed_p (cfun))
     {
       reorder_basic_blocks ();
       cleanup_cfg (CLEANUP_EXPENSIVE);
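
The new guard follows the usual idiom for transforms that can grow code size: instead of testing the global optimize_size flag, the pass asks whether the current function is being optimized for speed, which also takes per-function hot/cold information into account.  A minimal sketch of that idiom (the function name and its body are placeholders, not taken from this patch):

  static void
  example_size_sensitive_transform (void)
  {
    /* Skip the transform when the function is optimized for size
       (-Os, or a function known to be cold): block reordering could add
       jump insns and barrier padding without a speed payoff.  */
    if (!optimize_function_for_speed_p (cfun))
      return;

    /* ... size-increasing transformation would go here ... */
  }
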
@@ -2292,12 +2286,10 @@ struct rtl_opt_pass pass_partition_blocks =
   NULL,                                 /* next */
   0,                                    /* static_pass_number */
   TV_REORDER_BLOCKS,                    /* tv_id */
-  0,                                    /* properties_required */
+  PROP_cfglayout,                       /* properties_required */
   0,                                    /* properties_provided */
   0,                                    /* properties_destroyed */
   0,                                    /* todo_flags_start */
   TODO_dump_func | TODO_verify_rtl_sharing/* todo_flags_finish */
  }
 };
-
-
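
Switching properties_required from 0 to PROP_cfglayout is what lets the explicit cfg_layout_initialize / cfg_layout_finalize calls and the hand-built bb->aux chain be dropped from partition_hot_cold_basic_blocks above: the pass now declares that it must run while the CFG is already in cfglayout mode, and the pass manager checks that requirement.  A hedged sketch of a pass descriptor carrying such a requirement (the pass name, execute function, and tv_id value are placeholders, not from this patch):

static unsigned int
execute_example_pass (void)
{
  /* The CFG is in cfglayout mode here, so the pass can rely on its
     invariants (e.g. no barriers between blocks) without setting the
     mode up or tearing it down itself.  */
  return 0;
}

struct rtl_opt_pass pass_example =
{
 {
  RTL_PASS,
  "example",                            /* name */
  NULL,                                 /* gate */
  execute_example_pass,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};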