
2004-10-18 Andrew Haley <aph@redhat.com>
[pf3gnuchains/gcc-fork.git] / gcc / bb-reorder.c
index 0f8719e..1d0b097 100644 (file)
@@ -1,5 +1,5 @@
 /* Basic block reordering routines for the GNU compiler.
-   Copyright (C) 2000, 2002, 2003 Free Software Foundation, Inc.
+   Copyright (C) 2000, 2002, 2003, 2004 Free Software Foundation, Inc.
 
    This file is part of GCC.
 
 #include "cfglayout.h"
 #include "fibheap.h"
 #include "target.h"
+#include "function.h"
+#include "tm_p.h"
+#include "obstack.h"
+#include "expr.h"
+#include "regs.h"
+
+/* The number of rounds.  In most cases there will only be 4 rounds, but
+   when partitioning hot and cold basic blocks into separate sections of
+   the .o file there will be an extra round.  */
+#define N_ROUNDS 5
+
+/* Stubs in case we don't have a return insn.
+   We have to check at runtime too, not only at compile time.  */
+
+#ifndef HAVE_return
+#define HAVE_return 0
+#define gen_return() NULL_RTX
+#endif
 
-/* The number of rounds.  */
-#define N_ROUNDS 4
 
 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE.  */
-static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0};
+static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
 
 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0.  */
-static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0};
+static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
 
 /* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry
    block the edge destination is not duplicated while connecting traces.  */
@@ -121,8 +137,7 @@ static bbro_basic_block_data *bbd;
 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
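(As a worked example of the macro: GET_ARRAY_SIZE (16) evaluates to ((16 / 4) + 1) * 5 == 25, so each resize leaves roughly 25% headroom above the requested size.)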
 
 /* Free the memory and set the pointer to NULL.  */
-#define FREE(P) \
-  do { if (P) { free (P); P = 0; } else { abort (); } } while (0)
+#define FREE(P) (gcc_assert (P), free (P), P = 0)
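Unlike the old do/while statement form, the rewritten FREE is a comma expression, so it can also appear where an expression is required. A minimal standalone sketch of the behavior, with <assert.h>'s assert standing in for gcc_assert (illustrative, not part of the change):

#include <assert.h>
#include <stdlib.h>

#define gcc_assert(EXPR) assert (EXPR)            /* stand-in for GCC's macro */
#define FREE(P) (gcc_assert (P), free (P), P = 0)

int
main (void)
{
  int *p = malloc (sizeof (int));
  FREE (p);                     /* frees P and nulls it; asserts P != NULL */
  return p == 0 ? 0 : 1;
}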
 
 /* Structure for holding information about a trace.  */
 struct trace
@@ -146,14 +161,64 @@ static void find_traces (int *, struct trace *);
 static basic_block rotate_loop (edge, struct trace *, int);
 static void mark_bb_visited (basic_block, int);
 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
-                                int, fibheap_t *);
+                                int, fibheap_t *, int);
 static basic_block copy_bb (basic_block, edge, basic_block, int);
 static fibheapkey_t bb_to_key (basic_block);
-static bool better_edge_p (basic_block, edge, int, int, int, int);
+static bool better_edge_p (basic_block, edge, int, int, int, int, edge);
 static void connect_traces (int, struct trace *);
 static bool copy_bb_p (basic_block, int);
 static int get_uncond_jump_length (void);
+static bool push_to_next_round_p (basic_block, int, int, int, gcov_type);
+static void add_unlikely_executed_notes (void);
+static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *, 
+                                                                 int *,
+                                                                 int *);
+static void mark_bb_for_unlikely_executed_section  (basic_block);
+static void add_labels_and_missing_jumps (edge *, int);
+static void add_reg_crossing_jump_notes (void);
+static void fix_up_fall_thru_edges (void);
+static void fix_edges_for_rarely_executed_code (edge *, int);
+static void fix_crossing_conditional_branches (void);
+static void fix_crossing_unconditional_branches (void);
 \f
+/* Check to see if bb should be pushed into the next round of trace
+   collections or not.  Reasons for pushing the block forward are:
+   1) the block is cold, we are doing partitioning, and there will be
+   another round (cold partition blocks are not supposed to be
+   collected into traces until the very last round); or 2) there will
+   be another round, and the basic block is not "hot enough" for the
+   current round of trace collection.  */
+
+static bool
+push_to_next_round_p (basic_block bb, int round, int number_of_rounds,
+                     int exec_th, gcov_type count_th)
+{
+  bool there_exists_another_round;
+  bool cold_block;
+  bool block_not_hot_enough;
+  bool next_round_is_last;
+
+  there_exists_another_round = round < number_of_rounds - 1;
+  next_round_is_last = round + 1 == number_of_rounds - 1;
+
+  cold_block = (flag_reorder_blocks_and_partition 
+               && BB_PARTITION (bb) == BB_COLD_PARTITION);
+
+  block_not_hot_enough = (bb->frequency < exec_th 
+                         || bb->count < count_th
+                         || probably_never_executed_bb_p (bb));
+
+  if (flag_reorder_blocks_and_partition
+      && next_round_is_last
+      && BB_PARTITION (bb) != BB_COLD_PARTITION)
+    return false;
+  else if (there_exists_another_round
+      && (cold_block || block_not_hot_enough))
+    return true;
+  else 
+    return false;
+}
+
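To make the decision table above concrete, here is a small standalone model of the same predicate, with the CFG-dependent tests reduced to booleans (the names are illustrative, not GCC API):

#include <stdbool.h>
#include <stdio.h>

/* COLD models "BB_PARTITION (bb) == BB_COLD_PARTITION"; NOT_HOT_ENOUGH
   models the frequency/count/probably_never_executed tests.  */
static bool
push_p (int round, int n_rounds, bool partitioning,
        bool cold, bool not_hot_enough)
{
  bool another_round = round < n_rounds - 1;
  bool next_is_last = round + 1 == n_rounds - 1;

  if (partitioning && next_is_last && !cold)
    return false;               /* every hot block is placed by round N-2 */
  return another_round && ((partitioning && cold) || not_hot_enough);
}

int
main (void)
{
  /* With partitioning enabled there are 5 rounds: a cold block is
     deferred through rounds 0..3 and only collected in round 4.  */
  for (int round = 0; round < 5; round++)
    printf ("round %d: %s\n", round,
            push_p (round, 5, true, true, false) ? "defer" : "collect");
  return 0;
}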
 /* Find the traces for Software Trace Cache.  Chain each trace through
    RBI()->next.  Store the number of traces to N_TRACES and description of
    traces to TRACES.  */
@@ -162,14 +227,24 @@ static void
 find_traces (int *n_traces, struct trace *traces)
 {
   int i;
+  int number_of_rounds;
   edge e;
+  edge_iterator ei;
   fibheap_t heap;
 
+  /* Add one extra round of trace collection when partitioning hot/cold
+     basic blocks into separate sections.  The last round is for all the
+     cold blocks (and ONLY the cold blocks).  */
+
+  number_of_rounds = N_ROUNDS - 1;
+  if (flag_reorder_blocks_and_partition)
+    number_of_rounds = N_ROUNDS;
+
   /* Insert entry points of function into heap.  */
   heap = fibheap_new ();
   max_entry_frequency = 0;
   max_entry_count = 0;
-  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
+  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
     {
       bbd[e->dest->index].heap = heap;
       bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
@@ -181,7 +256,7 @@ find_traces (int *n_traces, struct trace *traces)
     }
 
   /* Find the traces.  */
-  for (i = 0; i < N_ROUNDS; i++)
+  for (i = 0; i < number_of_rounds; i++)
     {
       gcov_type count_threshold;
 
@@ -195,7 +270,8 @@ find_traces (int *n_traces, struct trace *traces)
 
       find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
                           max_entry_frequency * exec_threshold[i] / 1000,
-                          count_threshold, traces, n_traces, i, &heap);
+                          count_threshold, traces, n_traces, i, &heap,
+                          number_of_rounds);
     }
   fibheap_delete (heap);
 
@@ -236,7 +312,9 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
   do
     {
       edge e;
-      for (e = bb->succ; e; e = e->succ_next)
+      edge_iterator ei;
+
+      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest != EXIT_BLOCK_PTR
            && e->dest->rbi->visited != trace_n
            && (e->flags & EDGE_CAN_FALLTHRU)
@@ -307,15 +385,17 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
          prev_bb->rbi->next = best_bb->rbi->next;
 
          /* Try to get rid of uncond jump to cond jump.  */
-         if (prev_bb->succ && !prev_bb->succ->succ_next)
+         if (EDGE_COUNT (prev_bb->succs) == 1)
            {
-             basic_block header = prev_bb->succ->dest;
+             basic_block header = EDGE_SUCC (prev_bb, 0)->dest;
 
              /* Duplicate HEADER if it is a small block containing cond jump
                 in the end.  */
-             if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0))
+             if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
+                 && !find_reg_note (BB_END (header), REG_CROSSING_JUMP, 
+                                    NULL_RTX))
                {
-                 copy_bb (header, prev_bb->succ, prev_bb, trace_n);
+                 copy_bb (header, EDGE_SUCC (prev_bb, 0), prev_bb, trace_n);
                }
            }
        }
@@ -354,8 +434,13 @@ mark_bb_visited (basic_block bb, int trace)
 static void
 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
                     struct trace *traces, int *n_traces, int round,
-                    fibheap_t *heap)
+                    fibheap_t *heap, int number_of_rounds)
 {
+  /* The following variable refers to the last round in which non-"cold" 
+     blocks may be collected into a trace.  */
+
+  int last_round = N_ROUNDS - 1;
+
   /* Heap for discarded basic blocks which are possible starting points for
      the next round.  */
   fibheap_t new_heap = fibheap_new ();
@@ -366,6 +451,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
       struct trace *trace;
       edge best_edge, e;
       fibheapkey_t key;
+      edge_iterator ei;
 
       bb = fibheap_extract_min (*heap);
       bbd[bb->index].heap = NULL;
@@ -374,10 +460,13 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
       if (dump_file)
        fprintf (dump_file, "Getting bb %d\n", bb->index);
 
-      /* If the BB's frequency is too low send BB to the next round.  */
-      if (round < N_ROUNDS - 1
-         && (bb->frequency < exec_th || bb->count < count_th
-             || probably_never_executed_bb_p (bb)))
+      /* If the BB's frequency is too low send BB to the next round.  When
+         partitioning hot/cold blocks into separate sections, make sure all
+         the cold blocks (and ONLY the cold blocks) go into the (extra) final
+         round.  */
+
+      if (push_to_next_round_p (bb, round, number_of_rounds, exec_th, 
+                               count_th))
        {
          int key = bb_to_key (bb);
          bbd[bb->index].heap = new_heap;
@@ -413,12 +502,9 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
                     bb->index, *n_traces - 1);
 
          /* Select the successor that will be placed after BB.  */
-         for (e = bb->succ; e; e = e->succ_next)
+         FOR_EACH_EDGE (e, ei, bb->succs)
            {
-#ifdef ENABLE_CHECKING
-             if (e->flags & EDGE_FAKE)
-               abort ();
-#endif
+             gcc_assert (!(e->flags & EDGE_FAKE));
 
              if (e->dest == EXIT_BLOCK_PTR)
                continue;
@@ -427,16 +513,24 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
                  && e->dest->rbi->visited != *n_traces)
                continue;
 
+             if (BB_PARTITION (e->dest) == BB_COLD_PARTITION
+                 && round < last_round)
+               continue;
+
              prob = e->probability;
              freq = EDGE_FREQUENCY (e);
 
              /* Edge that cannot be fallthru or improbable or infrequent
-                successor (ie. it is unsuitable successor).  */
+                successor (i.e. it is unsuitable successor).  */
              if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
                  || prob < branch_th || freq < exec_th || e->count < count_th)
                continue;
 
-             if (better_edge_p (bb, e, prob, freq, best_prob, best_freq))
+             /* If partitioning hot/cold basic blocks, prefer edges that do
+                not cross section boundaries (see better_edge_p).  */
+
+             if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
+                                best_edge))
                {
                  best_edge = e;
                  best_prob = prob;
@@ -447,12 +541,12 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
          /* If the best destination has multiple predecessors, and can be
             duplicated cheaper than a jump, don't allow it to be added
             to a trace.  We'll duplicate it when connecting traces.  */
-         if (best_edge && best_edge->dest->pred->pred_next
+         if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
              && copy_bb_p (best_edge->dest, 0))
            best_edge = NULL;
 
          /* Add all non-selected successors to the heaps.  */
-         for (e = bb->succ; e; e = e->succ_next)
+         FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (e == best_edge
                  || e->dest == EXIT_BLOCK_PTR
@@ -490,7 +584,13 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
                      || prob < branch_th || freq < exec_th
                      || e->count < count_th)
                    {
-                     if (round < N_ROUNDS - 1)
+                     /* When partitioning hot/cold basic blocks, make sure
+                        the cold blocks (and only the cold blocks) all get
+                        pushed to the last round of trace collection.  */
+
+                     if (push_to_next_round_p (e->dest, round, 
+                                               number_of_rounds,
+                                               exec_th, count_th))
                        which_heap = new_heap;
                    }
 
@@ -541,9 +641,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
 
                          /* Check whether there is another edge from BB.  */
                          edge another_edge;
-                         for (another_edge = bb->succ;
-                              another_edge;
-                              another_edge = another_edge->succ_next)
+                         FOR_EACH_EDGE (another_edge, ei, bb->succs)
                            if (another_edge != best_edge)
                              break;
 
@@ -582,17 +680,17 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
 
                  */
 
-                 for (e = bb->succ; e; e = e->succ_next)
+                 FOR_EACH_EDGE (e, ei, bb->succs)
                    if (e != best_edge
                        && (e->flags & EDGE_CAN_FALLTHRU)
                        && !(e->flags & EDGE_COMPLEX)
                        && !e->dest->rbi->visited
-                       && !e->dest->pred->pred_next
-                       && e->dest->succ
-                       && (e->dest->succ->flags & EDGE_CAN_FALLTHRU)
-                       && !(e->dest->succ->flags & EDGE_COMPLEX)
-                       && !e->dest->succ->succ_next
-                       && e->dest->succ->dest == best_edge->dest
+                       && EDGE_COUNT (e->dest->preds) == 1
+                       && !(e->flags & EDGE_CROSSING)
+                       && EDGE_COUNT (e->dest->succs) == 1
+                       && (EDGE_SUCC (e->dest, 0)->flags & EDGE_CAN_FALLTHRU)
+                       && !(EDGE_SUCC (e->dest, 0)->flags & EDGE_COMPLEX)
+                       && EDGE_SUCC (e->dest, 0)->dest == best_edge->dest
                        && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
                      {
                        best_edge = e;
@@ -615,7 +713,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
       /* The trace is terminated so we have to recount the keys in heap
         (some block can have a lower key because now one of its predecessors
         is an end of the trace).  */
-      for (e = bb->succ; e; e = e->succ_next)
+      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (e->dest == EXIT_BLOCK_PTR
              || e->dest->rbi->visited)
@@ -656,11 +754,12 @@ copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
 {
   basic_block new_bb;
 
-  new_bb = cfg_layout_duplicate_bb (old_bb, e);
-  if (e->dest != new_bb)
-    abort ();
-  if (e->dest->rbi->visited)
-    abort ();
+  new_bb = duplicate_block (old_bb, e);
+  BB_COPY_PARTITION (new_bb, old_bb);
+
+  gcc_assert (e->dest == new_bb);
+  gcc_assert (!e->dest->rbi->visited);
+
   if (dump_file)
     fprintf (dump_file,
             "Duplicated bb %d (created bb %d)\n",
@@ -703,16 +802,18 @@ static fibheapkey_t
 bb_to_key (basic_block bb)
 {
   edge e;
-
+  edge_iterator ei;
   int priority = 0;
 
   /* Do not start in probably never executed blocks.  */
-  if (probably_never_executed_bb_p (bb))
+
+  if (BB_PARTITION (bb) == BB_COLD_PARTITION
+      || probably_never_executed_bb_p (bb))
     return BB_FREQ_MAX;
 
   /* Prefer blocks whose predecessor is an end of some trace
      or whose predecessor edge is EDGE_DFS_BACK.  */
-  for (e = bb->pred; e; e = e->pred_next)
+  FOR_EACH_EDGE (e, ei, bb->preds)
     {
       if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
          || (e->flags & EDGE_DFS_BACK))
@@ -739,7 +840,7 @@ bb_to_key (basic_block bb)
 
 static bool
 better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob,
-              int best_freq)
+              int best_freq, edge cur_best_edge)
 {
   bool is_better_edge;
 
@@ -770,6 +871,16 @@ better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob,
   else
     is_better_edge = false;
 
+  /* If we are doing hot/cold partitioning, make sure that we always favor
+     non-crossing edges over crossing edges.  */
+
+  if (!is_better_edge
+      && flag_reorder_blocks_and_partition 
+      && cur_best_edge 
+      && (cur_best_edge->flags & EDGE_CROSSING)
+      && !(e->flags & EDGE_CROSSING))
+    is_better_edge = true;
+
   return is_better_edge;
 }
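The effect of the new CUR_BEST_EDGE parameter can be modeled in isolation: a candidate that lost every probability/frequency comparison still wins if the incumbent crosses sections and the candidate does not. A standalone sketch with illustrative names:

#include <stdbool.h>

/* Mirrors the tail of better_edge_p: IS_BETTER is the verdict of the
   heuristics above; the tie-break only fires when partitioning.  */
static bool
prefer_non_crossing (bool is_better, bool partitioning,
                     bool best_crosses, bool cand_crosses)
{
  return is_better
         || (partitioning && best_crosses && !cand_crosses);
}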
 
@@ -779,7 +890,10 @@ static void
 connect_traces (int n_traces, struct trace *traces)
 {
   int i;
+  int unconnected_hot_trace_count = 0;
+  bool cold_connected = true;
   bool *connected;
+  bool *cold_traces;
   int last_trace;
   int freq_threshold;
   gcov_type count_threshold;
@@ -792,24 +906,74 @@ connect_traces (int n_traces, struct trace *traces)
 
   connected = xcalloc (n_traces, sizeof (bool));
   last_trace = -1;
-  for (i = 0; i < n_traces; i++)
+
+  /* If we are partitioning hot/cold basic blocks, mark the cold
+     traces as already connected, to remove them from consideration
+     for connection to the hot traces.  After the hot traces have all
+     been connected (determined by "unconnected_hot_trace_count"), we
+     will go back and connect the cold traces.  */
+
+  cold_traces = xcalloc (n_traces, sizeof (bool));
+
+  if (flag_reorder_blocks_and_partition)
+    for (i = 0; i < n_traces; i++)
+      {
+       if (BB_PARTITION (traces[i].first) == BB_COLD_PARTITION)
+         {
+           connected[i] = true;
+           cold_traces[i] = true;
+           cold_connected = false;
+         }
+       else
+         unconnected_hot_trace_count++;
+      }
+  
+  for (i = 0; i < n_traces || !cold_connected ; i++)
     {
       int t = i;
       int t2;
       edge e, best;
       int best_len;
 
+      /* If we are partitioning hot/cold basic blocks, check to see
+        if all the hot traces have been connected.  If so, go back
+        and mark the cold traces as unconnected so we can connect
+        them up too.  Re-set "i" to the first (unconnected) cold
+        trace. Use flag "cold_connected" to make sure we don't do
+         this step more than once.  */
+
+      if (flag_reorder_blocks_and_partition
+         && (i >= n_traces || unconnected_hot_trace_count <= 0)
+         && !cold_connected)
+       {
+         int j;
+         int first_cold_trace = -1;
+
+         for (j = 0; j < n_traces; j++)
+           if (cold_traces[j])
+             {
+               connected[j] = false;
+               if (first_cold_trace == -1)
+                 first_cold_trace = j;
+             }
+         i = t = first_cold_trace;
+         cold_connected = true;
+       }
+
       if (connected[t])
        continue;
 
       connected[t] = true;
+      if (unconnected_hot_trace_count > 0)
+       unconnected_hot_trace_count--;
 
       /* Find the predecessor traces.  */
       for (t2 = t; t2 > 0;)
        {
+         edge_iterator ei;
          best = NULL;
          best_len = 0;
-         for (e = traces[t2].first->pred; e; e = e->pred_next)
+         FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
            {
              int si = e->src->index;
 
@@ -832,6 +996,10 @@ connect_traces (int n_traces, struct trace *traces)
              best->src->rbi->next = best->dest;
              t2 = bbd[best->src->index].end_of_trace;
              connected[t2] = true;
+
+             if (unconnected_hot_trace_count > 0)
+               unconnected_hot_trace_count--;
+
              if (dump_file)
                {
                  fprintf (dump_file, "Connection: %d %d\n",
@@ -850,9 +1018,10 @@ connect_traces (int n_traces, struct trace *traces)
       while (1)
        {
          /* Find the continuation of the chain.  */
+         edge_iterator ei;
          best = NULL;
          best_len = 0;
-         for (e = traces[t].last->succ; e; e = e->succ_next)
+         FOR_EACH_EDGE (e, ei, traces[t].last->succs)
            {
              int di = e->dest->index;
 
@@ -881,6 +1050,8 @@ connect_traces (int n_traces, struct trace *traces)
              t = bbd[best->dest->index].start_of_trace;
              traces[last_trace].last->rbi->next = traces[t].first;
              connected[t] = true;
+             if (unconnected_hot_trace_count > 0)
+               unconnected_hot_trace_count--;
              last_trace = t;
            }
          else
@@ -890,12 +1061,13 @@ connect_traces (int n_traces, struct trace *traces)
              basic_block next_bb = NULL;
              bool try_copy = false;
 
-             for (e = traces[t].last->succ; e; e = e->succ_next)
+             FOR_EACH_EDGE (e, ei, traces[t].last->succs)
                if (e->dest != EXIT_BLOCK_PTR
                    && (e->flags & EDGE_CAN_FALLTHRU)
                    && !(e->flags & EDGE_COMPLEX)
                    && (!best || e->probability > best->probability))
                  {
+                   edge_iterator ei;
                    edge best2 = NULL;
                    int best2_len = 0;
 
@@ -911,7 +1083,7 @@ connect_traces (int n_traces, struct trace *traces)
                        continue;
                      }
 
-                   for (e2 = e->dest->succ; e2; e2 = e2->succ_next)
+                   FOR_EACH_EDGE (e2, ei, e->dest->succs)
                      {
                        int di = e2->dest->index;
 
@@ -940,6 +1112,9 @@ connect_traces (int n_traces, struct trace *traces)
                      }
                  }
 
+             if (flag_reorder_blocks_and_partition)
+               try_copy = false;
+
              /* Copy tiny blocks always; copy larger blocks only when the
                 edge is traversed frequently enough.  */
              if (try_copy
@@ -969,6 +1144,8 @@ connect_traces (int n_traces, struct trace *traces)
                      t = bbd[next_bb->index].start_of_trace;
                      traces[last_trace].last->rbi->next = traces[t].first;
                      connected[t] = true;
+                     if (unconnected_hot_trace_count > 0)
+                       unconnected_hot_trace_count--;
                      last_trace = t;
                    }
                  else
@@ -992,6 +1169,7 @@ connect_traces (int n_traces, struct trace *traces)
     }
 
   FREE (connected);
+  FREE (cold_traces);
 }
 
 /* Return true when BB can and should be copied. CODE_MAY_GROW is true
@@ -1003,24 +1181,17 @@ copy_bb_p (basic_block bb, int code_may_grow)
   int size = 0;
   int max_size = uncond_jump_length;
   rtx insn;
-  int n_succ;
-  edge e;
 
   if (!bb->frequency)
     return false;
-  if (!bb->pred || !bb->pred->pred_next)
+  if (EDGE_COUNT (bb->preds) < 2)
     return false;
-  if (!cfg_layout_can_duplicate_bb_p (bb))
+  if (!can_duplicate_block_p (bb))
     return false;
 
   /* Avoid duplicating blocks which have many successors (PR/13430).  */
-  n_succ = 0;
-  for (e = bb->succ; e; e = e->succ_next)
-    {
-      n_succ++;
-      if (n_succ > 8)
-       return false;
-    }
+  if (EDGE_COUNT (bb->succs) > 8)
+    return false;
 
   if (code_may_grow && maybe_hot_bb_p (bb))
     max_size *= 8;
@@ -1063,10 +1234,713 @@ get_uncond_jump_length (void)
   return length;
 }
 
-/* Reorder basic blocks.  The main entry point to this file.  */
+static void
+add_unlikely_executed_notes (void)
+{
+  basic_block bb;
+
+  /* Add the UNLIKELY_EXECUTED_NOTES to each cold basic block.  */
+
+  FOR_EACH_BB (bb)
+    if (BB_PARTITION (bb) == BB_COLD_PARTITION)
+      mark_bb_for_unlikely_executed_section (bb);
+}
+
+/* Find the basic blocks that are rarely executed and need to be moved to
+   a separate section of the .o file (to cut down on paging and improve
+   cache locality).  */
+
+static void
+find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, 
+                                                     int *n_crossing_edges, 
+                                                     int *max_idx)
+{
+  basic_block bb;
+  bool has_hot_blocks = false;
+  edge e;
+  int i;
+  edge_iterator ei;
+
+  /* Mark which partition (hot/cold) each basic block belongs in.  */
+  
+  FOR_EACH_BB (bb)
+    {
+      if (probably_never_executed_bb_p (bb))
+       BB_SET_PARTITION (bb, BB_COLD_PARTITION);
+      else
+       {
+         BB_SET_PARTITION (bb, BB_HOT_PARTITION);
+         has_hot_blocks = true;
+       }
+    }
+
+  /* Since all "hot" basic blocks will eventually be scheduled before all
+     cold basic blocks, make *sure* the real function entry block is in
+     the hot partition (if there is one).  */
+  
+  if (has_hot_blocks)
+    FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+      if (e->dest->index >= 0)
+       {
+         BB_SET_PARTITION (e->dest, BB_HOT_PARTITION);
+         break;
+       }
+
+  /* Mark every edge that crosses between sections.  */
+
+  i = 0;
+  if (targetm.have_named_sections)
+    {
+      FOR_EACH_BB (bb)
+        FOR_EACH_EDGE (e, ei, bb->succs)
+         {
+           if (e->src != ENTRY_BLOCK_PTR
+               && e->dest != EXIT_BLOCK_PTR
+               && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
+             {
+               e->flags |= EDGE_CROSSING;
+               if (i == *max_idx)
+                 {
+                   *max_idx *= 2;
+                   crossing_edges = xrealloc (crossing_edges,
+                                              (*max_idx) * sizeof (edge));
+                 }
+               crossing_edges[i++] = e;
+             }
+           else
+             e->flags &= ~EDGE_CROSSING;
+         }
+    }
+  *n_crossing_edges = i;
+}
+
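The crossing-edge collection above uses the usual doubling-buffer idiom. A minimal standalone analogue (illustrative helper; the real code uses xrealloc, which aborts on allocation failure):

#include <stdlib.h>

/* Append E to VEC, doubling *CAP when full; returns the (possibly
   moved) vector.  Error checking omitted for brevity.  */
static void **
vec_append (void **vec, int *n, int *cap, void *e)
{
  if (*n == *cap)
    {
      *cap *= 2;
      vec = realloc (vec, *cap * sizeof (void *));
    }
  vec[(*n)++] = e;
  return vec;
}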
+/* Add NOTE_INSN_UNLIKELY_EXECUTED_CODE to the top of the basic block.  This
+   note is later used to mark the basic block to be put in the
+   unlikely-to-be-executed section of the .o file.  */
+
+static void
+mark_bb_for_unlikely_executed_section (basic_block bb) 
+{
+  rtx cur_insn;
+  rtx insert_insn = NULL;
+  rtx new_note;
+  
+  /* Insert the new NOTE immediately after the BASIC_BLOCK note.  */
+
+  for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb));
+       cur_insn = NEXT_INSN (cur_insn))
+    if (GET_CODE (cur_insn) == NOTE
+       && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_BASIC_BLOCK)
+      {
+       insert_insn = cur_insn;
+       break;
+      }
+    
+  /* If basic block does not contain a NOTE_INSN_BASIC_BLOCK, there is
+     a major problem.  */
+  gcc_assert (insert_insn);
+
+  /* Insert note and assign basic block number to it.  */
+  
+  new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE, 
+                             insert_insn);
+  NOTE_BASIC_BLOCK (new_note) = bb;
+}
+
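Later consumers identify cold blocks by the presence of this note. A hedged sketch of such a test, using the same insn-walking idiom as the function above (illustrative only; not a function added by this patch):

/* Return true if BB carries NOTE_INSN_UNLIKELY_EXECUTED_CODE.  */
static bool
bb_marked_cold_p (basic_block bb)
{
  rtx insn;

  for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb));
       insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == NOTE
        && NOTE_LINE_NUMBER (insn) == NOTE_INSN_UNLIKELY_EXECUTED_CODE)
      return true;
  return false;
}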
+/* If any destination of a crossing edge does not have a label, add label;
+   Convert any fall-through crossing edges (for blocks that do not contain
+   a jump) to unconditional jumps.  */
+
+static void 
+add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
+{
+  int i;
+  basic_block src;
+  basic_block dest;
+  rtx label;
+  rtx barrier;
+  rtx new_jump;
+  
+  for (i = 0; i < n_crossing_edges; i++)
+    {
+      if (crossing_edges[i]) 
+       {
+         src = crossing_edges[i]->src; 
+         dest = crossing_edges[i]->dest;
+         
+         /* Make sure dest has a label.  */
+         
+         if (dest && (dest != EXIT_BLOCK_PTR))
+           {
+             label = block_label (dest);
+             
+             /* Make sure source block ends with a jump.  */
+             
+             if (src && (src != ENTRY_BLOCK_PTR)) 
+               {
+                 if (!JUMP_P (BB_END (src)))
+                   /* bb just falls through.  */
+                   {
+                     /* Make sure there's only one successor.  */
+                     gcc_assert (EDGE_COUNT (src->succs) == 1);
+                     
+                     /* Find label in dest block.  */
+                     label = block_label (dest);
+                     
+                     new_jump = emit_jump_insn_after (gen_jump (label), 
+                                                      BB_END (src));
+                     barrier = emit_barrier_after (new_jump);
+                     JUMP_LABEL (new_jump) = label;
+                     LABEL_NUSES (label) += 1;
+                     src->rbi->footer = unlink_insn_chain (barrier, barrier);
+                     /* Mark edge as non-fallthru.  */
+                     crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
+                   } /* end: 'if (!JUMP_P ...'  */
+               } /* end: 'if (src && ...'  */
+           } /* end: 'if (dest && ...'  */
+       } /* end: 'if (crossing_edges[i]...'  */
+    } /* end for loop  */
+}
+
+/* Find any bb's where the fall-through edge is a crossing edge (note that
+   these bb's must also contain a conditional jump; we've already
+   dealt with fall-through edges for blocks that didn't have a
+   conditional jump in the call to add_labels_and_missing_jumps).
+   Convert the fall-through edge to non-crossing edge by inserting a
+   new bb to fall-through into.  The new bb will contain an
+   unconditional jump (crossing edge) to the original fall through
+   destination.  */
+
+static void 
+fix_up_fall_thru_edges (void)
+{
+  basic_block cur_bb;
+  basic_block new_bb;
+  edge succ1;
+  edge succ2;
+  edge fall_thru;
+  edge cond_jump = NULL;
+  edge e;
+  bool cond_jump_crosses;
+  int invert_worked;
+  rtx old_jump;
+  rtx fall_thru_label;
+  rtx barrier;
+  
+  FOR_EACH_BB (cur_bb)
+    {
+      fall_thru = NULL;
+      if (EDGE_COUNT (cur_bb->succs) > 0)
+       succ1 = EDGE_SUCC (cur_bb, 0);
+      else
+       succ1 = NULL;
+
+      if (EDGE_COUNT (cur_bb->succs) > 1)
+       succ2 = EDGE_SUCC (cur_bb, 1);
+      else
+       succ2 = NULL;
+      
+      /* Find the fall-through edge.  */
+      
+      if (succ1 
+         && (succ1->flags & EDGE_FALLTHRU))
+       {
+         fall_thru = succ1;
+         cond_jump = succ2;
+       }
+      else if (succ2 
+              && (succ2->flags & EDGE_FALLTHRU))
+       {
+         fall_thru = succ2;
+         cond_jump = succ1;
+       }
+      
+      if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
+       {
+         /* Check to see if the fall-thru edge is a crossing edge.  */
+       
+         if (fall_thru->flags & EDGE_CROSSING)
+           {
+             /* The fall_thru edge crosses; now check the cond jump edge, if
+                it exists.  */
+             
+             cond_jump_crosses = true;
+             invert_worked = 0;
+             old_jump = BB_END (cur_bb);
+             
+             /* Find the jump instruction, if there is one.  */
+             
+             if (cond_jump)
+               {
+                 if (!(cond_jump->flags & EDGE_CROSSING))
+                   cond_jump_crosses = false;
+                 
+                 /* We know the fall-thru edge crosses; if the cond
+                    jump edge does NOT cross, and its destination is the
+                    next block in the bb order, invert the jump
+                    (i.e. fix it so the fall thru does not cross and
+                    the cond jump does).  */
+                 
+                 if (!cond_jump_crosses
+                     && cur_bb->rbi->next == cond_jump->dest)
+                   {
+                     /* Find label in fall_thru block. We've already added
+                        any missing labels, so there must be one.  */
+                     
+                     fall_thru_label = block_label (fall_thru->dest);
+
+                     if (old_jump && fall_thru_label)
+                       invert_worked = invert_jump (old_jump, 
+                                                    fall_thru_label, 0);
+                     if (invert_worked)
+                       {
+                         fall_thru->flags &= ~EDGE_FALLTHRU;
+                         cond_jump->flags |= EDGE_FALLTHRU;
+                         update_br_prob_note (cur_bb);
+                         e = fall_thru;
+                         fall_thru = cond_jump;
+                         cond_jump = e;
+                         cond_jump->flags |= EDGE_CROSSING;
+                         fall_thru->flags &= ~EDGE_CROSSING;
+                       }
+                   }
+               }
+             
+             if (cond_jump_crosses || !invert_worked)
+               {
+                 /* This is the case where both edges out of the basic
+                    block are crossing edges. Here we will fix up the
+                    fall through edge. The jump edge will be taken care
+                    of later.  */
+                 
+                 new_bb = force_nonfallthru (fall_thru);  
+                 
+                 if (new_bb)
+                   {
+                     new_bb->rbi->next = cur_bb->rbi->next;
+                     cur_bb->rbi->next = new_bb;
+                     
+                     /* Make sure new fall-through bb is in same 
+                        partition as bb it's falling through from.  */
+
+                     BB_COPY_PARTITION (new_bb, cur_bb);
+                     EDGE_SUCC (new_bb, 0)->flags |= EDGE_CROSSING;
+                   }
+                 
+                 /* Add a barrier after the new jump.  */
+                 
+                 if (new_bb)
+                   {
+                     barrier = emit_barrier_after (BB_END (new_bb));
+                     new_bb->rbi->footer = unlink_insn_chain (barrier, 
+                                                              barrier);
+                   }
+                 else
+                   {
+                     barrier = emit_barrier_after (BB_END (cur_bb));
+                     cur_bb->rbi->footer = unlink_insn_chain (barrier,
+                                                              barrier);
+                   }
+               }
+           }
+       }
+    }
+}
+
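Schematically, the invert_jump case above performs the following swap (an illustration, not from the source):

/* Before: the fall-thru crosses, the conditional jump does not.

     cur_bb --cond jump--> next_bb   (same section)
     cur_bb --fall-thru--> far_bb    (other section)

   After inverting the jump, the crossing edge is the explicit jump and
   the fall-thru stays within the section:

     cur_bb --cond jump--> far_bb    (other section, EDGE_CROSSING)
     cur_bb --fall-thru--> next_bb   (same section)  */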
+/* This function checks the destination block of a "crossing jump" to
+   see if it has any crossing predecessors that begin with a code label
+   and end with an unconditional jump.  If so, it returns that predecessor
+   block.  (This is to avoid creating lots of new basic blocks that all
+   contain unconditional jumps to the same destination).  */
+
+static basic_block
+find_jump_block (basic_block jump_dest) 
+{ 
+  basic_block source_bb = NULL; 
+  edge e;
+  rtx insn;
+  edge_iterator ei;
+
+  FOR_EACH_EDGE (e, ei, jump_dest->preds)
+    if (e->flags & EDGE_CROSSING)
+      {
+       basic_block src = e->src;
+       
+       /* Check each predecessor to see if it has a label, and contains
+          only one executable instruction, which is an unconditional jump.
+          If so, we can use it.  */
+       
+       if (LABEL_P (BB_HEAD (src)))
+         for (insn = BB_HEAD (src); 
+              !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
+              insn = NEXT_INSN (insn))
+           {
+             if (INSN_P (insn)
+                 && insn == BB_END (src)
+                 && JUMP_P (insn)
+                 && !any_condjump_p (insn))
+               {
+                 source_bb = src;
+                 break;
+               }
+           }
+       
+       if (source_bb)
+         break;
+      }
+
+  return source_bb;
+}
+
+/* Find all BB's with conditional jumps that are crossing edges;
+   insert a new bb and make the conditional jump branch to the new
+   bb instead (make the new bb same color so conditional branch won't
+   be a 'crossing' edge).  Insert an unconditional jump from the
+   new bb to the original destination of the conditional jump.  */
+
+static void
+fix_crossing_conditional_branches (void)
+{
+  basic_block cur_bb;
+  basic_block new_bb;
+  basic_block last_bb;
+  basic_block dest;
+  basic_block prev_bb;
+  edge succ1;
+  edge succ2;
+  edge crossing_edge;
+  edge new_edge;
+  rtx old_jump;
+  rtx set_src;
+  rtx old_label = NULL_RTX;
+  rtx new_label;
+  rtx new_jump;
+  rtx barrier;
+
+  last_bb = EXIT_BLOCK_PTR->prev_bb;
+  
+  FOR_EACH_BB (cur_bb)
+    {
+      crossing_edge = NULL;
+      if (EDGE_COUNT (cur_bb->succs) > 0)
+       succ1 = EDGE_SUCC (cur_bb, 0);
+      else
+       succ1 = NULL;
+    
+      if (EDGE_COUNT (cur_bb->succs) > 1)
+       succ2 = EDGE_SUCC (cur_bb, 1);
+      else
+       succ2 = NULL;
+      
+      /* We already took care of fall-through edges, so only one successor
+        can be a crossing edge.  */
+      
+      if (succ1 && (succ1->flags & EDGE_CROSSING))
+       crossing_edge = succ1;
+      else if (succ2 && (succ2->flags & EDGE_CROSSING))
+       crossing_edge = succ2;
+      
+      if (crossing_edge) 
+       {
+         old_jump = BB_END (cur_bb);
+         
+         /* Check to make sure the jump instruction is a
+            conditional jump.  */
+         
+         set_src = NULL_RTX;
+
+         if (any_condjump_p (old_jump))
+           {
+             if (GET_CODE (PATTERN (old_jump)) == SET)
+               set_src = SET_SRC (PATTERN (old_jump));
+             else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
+               {
+                 set_src = XVECEXP (PATTERN (old_jump), 0, 0);
+                 if (GET_CODE (set_src) == SET)
+                   set_src = SET_SRC (set_src);
+                 else
+                   set_src = NULL_RTX;
+               }
+           }
+
+         if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
+           {
+             if (GET_CODE (XEXP (set_src, 1)) == PC)
+               old_label = XEXP (set_src, 2);
+             else if (GET_CODE (XEXP (set_src, 2)) == PC)
+               old_label = XEXP (set_src, 1);
+             
+             /* Check to see if new bb for jumping to that dest has
+                already been created; if so, use it; if not, create
+                a new one.  */
+
+             new_bb = find_jump_block (crossing_edge->dest);
+             
+             if (new_bb)
+               new_label = block_label (new_bb);
+             else
+               {
+                 /* Create new basic block to be dest for
+                    conditional jump.  */
+                 
+                 new_bb = create_basic_block (NULL, NULL, last_bb);
+                 new_bb->rbi->next = last_bb->rbi->next;
+                 last_bb->rbi->next = new_bb;
+                 prev_bb = last_bb;
+                 last_bb = new_bb;
+                 
+                 /* Update register liveness information.  */
+                 
+                 new_bb->global_live_at_start = 
+                   OBSTACK_ALLOC_REG_SET (&flow_obstack);
+                 new_bb->global_live_at_end = 
+                   OBSTACK_ALLOC_REG_SET (&flow_obstack);
+                 COPY_REG_SET (new_bb->global_live_at_end,
+                               prev_bb->global_live_at_end);
+                 COPY_REG_SET (new_bb->global_live_at_start,
+                               prev_bb->global_live_at_end);
+                 
+                 /* Put appropriate instructions in new bb.  */
+                 
+                 new_label = gen_label_rtx ();
+                 emit_label_before (new_label, BB_HEAD (new_bb));
+                 BB_HEAD (new_bb) = new_label;
+                 
+                 if (GET_CODE (old_label) == LABEL_REF)
+                   {
+                     old_label = JUMP_LABEL (old_jump);
+                     new_jump = emit_jump_insn_after (gen_jump 
+                                                      (old_label), 
+                                                      BB_END (new_bb));
+                   }
+                 else
+                   {
+                     gcc_assert (HAVE_return
+                                 && GET_CODE (old_label) == RETURN);
+                     new_jump = emit_jump_insn_after (gen_return (), 
+                                                      BB_END (new_bb));
+                   }
+                 
+                 barrier = emit_barrier_after (new_jump);
+                 JUMP_LABEL (new_jump) = old_label;
+                 new_bb->rbi->footer = unlink_insn_chain (barrier, 
+                                                          barrier);
+                 
+                 /* Make sure new bb is in same partition as source
+                    of conditional branch.  */
+                 BB_COPY_PARTITION (new_bb, cur_bb);
+               }
+             
+             /* Make old jump branch to new bb.  */
+             
+             redirect_jump (old_jump, new_label, 0);
+             
+             /* Remove crossing_edge as predecessor of 'dest'.  */
+             
+             dest = crossing_edge->dest;
+             
+             redirect_edge_succ (crossing_edge, new_bb);
+             
+             /* Make a new edge from new_bb to old dest; new edge
+                will be a successor for new_bb and a predecessor
+                for 'dest'.  */
+             
+             if (EDGE_COUNT (new_bb->succs) == 0)
+               new_edge = make_edge (new_bb, dest, 0);
+             else
+               new_edge = EDGE_SUCC (new_bb, 0);
+             
+             crossing_edge->flags &= ~EDGE_CROSSING;
+             new_edge->flags |= EDGE_CROSSING;
+           }
+       }
+    }
+}
+
+/* Find any unconditional branches that cross between hot and cold
+   sections.  Convert them into indirect jumps instead.  */
+
+static void
+fix_crossing_unconditional_branches (void)
+{
+  basic_block cur_bb;
+  rtx last_insn;
+  rtx label;
+  rtx label_addr;
+  rtx indirect_jump_sequence;
+  rtx jump_insn = NULL_RTX;
+  rtx new_reg;
+  rtx cur_insn;
+  edge succ;
+
+  FOR_EACH_BB (cur_bb)
+    {
+      last_insn = BB_END (cur_bb);
+      succ = EDGE_SUCC (cur_bb, 0);
+
+      /* Check to see if bb ends in a crossing (unconditional) jump.  At
+         this point, no crossing jumps should be conditional.  */
+
+      if (JUMP_P (last_insn)
+         && (succ->flags & EDGE_CROSSING))
+       {
+         rtx label2, table;
+
+         gcc_assert (!any_condjump_p (last_insn));
+
+         /* Make sure the jump is not already an indirect or table jump.  */
+
+         if (!computed_jump_p (last_insn)
+             && !tablejump_p (last_insn, &label2, &table))
+           {
+             /* We have found a "crossing" unconditional branch.  Now
+                we must convert it to an indirect jump.  First create
+                reference of label, as target for jump.  */
+             
+             label = JUMP_LABEL (last_insn);
+             label_addr = gen_rtx_LABEL_REF (Pmode, label);
+             LABEL_NUSES (label) += 1;
+             
+             /* Get a register to use for the indirect jump.  */
+             
+             new_reg = gen_reg_rtx (Pmode);
+             
+             /* Generate the indirect jump sequence.  */
+             
+             start_sequence ();
+             emit_move_insn (new_reg, label_addr);
+             emit_indirect_jump (new_reg);
+             indirect_jump_sequence = get_insns ();
+             end_sequence ();
+             
+             /* Make sure every instruction in the new jump sequence has
+                its basic block set to be cur_bb.  */
+             
+             for (cur_insn = indirect_jump_sequence; cur_insn;
+                  cur_insn = NEXT_INSN (cur_insn))
+               {
+                 BLOCK_FOR_INSN (cur_insn) = cur_bb;
+                 if (JUMP_P (cur_insn))
+                   jump_insn = cur_insn;
+               }
+             
+             /* Insert the new (indirect) jump sequence immediately before
+                the unconditional jump, then delete the unconditional jump.  */
+             
+             emit_insn_before (indirect_jump_sequence, last_insn);
+             delete_insn (last_insn);
+             
+             /* Make BB_END for cur_bb be the jump instruction (NOT the
+                barrier instruction at the end of the sequence...).  */
+             
+             BB_END (cur_bb) = jump_insn;
+           }
+       }
+    }
+}
+
+/* Add REG_CROSSING_JUMP note to all crossing jump insns.  */
+
+static void
+add_reg_crossing_jump_notes (void)
+{
+  basic_block bb;
+  edge e;
+  edge_iterator ei;
+
+  FOR_EACH_BB (bb)
+    FOR_EACH_EDGE (e, ei, bb->succs)
+      if ((e->flags & EDGE_CROSSING)
+         && JUMP_P (BB_END (e->src)))
+       REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, 
+                                                        NULL_RTX, 
+                                                        REG_NOTES (BB_END 
+                                                                 (e->src)));
+}
+
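The REG_NOTES update above is a prepend onto a singly linked list of EXPR_LIST nodes. A standalone analogue (illustrative struct, not GCC's rtx representation):

#include <stdlib.h>

struct note { int kind; struct note *next; };

/* Mirrors REG_NOTES (insn) = gen_rtx_EXPR_LIST (kind, NULL_RTX,
   REG_NOTES (insn)): the new note becomes the head of the list.  */
static struct note *
prepend_note (struct note *notes, int kind)
{
  struct note *n = malloc (sizeof *n);   /* error checking omitted */
  n->kind = kind;
  n->next = notes;
  return n;
}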
+/* Basic blocks containing NOTE_INSN_UNLIKELY_EXECUTED_CODE will be
+   put in a separate section of the .o file, to reduce paging and
+   improve cache performance (hopefully).  This can result in bits of
+   code from the same function being widely separated in the .o file.
+   However, this is not obvious from the current bb structure.  Therefore
+   we must take care to ensure that: 1) there are no fall_thru edges
+   that cross between sections; 2) for those architectures which
+   have "short" conditional branches, all conditional branches that
+   attempt to cross between sections are converted to unconditional
+   branches; and 3) for those architectures which have "short"
+   unconditional branches, all unconditional branches that attempt
+   to cross between sections are converted to indirect jumps.
+   
+   The code for fixing up fall_thru edges that cross between hot and
+   cold basic blocks does so by creating new basic blocks containing 
+   unconditional branches to the appropriate label in the "other" 
+   section.  The new basic block is then put in the same (hot or cold)
+   section as the original conditional branch, and the fall_thru edge
+   is modified to fall into the new basic block instead.  By adding
+   this level of indirection we end up with only unconditional branches
+   crossing between hot and cold sections.  
+   
+   Conditional branches are dealt with by adding a level of indirection.
+   A new basic block is added in the same (hot/cold) section as the 
+   conditional branch, and the conditional branch is retargeted to the
+   new basic block.  The new basic block contains an unconditional branch
+   to the original target of the conditional branch (in the other section).
+
+   Unconditional branches are dealt with by converting them into
+   indirect jumps.  */
+
+static void 
+fix_edges_for_rarely_executed_code (edge *crossing_edges, 
+                                   int n_crossing_edges)
+{
+  /* Make sure the source of any crossing edge ends in a jump and the
+     destination of any crossing edge has a label.  */
+  
+  add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);
+  
+  /* Convert all crossing fall_thru edges to non-crossing fall
+     thrus to unconditional jumps (that jump to the original fall
+     thru dest).  */
+  
+  fix_up_fall_thru_edges ();
+  
+  /* Only do the parts necessary for writing separate sections if
+     the target architecture has the ability to write separate sections
+     (i.e. it has named sections).  Otherwise, the hot/cold partitioning
+     information will be used when reordering blocks to try to put all
+     the hot blocks together, then all the cold blocks, but no actual
+     section partitioning will be done.  */
+
+  if (targetm.have_named_sections)
+    {
+      /* If the architecture does not have conditional branches that can
+        span all of memory, convert crossing conditional branches into
+        crossing unconditional branches.  */
+  
+      if (!HAS_LONG_COND_BRANCH)
+       fix_crossing_conditional_branches ();
+  
+      /* If the architecture does not have unconditional branches that
+        can span all of memory, convert crossing unconditional branches
+        into indirect jumps.  Since adding an indirect jump also adds
+        a new register usage, update the register usage information as
+        well.  */
+      
+      if (!HAS_LONG_UNCOND_BRANCH)
+       {
+         fix_crossing_unconditional_branches ();
+         reg_scan (get_insns (), max_reg_num (), 1);
+       }
+
+      add_reg_crossing_jump_notes ();
+    }
+}
+
+/* Reorder basic blocks.  The main entry point to this file.  FLAGS is
+   the set of flags to pass to cfg_layout_initialize ().  */
 
 void
-reorder_basic_blocks (void)
+reorder_basic_blocks (unsigned int flags)
 {
   int n_traces;
   int i;
@@ -1075,12 +1949,12 @@ reorder_basic_blocks (void)
   if (n_basic_blocks <= 1)
     return;
 
-  if ((* targetm.cannot_modify_jumps_p) ())
+  if (targetm.cannot_modify_jumps_p ())
     return;
 
   timevar_push (TV_REORDER_BLOCKS);
 
-  cfg_layout_initialize ();
+  cfg_layout_initialize (flags);
 
   set_edge_can_fallthru_flag ();
   mark_dfs_back_edges ();
@@ -1111,7 +1985,102 @@ reorder_basic_blocks (void)
   if (dump_file)
     dump_flow_info (dump_file);
 
+  if (flag_reorder_blocks_and_partition
+      && targetm.have_named_sections)
+    add_unlikely_executed_notes ();
+
   cfg_layout_finalize ();
 
   timevar_pop (TV_REORDER_BLOCKS);
 }
+
+/* This function is the main 'entrance' for the optimization that
+   partitions hot and cold basic blocks into separate sections of the
+   .o file (to improve performance and cache locality).  Ideally it
+   would be called after all optimizations that rearrange the CFG have
+   been called.  However part of this optimization may introduce new
+   register usage, so it must be called before register allocation has
+   occurred.  This means that this optimization is actually called
+   well before the optimization that reorders basic blocks (see
+   function above).
+
+   This optimization checks the feedback information to determine
+   which basic blocks are hot/cold and causes reorder_basic_blocks to
+   add NOTE_INSN_UNLIKELY_EXECUTED_CODE to non-hot basic blocks.  The
+   presence or absence of this note is later used for writing out
+   sections in the .o file.  Because hot and cold sections can be
+   arbitrarily large (within the bounds of memory), far beyond the
+   size of a single function, it is necessary to fix up all edges that
+   cross section boundaries, to make sure the instructions used can
+   actually span the required distance.  The fixes are described
+   below.
+
+   Fall-through edges must be changed into jumps; it is not safe or
+   legal to fall through across a section boundary.  Whenever a
+   fall-through edge crossing a section boundary is encountered, a new
+   basic block is inserted (in the same section as the fall-through
+   source), and the fall through edge is redirected to the new basic
+   block.  The new basic block contains an unconditional jump to the
+   original fall-through target.  (If the unconditional jump is
+   insufficient to cross section boundaries, that is dealt with a
+   little later, see below).
+
+   In order to deal with architectures that have short conditional
+   branches (which cannot span all of memory) we take any conditional
+   jump that attempts to cross a section boundary and add a level of
+   indirection: it becomes a conditional jump to a new basic block, in
+   the same section.  The new basic block contains an unconditional
+   jump to the original target, in the other section.
+
+   For those architectures whose unconditional branch is also
+   incapable of reaching all of memory, those unconditional jumps are
+   converted into indirect jumps, through a register.
+
+   IMPORTANT NOTE: This optimization causes some messy interactions
+   with the cfg cleanup optimizations; those optimizations want to
+   merge blocks wherever possible, and to collapse indirect jump
+   sequences (change "A jumps to B jumps to C" directly into "A jumps
+   to C").  Those optimizations can undo the jump fixes that
+   partitioning is required to make (see above), in order to ensure
+   that jumps attempting to cross section boundaries are really able
+   to cover whatever distance the jump requires (on many architectures
+   conditional or unconditional jumps are not able to reach all of
+   memory).  Therefore tests have to be inserted into each such
+   optimization to make sure that it does not undo stuff necessary to
+   cross partition boundaries.  This would be much less of a problem
+   if we could perform this optimization later in the compilation, but
+   unfortunately the fact that we may need to create indirect jumps
+   (through registers) requires that this optimization be performed
+   before register allocation.  */
+
+void
+partition_hot_cold_basic_blocks (void)
+{
+  basic_block cur_bb;
+  edge *crossing_edges;
+  int n_crossing_edges;
+  int max_edges = 2 * last_basic_block;
+  
+  if (n_basic_blocks <= 1)
+    return;
+  
+  crossing_edges = xcalloc (max_edges, sizeof (edge));
+
+  cfg_layout_initialize (0);
+  
+  FOR_EACH_BB (cur_bb)
+    if (cur_bb->index >= 0
+       && cur_bb->next_bb->index >= 0)
+      cur_bb->rbi->next = cur_bb->next_bb;
+  
+  find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges, 
+                                                       &n_crossing_edges, 
+                                                       &max_edges);
+
+  if (n_crossing_edges > 0)
+    fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);
+  
+  free (crossing_edges);
+
+  cfg_layout_finalize ();
+}
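Putting the two entry points together: partitioning runs early because it may create new pseudo registers for the indirect jumps, while trace-based reordering runs much later. A hedged sketch of the call order (the driver function is illustrative, not GCC's actual pass manager):

/* Illustrative driver only.  */
static void
rest_of_compilation_sketch (void)
{
  if (flag_reorder_blocks_and_partition)
    partition_hot_cold_basic_blocks ();  /* before register allocation */

  /* ... register allocation and other RTL passes ... */

  reorder_basic_blocks (0);              /* hot traces first, then cold */
}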