2004-10-05 Andrew Pinski <pinskia@physics.uc.edu>
[pf3gnuchains/gcc-fork.git] / gcc / bb-reorder.c
index c0c808a..1d0b097 100644
 /* Basic block reordering routines for the GNU compiler.
-   Copyright (C) 2000 Free Software Foundation, Inc.
+   Copyright (C) 2000, 2002, 2003, 2004 Free Software Foundation, Inc.
 
-   This file is part of GNU CC.
+   This file is part of GCC.
 
-   GNU CC is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2, or (at your option)
    any later version.
 
-   GNU CC is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
 
    You should have received a copy of the GNU General Public License
-   along with GNU CC; see the file COPYING.  If not, write to
-   the Free Software Foundation, 59 Temple Place - Suite 330,
-   Boston, MA 02111-1307, USA.  */
+   along with GCC; see the file COPYING.  If not, write to the Free
+   Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+   02111-1307, USA.  */
+
+/* This (greedy) algorithm constructs traces in several rounds.
+   The construction starts from "seeds".  The seed for the first round
+   is the entry point of the function.  When there is more than one seed,
+   the one with the lowest key in the heap is selected first
+   (see function bb_to_key).  Then the algorithm repeatedly adds the most
+   probable successor to the end of a trace.  Finally it connects the traces.
+
+   There are two parameters: Branch Threshold and Exec Threshold.
+   If the probability of the edge to a successor of the current basic block
+   is lower than Branch Threshold, or the frequency of the successor is
+   lower than Exec Threshold, the successor will be a seed in one of the
+   next rounds.  Each round has these parameters lower than the previous
+   one.  The last round has to have these parameters set to zero so that
+   the remaining blocks are picked up.
+
+   The algorithm selects the most probable successor from all unvisited
+   successors and successors that have been added to this trace.
+   The other successors (those that have not been "sent" to the next round)
+   will be other seeds for this round, and the secondary traces will start
+   from them.
+   If the successor has not been visited in this trace, it is added to the
+   trace (however, there is some heuristic for simple branches).
+   If the successor has been visited in this trace, a loop has been found.
+   If the loop has many iterations, the loop is rotated so that the source
+   block of the most probable edge going out of the loop is the last block
+   of the trace.
+   If the loop has few iterations and there is no edge from the last block
+   of the loop going out of the loop, the loop header is duplicated.
+   Finally, the construction of the trace is terminated.
+
+   When connecting traces, the algorithm first checks whether there is an
+   edge from the last block of one trace to the first block of another
+   trace.  When there are still some unconnected traces, it checks whether
+   there exists a basic block BB such that BB is a successor of the last
+   block of one trace and BB is a predecessor of the first block of another
+   trace.  In this case, BB is duplicated and the traces are connected
+   through this duplicate.  The remaining traces are simply connected, so
+   there will be a jump to the beginning of each remaining trace.
+
+
+   References:
+
+   "Software Trace Cache"
+   A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
+   http://citeseer.nj.nec.com/15361.html
 
-/* References:
-
-   "Profile Guided Code Positioning"
-   Pettis and Hanson; PLDI '90.
 */
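
In outline, the greedy growth described in the comment above can be reduced
to a few lines.  The following standalone sketch is illustrative only: the
toy CFG, the succ/prob arrays and all names are invented and appear nowhere
in bb-reorder.c, and the heaps, frequencies and counts of the real pass are
ignored.

    #include <stdio.h>

    #define N_BLOCKS 6

    /* Toy CFG: succ[b] is the most probable successor of block b (-1 if
       none), prob[b] the probability of that edge in percent.  */
    static int succ[N_BLOCKS] = {1, 2, 3, -1, 5, -1};
    static int prob[N_BLOCKS] = {90, 80, 30, 0, 95, 0};
    static int visited[N_BLOCKS];

    int
    main (void)
    {
      int branch_th = 50;       /* analogue of Branch Threshold */
      int seed;

      for (seed = 0; seed < N_BLOCKS; seed++)
        {
          int b = seed;

          if (visited[b])
            continue;
          printf ("trace:");
          while (!visited[b])
            {
              visited[b] = 1;
              printf (" %d", b);
              /* A cold or missing successor ends the trace; in the real
                 pass it would become a seed for a later round.  */
              if (succ[b] < 0 || prob[b] < branch_th)
                break;
              b = succ[b];
            }
          printf ("\n");        /* prints traces 0 1 2, then 3, then 4 5 */
        }
      return 0;
    }

The cold edge out of block 2 terminates the first trace exactly as a
sub-threshold edge defers a seed to a later round in the pass itself.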
 
 #include "config.h"
 #include "system.h"
-#include "tree.h"
+#include "coretypes.h"
+#include "tm.h"
 #include "rtl.h"
-#include "tm_p.h"
 #include "basic-block.h"
-#include "insn-config.h"
-#include "regs.h"
-#include "hard-reg-set.h"
 #include "flags.h"
+#include "timevar.h"
 #include "output.h"
+#include "cfglayout.h"
+#include "fibheap.h"
+#include "target.h"
 #include "function.h"
-#include "except.h"
-#include "toplev.h"
-#include "recog.h"
-#include "insn-flags.h"
-#include "expr.h"
+#include "tm_p.h"
 #include "obstack.h"
+#include "expr.h"
+#include "regs.h"
 
+/* The number of rounds.  In most cases there will only be 4 rounds, but
+   when partitioning hot and cold basic blocks into separate sections of
+   the .o file there will be an extra round.  */
+#define N_ROUNDS 5
 
-/* The contents of the current function definition are allocated
-   in this obstack, and all are freed at the end of the function.
-   For top-level functions, this is temporary_obstack.
-   Separate obstacks are made for nested functions.  */
-
-extern struct obstack *function_obstack;
-
-
-/* Structure to hold information about lexical scopes.  */
-typedef struct scope_def
-{
-  int level;
-
-  /* The NOTE_INSN_BLOCK_BEG that started this scope.  */
-  rtx note_beg;
-
-  /* The NOTE_INSN_BLOCK_END that ended this scope.  */
-  rtx note_end;
-
-  /* The bb containing note_beg (if any).  */
-  basic_block bb_beg;
-
-  /* The bb containing note_end (if any).  */
-  basic_block bb_end;
+/* Stubs in case we don't have a return insn.
+   We have to check at runtime too, not only at compile time.  */
 
-  /* List of basic blocks contained within this scope.  */
-  basic_block *bbs;
+#ifndef HAVE_return
+#define HAVE_return 0
+#define gen_return() NULL_RTX
+#endif
 
-  /* Number of blocks contained within this scope.  */
-  int num_bbs;
 
-  /* The outer scope or NULL if outermost scope.  */
-  struct scope_def *outer;
+/* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE.  */
+static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
 
-  /* The first inner scope or NULL if innermost scope.  */
-  struct scope_def *inner;
+/* Exec thresholds in thousandths (per mille) of the frequency of bb 0.  */
+static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
 
-  /* The last inner scope or NULL if innermost scope.  */
-  struct scope_def *inner_last;
+/* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry
+   block the edge destination is not duplicated while connecting traces.  */
+#define DUPLICATION_THRESHOLD 100
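
For concreteness, the per-mille schedules above are applied once per round
by scaling REG_BR_PROB_BASE and the maximum entry-block frequency, as
find_traces does further down.  A minimal sketch, assuming
REG_BR_PROB_BASE is 10000 (its value in GCC at this time) and an invented
entry frequency:

    #include <stdio.h>

    #define N_ROUNDS 5
    #define REG_BR_PROB_BASE 10000

    static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
    static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};

    int
    main (void)
    {
      int max_entry_frequency = 1000;   /* hypothetical value */
      int i;

      /* Per-round absolute thresholds; both schedules drop to zero so
         the last rounds pick up every remaining block.  */
      for (i = 0; i < N_ROUNDS; i++)
        printf ("round %d: branch_th %d, exec_th %d\n", i + 1,
                REG_BR_PROB_BASE * branch_threshold[i] / 1000,
                max_entry_frequency * exec_threshold[i] / 1000);
      return 0;
    }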
 
-  /* Link to the next (sibling) scope.  */
-  struct scope_def *next;
-} *scope;
+/* Length of unconditional jump instruction.  */
+static int uncond_jump_length;
 
-/* Structure to hold information about the scope forest.  */
-typedef struct
+/* Structure to hold needed information for each basic block.  */
+typedef struct bbro_basic_block_data_def
 {
-  /* Number of trees in forest.  */
-  int num_trees;
-
-  /* List of tree roots.  */
-  scope *trees;
-} scope_forest_info;
-
-
-typedef struct reorder_block_def {
-  int flags;
-  int index;
-  basic_block add_jump;
-  rtx eff_head;
-  rtx eff_end;
-  scope scope;
-} *reorder_block_def;
-
-static struct reorder_block_def rbd_init
-= {
-    0,                 /* flags */
-    0,                 /* index */
-    NULL,              /* add_jump */
-    NULL_RTX,          /* eff_head */
-    NULL_RTX,          /* eff_end */
-    NULL               /* scope */
-};
+  /* Which trace is the bb the start of (-1 means it is not the start of
+     any trace).  */
+  int start_of_trace;
 
+  /* Which trace is the bb the end of (-1 means it is not the end of
+     any trace).  */
+  int end_of_trace;
 
-#define REORDER_BLOCK_HEAD     0x1
-#define REORDER_BLOCK_VISITED  0x2
-  
-#define REORDER_BLOCK_FLAGS(bb) \
-  ((reorder_block_def) (bb)->aux)->flags
+  /* Which heap is BB in (if any)?  */
+  fibheap_t heap;
+
+  /* Which heap node is BB in (if any)?  */
+  fibnode_t node;
+} bbro_basic_block_data;
 
-#define REORDER_BLOCK_INDEX(bb) \
-  ((reorder_block_def) (bb)->aux)->index
+/* The current size of the following dynamic array.  */
+static int array_size;
 
-#define REORDER_BLOCK_ADD_JUMP(bb) \
-  ((reorder_block_def) (bb)->aux)->add_jump
+/* The array which holds needed information for basic blocks.  */
+static bbro_basic_block_data *bbd;
 
-#define REORDER_BLOCK_EFF_HEAD(bb) \
-  ((reorder_block_def) (bb)->aux)->eff_head
+/* To avoid frequent reallocation the size of the arrays is greater than
+   needed; the number of elements is (not less than) 1.25 * size_wanted.  */
+#define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
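
Stand-alone, the macro's arithmetic looks like this: the result is always a
multiple of 5 and at least 1.25 * X, so a run of small growth requests maps
to only a few distinct sizes.  The harness below is illustrative only.

    #include <stdio.h>

    #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)

    int
    main (void)
    {
      int x;

      /* wanted 1 -> 5, 5 -> 10, 9 -> 15, 13 -> 20, 17 -> 25 */
      for (x = 1; x <= 17; x += 4)
        printf ("wanted %2d -> allocated %2d\n", x, GET_ARRAY_SIZE (x));
      return 0;
    }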
 
-#define REORDER_BLOCK_EFF_END(bb) \
-  ((reorder_block_def) (bb)->aux)->eff_end
+/* Free the memory and set the pointer to NULL.  */
+#define FREE(P) (gcc_assert (P), free (P), P = 0)
 
-#define REORDER_BLOCK_SCOPE(bb) \
-  ((reorder_block_def) (bb)->aux)->scope
+/* Structure for holding information about a trace.  */
+struct trace
+{
+  /* First and last basic block of the trace.  */
+  basic_block first, last;
 
+  /* The round of STC creation in which this trace was found.  */
+  int round;
 
-static int reorder_index;
-static basic_block reorder_last_visited;
+  /* The length (i.e. the number of basic blocks) of the trace.  */
+  int length;
+};
 
+/* Maximum frequency and count of one of the entry blocks.  */
+int max_entry_frequency;
+gcov_type max_entry_count;
 
 /* Local function prototypes.  */
-static rtx skip_insns_after_block      PARAMS ((basic_block));
-static basic_block get_common_dest     PARAMS ((basic_block, basic_block));
-static basic_block chain_reorder_blocks        PARAMS ((edge, basic_block));
-static void make_reorder_chain         PARAMS ((basic_block));
-static void fixup_reorder_chain                PARAMS ((void));
-#ifdef ENABLE_CHECKING
-static void verify_insn_chain          PARAMS ((void));
-#endif
-static void relate_bbs_with_scopes     PARAMS ((scope));
-static scope make_new_scope            PARAMS ((int, rtx));
-static void build_scope_forest         PARAMS ((scope_forest_info *));
-static void remove_scope_notes         PARAMS ((void));
-static void insert_intra_1             PARAMS ((scope, rtx *));
-static void insert_intra_bb_scope_notes PARAMS ((basic_block));
-static void insert_inter_bb_scope_notes PARAMS ((basic_block, basic_block));
-static void rebuild_scope_notes                PARAMS ((scope_forest_info *));
-static void free_scope_forest_1                PARAMS ((scope));
-static void free_scope_forest          PARAMS ((scope_forest_info *));
-void dump_scope_forest                 PARAMS ((scope_forest_info *));
-static void dump_scope_forest_1                PARAMS ((scope, int));
-static rtx get_next_bb_note            PARAMS ((rtx));
-static rtx get_prev_bb_note            PARAMS ((rtx));
-
-/* Skip over inter-block insns occurring after BB which are typically
-   associated with BB (e.g., barriers). If there are any such insns,
-   we return the last one. Otherwise, we return the end of BB.  */
-
-static rtx
-skip_insns_after_block (bb)
-     basic_block bb;
+static void find_traces (int *, struct trace *);
+static basic_block rotate_loop (edge, struct trace *, int);
+static void mark_bb_visited (basic_block, int);
+static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
+                                int, fibheap_t *, int);
+static basic_block copy_bb (basic_block, edge, basic_block, int);
+static fibheapkey_t bb_to_key (basic_block);
+static bool better_edge_p (basic_block, edge, int, int, int, int, edge);
+static void connect_traces (int, struct trace *);
+static bool copy_bb_p (basic_block, int);
+static int get_uncond_jump_length (void);
+static bool push_to_next_round_p (basic_block, int, int, int, gcov_type);
+static void add_unlikely_executed_notes (void);
+static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *, 
+                                                                 int *,
+                                                                 int *);
+static void mark_bb_for_unlikely_executed_section  (basic_block);
+static void add_labels_and_missing_jumps (edge *, int);
+static void add_reg_crossing_jump_notes (void);
+static void fix_up_fall_thru_edges (void);
+static void fix_edges_for_rarely_executed_code (edge *, int);
+static void fix_crossing_conditional_branches (void);
+static void fix_crossing_unconditional_branches (void);
+\f
+/* Check to see if bb should be pushed into the next round of trace
+   collections or not.  Reasons for pushing the block forward are:
+   1) the block is cold, we are doing partitioning, and there will be
+   another round (cold partition blocks are not supposed to be collected
+   into traces until the very last round); or 2) there will be another
+   round, and the basic block is not "hot enough" for the current round
+   of trace collection.  */
+
+static bool
+push_to_next_round_p (basic_block bb, int round, int number_of_rounds,
+                     int exec_th, gcov_type count_th)
 {
-  rtx insn, last_insn;
-
-  last_insn = bb->end;
+  bool there_exists_another_round;
+  bool cold_block;
+  bool block_not_hot_enough;
+  bool next_round_is_last;
+
+  there_exists_another_round = round < number_of_rounds - 1;
+  next_round_is_last = round + 1 == number_of_rounds - 1;
+
+  cold_block = (flag_reorder_blocks_and_partition 
+               && BB_PARTITION (bb) == BB_COLD_PARTITION);
+
+  block_not_hot_enough = (bb->frequency < exec_th 
+                         || bb->count < count_th
+                         || probably_never_executed_bb_p (bb));
+
+  if (flag_reorder_blocks_and_partition
+      && next_round_is_last
+      && BB_PARTITION (bb) != BB_COLD_PARTITION)
+    return false;
+  else if (there_exists_another_round
+      && (cold_block || block_not_hot_enough))
+    return true;
+  else 
+    return false;
+}
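
Stripped of the CFG queries, the predicate reduces to two guards.  The
following is a hypothetical standalone rendering, with plain booleans
standing in for BB_PARTITION, frequency and count tests; push_p and its
parameters are invented names.

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    push_p (bool another_round, bool next_is_last, bool partitioning,
            bool cold, bool not_hot_enough)
    {
      /* Hot blocks must not be deferred into the final, cold-only round.  */
      if (partitioning && next_is_last && !cold)
        return false;
      /* Otherwise defer blocks that are cold or not hot enough.  */
      return another_round && (cold || not_hot_enough);
    }

    int
    main (void)
    {
      printf ("%d\n", push_p (true, false, true, true, false));   /* 1 */
      printf ("%d\n", push_p (true, true, true, false, true));    /* 0 */
      printf ("%d\n", push_p (false, false, false, false, true)); /* 0 */
      return 0;
    }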
 
-  if (bb == EXIT_BLOCK_PTR)
-    return 0;
+/* Find the traces for Software Trace Cache.  Chain each trace through
+   rbi->next.  Store the number of traces in *N_TRACES and the description
+   of each trace in TRACES.  */
 
-  for (insn = NEXT_INSN (bb->end); 
-       insn;
-       last_insn = insn, insn = NEXT_INSN (insn))
+static void
+find_traces (int *n_traces, struct trace *traces)
+{
+  int i;
+  int number_of_rounds;
+  edge e;
+  edge_iterator ei;
+  fibheap_t heap;
+
+  /* Add one extra round of trace collection when partitioning hot/cold
+     basic blocks into separate sections.  The last round is for all the
+     cold blocks (and ONLY the cold blocks).  */
+
+  number_of_rounds = N_ROUNDS - 1;
+  if (flag_reorder_blocks_and_partition)
+    number_of_rounds = N_ROUNDS;
+
+  /* Insert entry points of function into heap.  */
+  heap = fibheap_new ();
+  max_entry_frequency = 0;
+  max_entry_count = 0;
+  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
     {
-      if (bb->index + 1 != n_basic_blocks
-         && insn == BASIC_BLOCK (bb->index + 1)->head)
-       break;
-
-      if (GET_CODE (insn) == BARRIER
-         || GET_CODE (insn) == JUMP_INSN 
-         || (GET_CODE (insn) == NOTE
-             && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
-                 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)))
-       continue;
-
-      if (GET_CODE (insn) == CODE_LABEL
-         && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
-         && (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC
-             || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC))
-       {
-         insn = NEXT_INSN (insn);
-         continue;
-       }
-
-      /* Skip to next non-deleted insn.  */
-      if (GET_CODE (insn) == NOTE
-         && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED
-             || NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL))
-       continue; 
-
-      break;
+      bbd[e->dest->index].heap = heap;
+      bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
+                                                   e->dest);
+      if (e->dest->frequency > max_entry_frequency)
+       max_entry_frequency = e->dest->frequency;
+      if (e->dest->count > max_entry_count)
+       max_entry_count = e->dest->count;
     }
 
-  return last_insn;
-}
+  /* Find the traces.  */
+  for (i = 0; i < number_of_rounds; i++)
+    {
+      gcov_type count_threshold;
 
+      if (dump_file)
+       fprintf (dump_file, "STC - round %d\n", i + 1);
 
-/* Return common destination for blocks BB0 and BB1.  */
+      if (max_entry_count < INT_MAX / 1000)
+       count_threshold = max_entry_count * exec_threshold[i] / 1000;
+      else
+       count_threshold = max_entry_count / 1000 * exec_threshold[i];
 
-static basic_block
-get_common_dest (bb0, bb1)
-     basic_block bb0, bb1;
-{
-  edge e0, e1;
+      find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
+                          max_entry_frequency * exec_threshold[i] / 1000,
+                          count_threshold, traces, n_traces, i, &heap,
+                          number_of_rounds);
+    }
+  fibheap_delete (heap);
 
-  for (e0 = bb0->succ; e0; e0 = e0->succ_next)
+  if (dump_file)
     {
-      for (e1 = bb1->succ; e1; e1 = e1->succ_next)
+      for (i = 0; i < *n_traces; i++)
        {
-         if (e0->dest == e1->dest)
-           {
-             return e0->dest;
-           }
+         basic_block bb;
+         fprintf (dump_file, "Trace %d (round %d):  ", i + 1,
+                  traces[i].round + 1);
+         for (bb = traces[i].first; bb != traces[i].last; bb = bb->rbi->next)
+           fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
+         fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
        }
+      fflush (dump_file);
     }
-  return 0;
 }
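
One detail above is worth isolating: count_threshold is computed by
multiplying before dividing only while the product cannot overflow, and by
dividing first (losing a little precision) otherwise.  A minimal sketch of
that idiom, with long long standing in for gcov_type and invented values:

    #include <limits.h>
    #include <stdio.h>

    static long long
    scale_per_mille (long long count, int per_mille)
    {
      if (count < INT_MAX / 1000)
        return count * per_mille / 1000;   /* exact */
      return count / 1000 * per_mille;     /* overflow-safe, rounded */
    }

    int
    main (void)
    {
      printf ("%lld\n", scale_per_mille (12345LL, 500));        /* 6172 */
      printf ("%lld\n", scale_per_mille (3000000000LL, 500));   /* 1500000000 */
      return 0;
    }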
 
-
-/* Move the destination block for edge E after chain end block CEB
-   Adding jumps and labels is deferred until fixup_reorder_chain.  */
+/* Rotate the loop whose back edge is BACK_EDGE and which lies in the tail
+   of trace TRACE (with sequential number TRACE_N).  */
 
 static basic_block
-chain_reorder_blocks (e, ceb)
-     edge e;
-     basic_block ceb;
+rotate_loop (edge back_edge, struct trace *trace, int trace_n)
 {
-  basic_block sb = e->src;
-  basic_block db = e->dest;
-  rtx cebe_insn, dbh_insn, dbe_insn;
-  edge ee, last_edge;
-  edge e_fallthru, e_jump;
-
-  enum cond_types {NO_COND, PREDICT_THEN_WITH_ELSE, PREDICT_ELSE,
-                  PREDICT_THEN_NO_ELSE, PREDICT_NOT_THEN_NO_ELSE};
-  enum cond_types cond_type;
-  enum cond_block_types {NO_COND_BLOCK, THEN_BLOCK, ELSE_BLOCK,
-                        NO_ELSE_BLOCK};
-  enum cond_block_types cond_block_type;
-
-  if (rtl_dump_file)
-    fprintf (rtl_dump_file,
-            "Edge from basic block %d to basic block %d last visited %d\n",
-            sb->index, db->index, ceb->index);
-  cebe_insn = REORDER_BLOCK_EFF_END (ceb);
-
-  /* Blocks are in original order.  */
-  if (sb->index == ceb->index
-      && ceb->index + 1 == db->index && NEXT_INSN (cebe_insn))
-    return db;
-
-  e_fallthru = e_jump = e;
-
-  /* Get the type of block and type of condition.  */
-  cond_type = NO_COND;
-  cond_block_type = NO_COND_BLOCK;
-  if (GET_CODE (sb->end) == JUMP_INSN && ! simplejump_p (sb->end)
-      && condjump_p (sb->end))
+  basic_block bb;
+
+  /* Information about the best end (end after rotation) of the loop.  */
+  basic_block best_bb = NULL;
+  edge best_edge = NULL;
+  int best_freq = -1;
+  gcov_type best_count = -1;
+  /* The best edge is preferred when its destination is not visited yet
+     or is a start block of some trace.  */
+  bool is_preferred = false;
+
+  /* Find the most frequent edge that goes out from current trace.  */
+  bb = back_edge->dest;
+  do
     {
-      if (e->flags & EDGE_FALLTHRU)
-       {
-         if (e == sb->succ)
-           e_jump = sb->succ->succ_next;
-         else if (e == sb->succ->succ_next)
-           e_jump = sb->succ;
-         else
-           abort ();
-       }
-      else
-       {
-         if (e == sb->succ)
-           e_fallthru = sb->succ->succ_next;
-         else if (e == sb->succ->succ_next)
-           e_fallthru = sb->succ;
-         else
-           abort ();
-       }
-
-      if (e->flags & EDGE_FALLTHRU)
-       cond_block_type = THEN_BLOCK;
-      else if (get_common_dest (e_fallthru->dest, sb))
-       cond_block_type = NO_ELSE_BLOCK;
-      else 
-       cond_block_type = ELSE_BLOCK;
+      edge e;
+      edge_iterator ei;
 
-      if (get_common_dest (e_fallthru->dest, sb))
+      FOR_EACH_EDGE (e, ei, bb->succs)
+       if (e->dest != EXIT_BLOCK_PTR
+           && e->dest->rbi->visited != trace_n
+           && (e->flags & EDGE_CAN_FALLTHRU)
+           && !(e->flags & EDGE_COMPLEX))
        {
-         if (cond_block_type == THEN_BLOCK)
+         if (is_preferred)
            {
-             if (! (REORDER_BLOCK_FLAGS (e->dest)
-                    & REORDER_BLOCK_VISITED))
-               cond_type = PREDICT_THEN_NO_ELSE;
-             else
-               cond_type = PREDICT_NOT_THEN_NO_ELSE;
+             /* The best edge is preferred.  */
+             if (!e->dest->rbi->visited
+                 || bbd[e->dest->index].start_of_trace >= 0)
+               {
+                 /* The current edge E is also preferred.  */
+                 int freq = EDGE_FREQUENCY (e);
+                 if (freq > best_freq || e->count > best_count)
+                   {
+                     best_freq = freq;
+                     best_count = e->count;
+                     best_edge = e;
+                     best_bb = bb;
+                   }
+               }
            }
-         else if (cond_block_type == NO_ELSE_BLOCK)
+         else
            {
-             if (! (REORDER_BLOCK_FLAGS (e->dest)
-                    & REORDER_BLOCK_VISITED))
-               cond_type = PREDICT_NOT_THEN_NO_ELSE;
+             if (!e->dest->rbi->visited
+                 || bbd[e->dest->index].start_of_trace >= 0)
+               {
+                 /* The current edge E is preferred.  */
+                 is_preferred = true;
+                 best_freq = EDGE_FREQUENCY (e);
+                 best_count = e->count;
+                 best_edge = e;
+                 best_bb = bb;
+               }
              else
-               cond_type = PREDICT_THEN_NO_ELSE;
+               {
+                 int freq = EDGE_FREQUENCY (e);
+                 if (!best_edge || freq > best_freq || e->count > best_count)
+                   {
+                     best_freq = freq;
+                     best_count = e->count;
+                     best_edge = e;
+                     best_bb = bb;
+                   }
+               }
            }
        }
+      bb = bb->rbi->next;
+    }
+  while (bb != back_edge->dest);
+
+  if (best_bb)
+    {
+      /* Rotate the loop so that the BEST_EDGE goes out from the last block of
+        the trace.  */
+      if (back_edge->dest == trace->first)
+       {
+         trace->first = best_bb->rbi->next;
+       }
       else
        {
-         if (cond_block_type == THEN_BLOCK)
-           {
-             if (! (REORDER_BLOCK_FLAGS (e->dest)
-                    & REORDER_BLOCK_VISITED))
-               cond_type = PREDICT_THEN_WITH_ELSE;
-             else
-               cond_type = PREDICT_ELSE;
-           }
-         else if (cond_block_type == ELSE_BLOCK
-                  && e_fallthru->dest != EXIT_BLOCK_PTR)
+         basic_block prev_bb;
+
+         for (prev_bb = trace->first;
+              prev_bb->rbi->next != back_edge->dest;
+              prev_bb = prev_bb->rbi->next)
+           ;
+         prev_bb->rbi->next = best_bb->rbi->next;
+
+         /* Try to get rid of uncond jump to cond jump.  */
+         if (EDGE_COUNT (prev_bb->succs) == 1)
            {
-             if (! (REORDER_BLOCK_FLAGS (e->dest)
-                    & REORDER_BLOCK_VISITED))
-               cond_type = PREDICT_ELSE;
-             else
-               cond_type = PREDICT_THEN_WITH_ELSE;
+             basic_block header = EDGE_SUCC (prev_bb, 0)->dest;
+
+             /* Duplicate HEADER if it is a small block containing cond jump
+                in the end.  */
+             if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
+                 && !find_reg_note (BB_END (header), REG_CROSSING_JUMP, 
+                                    NULL_RTX))
+               {
+                 copy_bb (header, EDGE_SUCC (prev_bb, 0), prev_bb, trace_n);
+               }
            }
        }
     }
-  
-  if (rtl_dump_file)
+  else
     {
-      static const char * cond_type_str [] = {"not cond jump", "predict then",
-                                             "predict else",
-                                             "predict then w/o else",
-                                             "predict not then w/o else"};
-      static const char * cond_block_type_str [] = {"not then or else block",
-                                                   "then block",
-                                                   "else block",
-                                                   "then w/o else block"};
-
-      fprintf (rtl_dump_file, "     %s (looking at %s)\n",
-              cond_type_str[(int)cond_type],
-              cond_block_type_str[(int)cond_block_type]);
+      /* We have not found suitable loop tail so do no rotation.  */
+      best_bb = back_edge->src;
     }
+  best_bb->rbi->next = NULL;
+  return best_bb;
+}
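
The splice at the heart of the rotation is easy to see on a toy chain.  By
the time rotate_loop runs, the caller has already set bb->rbi->next so the
trace forms a cycle; cutting that cycle right after BEST_BB makes BEST_BB
the last block.  The illustrative standalone version below (plain structs,
invented names) mirrors only the back_edge->dest == trace->first branch;
the other branch additionally re-links the predecessor of the loop header.

    #include <stdio.h>

    struct blk { int index; struct blk *next; };

    int
    main (void)
    {
      /* Loop header 1, body blocks 2 and 3; the back edge 3 -> 1 has been
         closed into the chain by the caller.  */
      struct blk b3 = {3, NULL}, b2 = {2, &b3}, b1 = {1, &b2};
      struct blk *first = &b1, *best_bb = &b2, *p;

      b3.next = &b1;           /* cycle closed before rotation */
      first = best_bb->next;   /* new head: the block after the cut */
      best_bb->next = NULL;    /* BEST_BB is now the last block */

      for (p = first; p; p = p->next)
        printf ("%d ", p->index);   /* prints: 3 1 2 */
      printf ("\n");
      return 0;
    }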
 
-  /* Reflect that then block will move and we'll jump to it.  */
-  if (cond_block_type != THEN_BLOCK
-      && (cond_type == PREDICT_ELSE
-         || cond_type == PREDICT_NOT_THEN_NO_ELSE))
-    {
-      if (rtl_dump_file)
-       fprintf (rtl_dump_file,
-                "    then jump from block %d to block %d\n",
-                sb->index, e_fallthru->dest->index);
+/* This function marks BB as visited in trace number TRACE.  */
 
-      /* Jump to reordered then block.  */
-      REORDER_BLOCK_ADD_JUMP (sb) = e_fallthru->dest;
-    }
-  
-  /* Reflect that then block will jump back when we have no else.  */
-  if (cond_block_type != THEN_BLOCK
-      && cond_type == PREDICT_NOT_THEN_NO_ELSE)
+static void
+mark_bb_visited (basic_block bb, int trace)
+{
+  bb->rbi->visited = trace;
+  if (bbd[bb->index].heap)
     {
-      basic_block jbb = e_fallthru->dest;
-      for (ee = jbb->succ;
-          ee && ! (ee->flags & EDGE_FALLTHRU);
-          ee = ee->succ_next)
-       continue;
-
-      if (ee && ! (GET_CODE (jbb->end) == JUMP_INSN
-                  && ! simplejump_p (jbb->end)))
-       {
-         REORDER_BLOCK_ADD_JUMP (jbb) = ee->dest;
-       }
+      fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
+      bbd[bb->index].heap = NULL;
+      bbd[bb->index].node = NULL;
     }
+}
 
-  /* Reflect that else block will jump back.  */
-  if (cond_block_type == ELSE_BLOCK
-      && (cond_type == PREDICT_THEN_WITH_ELSE || cond_type == PREDICT_ELSE))
-    {
-      last_edge=db->succ;
-
-      if (last_edge
-         && last_edge->dest != EXIT_BLOCK_PTR
-         && GET_CODE (last_edge->dest->head) == CODE_LABEL
-         && ! (GET_CODE (db->end) == JUMP_INSN))
-       {
-         if (rtl_dump_file)
-           fprintf (rtl_dump_file,
-                    "     else jump from block %d to block %d\n",
-                    db->index, last_edge->dest->index);
+/* One round of finding traces.  Find traces for BRANCH_TH and EXEC_TH,
+   i.e. do not include into traces basic blocks whose probability is lower
+   than BRANCH_TH, whose frequency is lower than EXEC_TH, or whose count is
+   lower than COUNT_TH.  Store the new traces into TRACES and modify the
+   number of traces *N_TRACES.  Set the round (which the trace belongs to)
+   to ROUND.  The function expects the starting basic blocks to be in *HEAP;
+   at the end it deletes *HEAP and stores the starting points for the next
+   round into a new *HEAP.  */
 
-         REORDER_BLOCK_ADD_JUMP (db) = last_edge->dest;
-       }
-    }
+static void
+find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
+                    struct trace *traces, int *n_traces, int round,
+                    fibheap_t *heap, int number_of_rounds)
+{
+  /* The following variable refers to the last round in which non-"cold" 
+     blocks may be collected into a trace.  */
 
-  /* This block's successor has already been reordered. This can happen
-     when we reorder a chain starting at a then or else.  */
-  for (last_edge = db->succ;
-       last_edge && ! (last_edge->flags & EDGE_FALLTHRU);
-       last_edge = last_edge->succ_next)
-    continue;
-
-  if (last_edge
-      && last_edge->dest != EXIT_BLOCK_PTR
-      && (REORDER_BLOCK_FLAGS (last_edge->dest)
-         & REORDER_BLOCK_VISITED))
-    {
-      if (rtl_dump_file)
-       fprintf (rtl_dump_file,
-                "     end of chain jump from block %d to block %d\n",
-                db->index, last_edge->dest->index);
+  int last_round = N_ROUNDS - 1;
 
-      REORDER_BLOCK_ADD_JUMP (db) = last_edge->dest;
-    }
+  /* Heap for discarded basic blocks which are possible starting points for
+     the next round.  */
+  fibheap_t new_heap = fibheap_new ();
 
-  dbh_insn = REORDER_BLOCK_EFF_HEAD (db);
-  cebe_insn = REORDER_BLOCK_EFF_END (ceb);
-  dbe_insn = REORDER_BLOCK_EFF_END (db);
+  while (!fibheap_empty (*heap))
+    {
+      basic_block bb;
+      struct trace *trace;
+      edge best_edge, e;
+      fibheapkey_t key;
+      edge_iterator ei;
+
+      bb = fibheap_extract_min (*heap);
+      bbd[bb->index].heap = NULL;
+      bbd[bb->index].node = NULL;
+
+      if (dump_file)
+       fprintf (dump_file, "Getting bb %d\n", bb->index);
+
+      /* If the BB's frequency is too low, send BB to the next round.  When
+         partitioning hot/cold blocks into separate sections, make sure all
+         the cold blocks (and ONLY the cold blocks) go into the (extra) final
+         round.  */
+
+      if (push_to_next_round_p (bb, round, number_of_rounds, exec_th, 
+                               count_th))
+       {
+         int key = bb_to_key (bb);
+         bbd[bb->index].heap = new_heap;
+         bbd[bb->index].node = fibheap_insert (new_heap, key, bb);
+
+         if (dump_file)
+           fprintf (dump_file,
+                    "  Possible start point of next round: %d (key: %d)\n",
+                    bb->index, key);
+         continue;
+       }
 
-  /* Rechain predicted block.  */
-  NEXT_INSN (cebe_insn) = dbh_insn;
-  PREV_INSN (dbh_insn) = cebe_insn;
+      trace = traces + *n_traces;
+      trace->first = bb;
+      trace->round = round;
+      trace->length = 0;
+      (*n_traces)++;
 
-  if (db->index != n_basic_blocks - 1)
-    NEXT_INSN (dbe_insn) = 0;
+      do
+       {
+         int prob, freq;
 
-  return db;
-}
+         /* The probability and frequency of the best edge.  */
+         int best_prob = INT_MIN / 2;
+         int best_freq = INT_MIN / 2;
 
+         best_edge = NULL;
+         mark_bb_visited (bb, *n_traces);
+         trace->length++;
 
-/* Reorder blocks starting at block BB.  */
+         if (dump_file)
+           fprintf (dump_file, "Basic block %d was visited in trace %d\n",
+                    bb->index, *n_traces - 1);
 
-static void
-make_reorder_chain (bb)
-     basic_block bb;
-{
-  edge e;
-  basic_block visited_edge = NULL;
-  rtx block_end;
-  int probability;
+         /* Select the successor that will be placed after BB.  */
+         FOR_EACH_EDGE (e, ei, bb->succs)
+           {
+             gcc_assert (!(e->flags & EDGE_FAKE));
 
-  if (bb == EXIT_BLOCK_PTR)
-    return;
+             if (e->dest == EXIT_BLOCK_PTR)
+               continue;
 
-  /* Find the most probable block.  */
-  e = bb->succ;
-  block_end = bb->end;
-  if (GET_CODE (block_end) == JUMP_INSN && condjump_p (block_end))
-    {
-      rtx note = find_reg_note (block_end, REG_BR_PROB, 0);
+             if (e->dest->rbi->visited
+                 && e->dest->rbi->visited != *n_traces)
+               continue;
 
-      if (note) 
-       probability = INTVAL (XEXP (note, 0));
-      else
-       probability = 0;
+             if (BB_PARTITION (e->dest) == BB_COLD_PARTITION
+                 && round < last_round)
+               continue;
 
-      if (probability > REG_BR_PROB_BASE / 2)
-       e = bb->succ->succ_next;
-    }
+             prob = e->probability;
+             freq = EDGE_FREQUENCY (e);
 
-  /* Add chosen successor to chain and recurse on it.  */
-  if (e && e->dest != EXIT_BLOCK_PTR
-      && e->dest != e->src
-      && (! (REORDER_BLOCK_FLAGS (e->dest) & REORDER_BLOCK_VISITED)
-         || (REORDER_BLOCK_FLAGS (e->dest) == REORDER_BLOCK_HEAD)))
-    {
-      if (! (REORDER_BLOCK_FLAGS (bb) & REORDER_BLOCK_VISITED))
-       {
-         REORDER_BLOCK_FLAGS (bb) |= REORDER_BLOCK_HEAD;
-         REORDER_BLOCK_INDEX (bb) = reorder_index++;
-         REORDER_BLOCK_FLAGS (bb) |= REORDER_BLOCK_VISITED;
-       }
+             /* An edge that cannot be fallthru, or an improbable or
+                infrequent successor, is an unsuitable successor.  */
+             if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
+                 || prob < branch_th || freq < exec_th || e->count < count_th)
+               continue;
 
-      if (REORDER_BLOCK_FLAGS (e->dest) & REORDER_BLOCK_VISITED)
-       REORDER_BLOCK_FLAGS (e->dest) &= ~REORDER_BLOCK_HEAD;
-       
-      visited_edge = e->dest;
+             /* If partitioning hot/cold basic blocks, don't consider edges
+                that cross section boundaries.  */
 
-      reorder_last_visited = chain_reorder_blocks (e, bb);
+             if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
+                                best_edge))
+               {
+                 best_edge = e;
+                 best_prob = prob;
+                 best_freq = freq;
+               }
+           }
 
-      if (e->dest
-         && ! (REORDER_BLOCK_FLAGS (e->dest)
-               & REORDER_BLOCK_VISITED))
-       make_reorder_chain (e->dest);
-    }
-  else
-    {
-      if (! (REORDER_BLOCK_FLAGS (bb) & REORDER_BLOCK_VISITED))
-       {
-         REORDER_BLOCK_INDEX (bb) = reorder_index++;
-         REORDER_BLOCK_FLAGS (bb) |= REORDER_BLOCK_VISITED;
-       }
-    }
+         /* If the best destination has multiple predecessors, and can be
+            duplicated cheaper than a jump, don't allow it to be added
+            to a trace.  We'll duplicate it when connecting traces.  */
+         if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
+             && copy_bb_p (best_edge->dest, 0))
+           best_edge = NULL;
 
-  /* Recurse on the successors.  */
-  for (e = bb->succ; e; e = e->succ_next)
-    {
-      if (e->dest && e->dest == EXIT_BLOCK_PTR)
-       continue;
+         /* Add all non-selected successors to the heaps.  */
+         FOR_EACH_EDGE (e, ei, bb->succs)
+           {
+             if (e == best_edge
+                 || e->dest == EXIT_BLOCK_PTR
+                 || e->dest->rbi->visited)
+               continue;
 
-      if (e->dest
-         && e->dest != e->src
-         && e->dest != visited_edge
-         && ! (REORDER_BLOCK_FLAGS (e->dest)
-               & REORDER_BLOCK_VISITED))
-       {
-         reorder_last_visited
-           = chain_reorder_blocks (e, reorder_last_visited);
-         make_reorder_chain (e->dest);
-       }
-    }
-}
+             key = bb_to_key (e->dest);
 
+             if (bbd[e->dest->index].heap)
+               {
+                 /* E->DEST is already in some heap.  */
+                 if (key != bbd[e->dest->index].node->key)
+                   {
+                     if (dump_file)
+                       {
+                         fprintf (dump_file,
+                                  "Changing key for bb %d from %ld to %ld.\n",
+                                  e->dest->index,
+                                  (long) bbd[e->dest->index].node->key,
+                                  key);
+                       }
+                     fibheap_replace_key (bbd[e->dest->index].heap,
+                                          bbd[e->dest->index].node, key);
+                   }
+               }
+             else
+               {
+                 fibheap_t which_heap = *heap;
 
-/* Fixup jumps and labels after reordering basic blocks.  */ 
+                 prob = e->probability;
+                 freq = EDGE_FREQUENCY (e);
 
-static void
-fixup_reorder_chain ()
-{
-  int i, j;
-  rtx insn;
-  int orig_num_blocks = n_basic_blocks;
+                 if (!(e->flags & EDGE_CAN_FALLTHRU)
+                     || (e->flags & EDGE_COMPLEX)
+                     || prob < branch_th || freq < exec_th
+                     || e->count < count_th)
+                   {
+                     /* When partitioning hot/cold basic blocks, make sure
+                        the cold blocks (and only the cold blocks) all get
+                        pushed to the last round of trace collection.  */
+
+                     if (push_to_next_round_p (e->dest, round, 
+                                               number_of_rounds,
+                                               exec_th, count_th))
+                       which_heap = new_heap;
+                   }
 
-  /* Set the new last insn.  */
-  {
-    int max_val = 0;
-    int max_index = 0;
-    for (j = 0; j < n_basic_blocks; j++) 
-      {
-       int val = REORDER_BLOCK_INDEX (BASIC_BLOCK (j));
-       if (val > max_val)
-         {
-           max_val = val;
-           max_index = j;
-         }
-      }
-    insn = REORDER_BLOCK_EFF_END (BASIC_BLOCK (max_index));
-    NEXT_INSN (insn) = NULL_RTX;
-    set_last_insn (insn);
-  }
+                 bbd[e->dest->index].heap = which_heap;
+                 bbd[e->dest->index].node = fibheap_insert (which_heap,
+                                                               key, e->dest);
 
-  /* Add jumps and labels to fixup blocks.  */
-  for (i = 0; i < orig_num_blocks; i++)
-    {
-      int need_block = 0;
-      basic_block bbi = BASIC_BLOCK (i);
-      if (REORDER_BLOCK_ADD_JUMP (bbi))
-       {
-         rtx label_insn, jump_insn, barrier_insn;
+                 if (dump_file)
+                   {
+                     fprintf (dump_file,
+                              "  Possible start of %s round: %d (key: %ld)\n",
+                              (which_heap == new_heap) ? "next" : "this",
+                              e->dest->index, (long) key);
+                   }
 
-         if (GET_CODE (REORDER_BLOCK_ADD_JUMP (bbi)->head) == CODE_LABEL)
-           label_insn  = REORDER_BLOCK_ADD_JUMP (bbi)->head;
-         else
-           {
-             rtx new_label = gen_label_rtx ();
-             label_insn = emit_label_before (new_label,
-                             REORDER_BLOCK_ADD_JUMP (bbi)->head);
-             REORDER_BLOCK_ADD_JUMP (bbi)->head = label_insn;   
+               }
            }
 
-         if (GET_CODE (bbi->end) != JUMP_INSN)
+         if (best_edge) /* Suitable successor was found.  */
            {
-             jump_insn = emit_jump_insn_after (gen_jump (label_insn),
-                                               bbi->end);
-             bbi->end = jump_insn;
-             need_block = 0;
-           }
-         else
-           {
-             jump_insn = emit_jump_insn_after (gen_jump (label_insn),
-                                               REORDER_BLOCK_EFF_END (bbi));
-             need_block = 1;
-           }
+             if (best_edge->dest->rbi->visited == *n_traces)
+               {
+                 /* We do nothing with one-basic-block loops.  */
+                 if (best_edge->dest != bb)
+                   {
+                     if (EDGE_FREQUENCY (best_edge)
+                         > 4 * best_edge->dest->frequency / 5)
+                       {
+                         /* The loop has at least 4 iterations.  If the loop
+                            header is not the first block of the function
+                            we can rotate the loop.  */
+
+                         if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
+                           {
+                             if (dump_file)
+                               {
+                                 fprintf (dump_file,
+                                          "Rotating loop %d - %d\n",
+                                          best_edge->dest->index, bb->index);
+                               }
+                             bb->rbi->next = best_edge->dest;
+                             bb = rotate_loop (best_edge, trace, *n_traces);
+                           }
+                       }
+                     else
+                       {
+                         /* The loop has less than 4 iterations.  */
+
+                         /* Check whether there is another edge from BB.  */
+                         edge another_edge;
+                         FOR_EACH_EDGE (another_edge, ei, bb->succs)
+                           if (another_edge != best_edge)
+                             break;
+
+                         if (!another_edge && copy_bb_p (best_edge->dest,
+                                                         !optimize_size))
+                           {
+                             bb = copy_bb (best_edge->dest, best_edge, bb,
+                                           *n_traces);
+                           }
+                       }
+                   }
 
-         JUMP_LABEL (jump_insn) = label_insn;
-         ++LABEL_NUSES (label_insn);
-         barrier_insn = emit_barrier_after (jump_insn);
+                 /* Terminate the trace.  */
+                 break;
+               }
+             else
+               {
+                 /* Check for a situation
+
+                   A
+                  /|
+                 B |
+                  \|
+                   C
+
+                 where
+                 EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
+                   >= EDGE_FREQUENCY (AC).
+                 (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
+                 Best ordering is then A B C.
+
+                 This situation is created for example by:
+
+                 if (A) B;
+                 C;
+
+                 */
+
+                 FOR_EACH_EDGE (e, ei, bb->succs)
+                   if (e != best_edge
+                       && (e->flags & EDGE_CAN_FALLTHRU)
+                       && !(e->flags & EDGE_COMPLEX)
+                       && !e->dest->rbi->visited
+                       && EDGE_COUNT (e->dest->preds) == 1
+                       && !(e->flags & EDGE_CROSSING)
+                       && EDGE_COUNT (e->dest->succs) == 1
+                       && (EDGE_SUCC (e->dest, 0)->flags & EDGE_CAN_FALLTHRU)
+                       && !(EDGE_SUCC (e->dest, 0)->flags & EDGE_COMPLEX)
+                       && EDGE_SUCC (e->dest, 0)->dest == best_edge->dest
+                       && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
+                     {
+                       best_edge = e;
+                       if (dump_file)
+                         fprintf (dump_file, "Selecting BB %d\n",
+                                  best_edge->dest->index);
+                       break;
+                     }
+
+                 bb->rbi->next = best_edge->dest;
+                 bb = best_edge->dest;
+               }
+           }
+       }
+      while (best_edge);
+      trace->last = bb;
+      bbd[trace->first->index].start_of_trace = *n_traces - 1;
+      bbd[trace->last->index].end_of_trace = *n_traces - 1;
+
+      /* The trace is terminated so we have to recount the keys in the heap
+        (some block can have a lower key because now one of its predecessors
+        is an end of the trace).  */
+      FOR_EACH_EDGE (e, ei, bb->succs)
+       {
+         if (e->dest == EXIT_BLOCK_PTR
+             || e->dest->rbi->visited)
+           continue;
 
-         /* Add block for jump.  Typically this is when a then is not
-            predicted and we are jumping to the moved then block.  */
-         if (need_block)
+         if (bbd[e->dest->index].heap)
            {
-             basic_block nb;
-
-             VARRAY_GROW (basic_block_info, ++n_basic_blocks);
-             create_basic_block (n_basic_blocks - 1, jump_insn,
-                                 jump_insn, NULL);
-             nb = BASIC_BLOCK (n_basic_blocks - 1);
-             nb->global_live_at_start
-               = OBSTACK_ALLOC_REG_SET (function_obstack);
-             nb->global_live_at_end
-               = OBSTACK_ALLOC_REG_SET (function_obstack);
-
-             COPY_REG_SET (nb->global_live_at_start,
-                           bbi->global_live_at_start);
-             COPY_REG_SET (nb->global_live_at_end,
-                           bbi->global_live_at_start);
-             BASIC_BLOCK (nb->index)->local_set = 0;
-
-             nb->aux = xcalloc (1, sizeof (struct reorder_block_def));
-             REORDER_BLOCK_INDEX (nb) = REORDER_BLOCK_INDEX (bbi) + 1;
-             /* Relink to new block.  */
-             nb->succ = bbi->succ;
-             nb->succ->src = nb;
-
-             make_edge (NULL, bbi, nb, 0);
-             bbi->succ->succ_next
-               = bbi->succ->succ_next->succ_next;
-             nb->succ->succ_next = 0;
-             /* Fix reorder block index to reflect new block.  */
-             for (j = 0; j < n_basic_blocks - 1; j++)
+             key = bb_to_key (e->dest);
+             if (key != bbd[e->dest->index].node->key)
                {
-                 basic_block bbj = BASIC_BLOCK (j);
-                 if (REORDER_BLOCK_INDEX (bbj)
-                     >= REORDER_BLOCK_INDEX (bbi) + 1)
-                   REORDER_BLOCK_INDEX (bbj)++;
+                 if (dump_file)
+                   {
+                     fprintf (dump_file,
+                              "Changing key for bb %d from %ld to %ld.\n",
+                              e->dest->index,
+                              (long) bbd[e->dest->index].node->key, key);
+                   }
+                 fibheap_replace_key (bbd[e->dest->index].heap,
+                                      bbd[e->dest->index].node,
+                                      key);
                }
-             REORDER_BLOCK_SCOPE (nb) = REORDER_BLOCK_SCOPE (bbi);
-             REORDER_BLOCK_EFF_HEAD (nb) = nb->head;
-             REORDER_BLOCK_EFF_END (nb) = barrier_insn;
            }
-         else
-           REORDER_BLOCK_EFF_END (bbi) = barrier_insn;
        }
     }
+
+  fibheap_delete (*heap);
+
+  /* "Return" the new heap.  */
+  *heap = new_heap;
 }
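
The round structure above rests on a two-heap handoff: seeds failing this
round's thresholds are parked in new_heap, which then replaces *heap for
the next round.  A reduced sketch, assuming libiberty's fibheap.h and the
fibheap_* entry points used above (invented block ids and keys; link
against libiberty):

    #include <stdio.h>
    #include "fibheap.h"

    int
    main (void)
    {
      fibheap_t heap = fibheap_new ();
      fibheap_t new_heap = fibheap_new ();
      int ids[3] = {0, 1, 2};
      long keys[3] = {-300, -200, -100};   /* lower key = hotter seed */
      int i;

      for (i = 0; i < 3; i++)
        fibheap_insert (heap, keys[i], &ids[i]);

      while (!fibheap_empty (heap))
        {
          int *bb = fibheap_extract_min (heap);

          if (*bb == 2)   /* pretend this block fails the thresholds */
            fibheap_insert (new_heap, keys[*bb], bb);
          else
            printf ("bb %d collected this round\n", *bb);
        }

      fibheap_delete (heap);
      heap = new_heap;   /* "return" the new heap, as above */
      while (!fibheap_empty (heap))
        printf ("bb %d deferred to the next round\n",
                *(int *) fibheap_extract_min (heap));
      fibheap_delete (heap);
      return 0;
    }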
 
+/* Create a duplicate of the basic block OLD_BB and redirect edge E to it,
+   add it to the trace after BB, mark OLD_BB visited and update the pass'
+   data structures (TRACE is the number of the trace which OLD_BB is
+   duplicated to).  */
 
-/* Perform sanity checks on the insn chain.
-   1. Check that next/prev pointers are consistent in both the forward and
-      reverse direction.
-   2. Count insns in chain, going both directions, and check if equal.
-   3. Check that get_last_insn () returns the actual end of chain.  */
-#ifdef ENABLE_CHECKING
-static void
-verify_insn_chain ()
+static basic_block
+copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
 {
-  rtx x,
-      prevx,
-      nextx;
-  int insn_cnt1,
-      insn_cnt2;
-
-  prevx = NULL;
-  insn_cnt1 = 1;
-  for (x = get_insns (); x; x = NEXT_INSN (x))
+  basic_block new_bb;
+
+  new_bb = duplicate_block (old_bb, e);
+  BB_COPY_PARTITION (new_bb, old_bb);
+
+  gcc_assert (e->dest == new_bb);
+  gcc_assert (!e->dest->rbi->visited);
+
+  if (dump_file)
+    fprintf (dump_file,
+            "Duplicated bb %d (created bb %d)\n",
+            old_bb->index, new_bb->index);
+  new_bb->rbi->visited = trace;
+  new_bb->rbi->next = bb->rbi->next;
+  bb->rbi->next = new_bb;
+
+  if (new_bb->index >= array_size || last_basic_block > array_size)
     {
-      if (PREV_INSN (x) != prevx)
+      int i;
+      int new_size;
+
+      new_size = MAX (last_basic_block, new_bb->index + 1);
+      new_size = GET_ARRAY_SIZE (new_size);
+      bbd = xrealloc (bbd, new_size * sizeof (bbro_basic_block_data));
+      for (i = array_size; i < new_size; i++)
        {
-         fprintf (stderr, "Forward traversal: insn chain corrupt.\n");
-         fprintf (stderr, "previous insn:\n");
-         debug_rtx (prevx);
-         fprintf (stderr, "current insn:\n");
-         debug_rtx (x);
-         abort ();
+         bbd[i].start_of_trace = -1;
+         bbd[i].end_of_trace = -1;
+         bbd[i].heap = NULL;
+         bbd[i].node = NULL;
        }
-      ++insn_cnt1;
-      prevx = x;
-    }
+      array_size = new_size;
 
-  if (prevx != get_last_insn ())
-    {
-      fprintf (stderr, "last_insn corrupt.\n");
-      abort ();
-    }
-
-  nextx = NULL;
-  insn_cnt2 = 1;
-  for (x = get_last_insn (); x; x = PREV_INSN (x))
-    {
-      if (NEXT_INSN (x) != nextx)
+      if (dump_file)
        {
-         fprintf (stderr, "Reverse traversal: insn chain corrupt.\n");
-         fprintf (stderr, "current insn:\n");
-         debug_rtx (x);
-         fprintf (stderr, "next insn:\n");
-         debug_rtx (nextx);
-         abort ();
+         fprintf (dump_file,
+                  "Growing the dynamic array to %d elements.\n",
+                  array_size);
        }
-      ++insn_cnt2;
-      nextx = x;
     }
 
-  if (insn_cnt1 != insn_cnt2)
-    {
-      fprintf (stderr, "insn_cnt1 (%d) not equal to insn_cnt2 (%d).\n",
-              insn_cnt1, insn_cnt2);
-      abort ();
-    }
+  return new_bb;
 }
-#endif
 
-static rtx
-get_next_bb_note (x)
-     rtx x;
+/* Compute and return the key (for the heap) of the basic block BB.  */
+
+static fibheapkey_t
+bb_to_key (basic_block bb)
 {
-  while (x)
+  edge e;
+  edge_iterator ei;
+  int priority = 0;
+
+  /* Do not start in probably never executed blocks.  */
+
+  if (BB_PARTITION (bb) == BB_COLD_PARTITION
+      || probably_never_executed_bb_p (bb))
+    return BB_FREQ_MAX;
+
+  /* Prefer blocks whose predecessor is an end of some trace
+     or whose predecessor edge is EDGE_DFS_BACK.  */
+  FOR_EACH_EDGE (e, ei, bb->preds)
     {
-      if (GET_CODE (x) == NOTE
-         && NOTE_LINE_NUMBER (x) == NOTE_INSN_BASIC_BLOCK)
-       return x;
-      x = NEXT_INSN (x);
+      if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
+         || (e->flags & EDGE_DFS_BACK))
+       {
+         int edge_freq = EDGE_FREQUENCY (e);
+
+         if (edge_freq > priority)
+           priority = edge_freq;
+       }
     }
-  return NULL;
+
+  if (priority)
+    /* A block with priority should have a significantly lower key.  */
+    return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
+  return -bb->frequency;
 }
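
Because the fibonacci heap extracts its minimum key first, bb_to_key
negates everything, and a block with a preferred predecessor gets an offset
of 100 * BB_FREQ_MAX so it sorts ahead of any block keyed by frequency
alone.  A standalone evaluation of the formula, with BB_FREQ_MAX taken as
10000 (its GCC value) and invented inputs:

    #include <stdio.h>

    #define BB_FREQ_MAX 10000

    static long
    key (int priority, int frequency)
    {
      if (priority)
        return -(100L * BB_FREQ_MAX + 100L * priority + frequency);
      return -(long) frequency;
    }

    int
    main (void)
    {
      printf ("%ld\n", key (0, 900));    /* -900 */
      printf ("%ld\n", key (0, 4000));   /* -4000: extracted earlier */
      printf ("%ld\n", key (50, 10));    /* -1005010: beats both */
      return 0;
    }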
 
+/* Return true when the edge E from basic block BB is better than the
+   temporary best edge (details are in the function).  The probability of
+   edge E is PROB.  The frequency of the successor is FREQ.  The current
+   best probability is BEST_PROB, the best frequency is BEST_FREQ.
+   The edge is considered equivalent when PROB does not differ much from
+   BEST_PROB; similarly for frequency.  */
 
-static rtx
-get_prev_bb_note (x)
-     rtx x;
+static bool
+better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob,
+              int best_freq, edge cur_best_edge)
 {
-  while (x)
-    {
-      if (GET_CODE (x) == NOTE
-         && NOTE_LINE_NUMBER (x) == NOTE_INSN_BASIC_BLOCK)
-       return x;
-      x = PREV_INSN (x);
-    }
-  return NULL;
-}
+  bool is_better_edge;
+
+  /* The BEST_* values do not have to be best, but can be a bit smaller than
+     maximum values.  */
+  int diff_prob = best_prob / 10;
+  int diff_freq = best_freq / 10;
+
+  if (prob > best_prob + diff_prob)
+    /* The edge has higher probability than the temporary best edge.  */
+    is_better_edge = true;
+  else if (prob < best_prob - diff_prob)
+    /* The edge has lower probability than the temporary best edge.  */
+    is_better_edge = false;
+  else if (freq < best_freq - diff_freq)
+    /* The edge and the temporary best edge have almost equivalent
+       probabilities.  The higher frequency of a successor now means
+       that there is another edge going into that successor.
+       This successor has lower frequency so it is better.  */
+    is_better_edge = true;
+  else if (freq > best_freq + diff_freq)
+    /* This successor has higher frequency so it is worse.  */
+    is_better_edge = false;
+  else if (e->dest->prev_bb == bb)
+    /* The edges have equivalent probabilities and the successors
+       have equivalent frequencies.  Select the previous successor.  */
+    is_better_edge = true;
+  else
+    is_better_edge = false;
+
+  /* If we are doing hot/cold partitioning, make sure that we always favor
+     non-crossing edges over crossing edges.  */
 
+  if (!is_better_edge
+      && flag_reorder_blocks_and_partition 
+      && cur_best_edge 
+      && (cur_best_edge->flags & EDGE_CROSSING)
+      && !(e->flags & EDGE_CROSSING))
+    is_better_edge = true;
 
-/* Determine and record the relationships between basic blocks and
-   scopes in scope tree S.  */
+  return is_better_edge;
+}
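
The comparison is deliberately fuzzy: probabilities (and, symmetrically,
frequencies) within 10% of the current best count as ties, and the decision
falls through to the next criterion.  The probability leg in isolation, as
a hypothetical harness with invented names:

    #include <stdio.h>

    /* Returns 1 for better, -1 for worse, 0 for a tie that the caller
       would resolve by comparing frequencies next.  */
    static int
    fuzzy_cmp (int prob, int best_prob)
    {
      int diff_prob = best_prob / 10;

      if (prob > best_prob + diff_prob)
        return 1;
      if (prob < best_prob - diff_prob)
        return -1;
      return 0;
    }

    int
    main (void)
    {
      printf ("%d\n", fuzzy_cmp (9500, 8000));  /* 1: above 8800 */
      printf ("%d\n", fuzzy_cmp (8500, 8000));  /* 0: within the band */
      printf ("%d\n", fuzzy_cmp (7000, 8000));  /* -1: below 7200 */
      return 0;
    }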
+
+/* Connect traces in array TRACES; N_TRACES is the count of traces.  */
 
 static void
-relate_bbs_with_scopes (s)
-     scope s;
+connect_traces (int n_traces, struct trace *traces)
 {
-  scope p;
-  int i, bbi1, bbi2, bbs_spanned;
-  rtx bbnote;
+  int i;
+  int unconnected_hot_trace_count = 0;
+  bool cold_connected = true;
+  bool *connected;
+  bool *cold_traces;
+  int last_trace;
+  int freq_threshold;
+  gcov_type count_threshold;
+
+  freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
+  if (max_entry_count < INT_MAX / 1000)
+    count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
+  else
+    count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
 
-  for (p = s->inner; p; p = p->next)
-    relate_bbs_with_scopes (p);
+  connected = xcalloc (n_traces, sizeof (bool));
+  last_trace = -1;
 
-  bbi1 = bbi2 = -1;
-  bbs_spanned = 0;
+  /* If we are partitioning hot/cold basic blocks, mark the cold
+     traces as already connected, to remove them from consideration
+     for connection to the hot traces.  After the hot traces have all
+     been connected (determined by "unconnected_hot_trace_count"), we
+     will go back and connect the cold traces.  */
 
-  /* If the begin and end notes are both inside the same basic block,
-     or if they are both outside of basic blocks, then we know immediately
-     how they are related. Otherwise, we need to poke around to make the
-     determination.  */
-  if (s->bb_beg != s->bb_end)
-    {
-      if (s->bb_beg && s->bb_end)
-        {
-         /* Both notes are in different bbs. This implies that all the
-            basic blocks spanned by the pair of notes are contained in
-             this scope.  */
-         bbi1 = s->bb_beg->index;
-         bbi2 = s->bb_end->index;
-         bbs_spanned = 1;
-       }
-      else if (! s->bb_beg)
-        {
-         /* First note is outside of a bb. If the scope spans more than
-            one basic block, then they all are contained within this
-             scope. Otherwise, this scope is contained within the basic
-            block.  */
-         bbnote = get_next_bb_note (s->note_beg);
-         if (! bbnote)
-           abort ();
-         if (NOTE_BASIC_BLOCK (bbnote) == s->bb_end)
-           {
-             bbs_spanned = 0;
-             s->bb_beg = NOTE_BASIC_BLOCK (bbnote);
-           }
-         else
-           {
-             bbi1 = NOTE_BASIC_BLOCK (bbnote)->index;
-             bbi2 = s->bb_end->index;
-             s->bb_end = NULL;
-             bbs_spanned = 1;
-           }
-       }
-      else /* ! s->bb_end */
-        {
-         /* Second note is outside of a bb. If the scope spans more than
-            one basic block, then they all are contained within this
-             scope. Otherwise, this scope is contained within the basic
-            block.  */
-         bbnote = get_prev_bb_note (s->note_end);
-         if (! bbnote)
-           abort ();
-         if (NOTE_BASIC_BLOCK (bbnote) == s->bb_beg)
-           {
-             bbs_spanned = 0;
-             s->bb_end = NOTE_BASIC_BLOCK (bbnote);
-           }
-         else
-           {
-             bbi1 = s->bb_beg->index;
-             bbi2 = NOTE_BASIC_BLOCK (bbnote)->index;
-             s->bb_beg = NULL;
-             bbs_spanned = 1;
-           }
-       }
-    }
-  else
+  cold_traces = xcalloc (n_traces, sizeof (bool));
+
+  if (flag_reorder_blocks_and_partition)
+    for (i = 0; i < n_traces; i++)
+      {
+       if (BB_PARTITION (traces[i].first) == BB_COLD_PARTITION)
+         {
+           connected[i] = true;
+           cold_traces[i] = true;
+           cold_connected = false;
+         }
+       else
+         unconnected_hot_trace_count++;
+      }
+  
+  for (i = 0; i < n_traces || !cold_connected; i++)
     {
-      if (s->bb_beg)
-        /* Both notes are in the same bb, which implies the block
-          contains this scope.  */
-       bbs_spanned = 0;
-      else
+      int t = i;
+      int t2;
+      edge e, best;
+      int best_len;
+
+      /* If we are partitioning hot/cold basic blocks, check to see
+        if all the hot traces have been connected.  If so, go back
+        and mark the cold traces as unconnected so we can connect
+        them up too.  Re-set "i" to the first (unconnected) cold
+        trace. Use flag "cold_connected" to make sure we don't do
+         this step more than once.  */
+
+      if (flag_reorder_blocks_and_partition
+         && (i >= n_traces || unconnected_hot_trace_count <= 0)
+         && !cold_connected)
        {
-          rtx x1, x2;
-         /* Both notes are outside of any bbs. This implies that all the
-            basic blocks spanned by the pair of notes are contained in
-             this scope. 
-            There is a degenerate case to consider. If the notes do not
-            span any basic blocks, then it is an empty scope that can
-            safely be deleted or ignored. Mark these with level = -1.  */
-
-         x1 = get_next_bb_note (s->note_beg);
-         x2 = get_prev_bb_note (s->note_end);
-         if (! (x1 && x2))
-           {
-             s->level = -1; 
-             bbs_spanned = 0; 
-           }
-         else
-           {
-             bbi1 = NOTE_BASIC_BLOCK (x1)->index;
-             bbi2 = NOTE_BASIC_BLOCK (x2)->index;
-             bbs_spanned = 1;
-           }
+         int j;
+         int first_cold_trace = -1;
+
+         for (j = 0; j < n_traces; j++)
+           if (cold_traces[j])
+             {
+               connected[j] = false;
+               if (first_cold_trace == -1)
+                 first_cold_trace = j;
+             }
+         i = t = first_cold_trace;
+         cold_connected = true;
        }
-    }
-
 
-  /* If the scope spans one or more basic blocks, we record them. We
-     only record the bbs that are immediately contained within this
-     scope. Note that if a scope is contained within a bb, we can tell
-     by checking that bb_beg = bb_end and that they are non-null.  */
-  if (bbs_spanned)
-    {
-      int j = 0;
+      if (connected[t])
+       continue;
 
-      s->num_bbs = 0;
-      for (i = bbi1; i <= bbi2; i++)
-       if (! REORDER_BLOCK_SCOPE (BASIC_BLOCK (i)))
-         s->num_bbs++;
+      connected[t] = true;
+      if (unconnected_hot_trace_count > 0)
+       unconnected_hot_trace_count--;
 
-      s->bbs = xcalloc (s->num_bbs, sizeof (struct basic_block_def));
-      for (i = bbi1; i <= bbi2; i++)
+      /* Find the predecessor traces.  */
+      for (t2 = t; t2 > 0;)
        {
-         basic_block curr_bb = BASIC_BLOCK (i);
-         if (! REORDER_BLOCK_SCOPE (curr_bb))
+         edge_iterator ei;
+         best = NULL;
+         best_len = 0;
+         FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
            {
-             s->bbs[j++] = curr_bb;
-             REORDER_BLOCK_SCOPE (curr_bb) = s;
+             int si = e->src->index;
+
+             if (e->src != ENTRY_BLOCK_PTR
+                 && (e->flags & EDGE_CAN_FALLTHRU)
+                 && !(e->flags & EDGE_COMPLEX)
+                 && bbd[si].end_of_trace >= 0
+                 && !connected[bbd[si].end_of_trace]
+                 && (!best
+                     || e->probability > best->probability
+                     || (e->probability == best->probability
+                         && traces[bbd[si].end_of_trace].length > best_len)))
+               {
+                 best = e;
+                 best_len = traces[bbd[si].end_of_trace].length;
+               }
            }
-       }
-    }
-  else
-    s->num_bbs = 0;
-}
-
-
-/* Allocate and initialize a new scope structure with scope level LEVEL,
-   and record the NOTE beginning the scope.  */
-
-static scope 
-make_new_scope (level, note)
-     int level;
-     rtx note;
-{
-  scope new_scope = xcalloc (1, sizeof (struct scope_def));
-  new_scope->level = level;
-  new_scope->note_beg = note;
-  new_scope->note_end = NULL;
-  new_scope->bb_beg = NULL;
-  new_scope->bb_end = NULL;
-  new_scope->inner = NULL;
-  new_scope->inner_last = NULL;
-  new_scope->outer = NULL;
-  new_scope->next = NULL;
-  new_scope->num_bbs = 0;
-  new_scope->bbs = NULL;
-  return new_scope;
-}
+         if (best)
+           {
+             best->src->rbi->next = best->dest;
+             t2 = bbd[best->src->index].end_of_trace;
+             connected[t2] = true;
 
+             if (unconnected_hot_trace_count > 0)
+               unconnected_hot_trace_count--;
 
-/* Build a forest representing the scope structure of the function.
-   Return a pointer to a structure describing the forest.  */
+             if (dump_file)
+               {
+                 fprintf (dump_file, "Connection: %d %d\n",
+                          best->src->index, best->dest->index);
+               }
+           }
+         else
+           break;
+       }
 
-static void
-build_scope_forest (forest)
-    scope_forest_info *forest;
-{
-  rtx x;
-  int level, bbi, i;
-  basic_block curr_bb;
-  scope root, curr_scope;
-
-  forest->num_trees = 0;
-  forest->trees = NULL;
-  level = -1;
-  root = NULL;
-  curr_bb = NULL;
-  bbi = 0;
-  for (x = get_insns (); x; x = NEXT_INSN (x))
-    {
-      if (bbi < n_basic_blocks && x == BASIC_BLOCK (bbi)->head)
-       curr_bb = BASIC_BLOCK (bbi);
+      if (last_trace >= 0)
+       traces[last_trace].last->rbi->next = traces[t2].first;
+      last_trace = t;
 
-      if (GET_CODE (x) == NOTE)
+      /* Find the successor traces.  */
+      while (1)
        {
-         if (NOTE_LINE_NUMBER (x) == NOTE_INSN_BLOCK_BEG)
+         /* Find the continuation of the chain.  */
+         edge_iterator ei;
+         best = NULL;
+         best_len = 0;
+         FOR_EACH_EDGE (e, ei, traces[t].last->succs)
+           {
+             int di = e->dest->index;
+
+             if (e->dest != EXIT_BLOCK_PTR
+                 && (e->flags & EDGE_CAN_FALLTHRU)
+                 && !(e->flags & EDGE_COMPLEX)
+                 && bbd[di].start_of_trace >= 0
+                 && !connected[bbd[di].start_of_trace]
+                 && (!best
+                     || e->probability > best->probability
+                     || (e->probability == best->probability
+                         && traces[bbd[di].start_of_trace].length > best_len)))
+               {
+                 best = e;
+                 best_len = traces[bbd[di].start_of_trace].length;
+               }
+           }
+
+         if (best)
+           {
+             if (dump_file)
+               {
+                 fprintf (dump_file, "Connection: %d %d\n",
+                          best->src->index, best->dest->index);
+               }
+             t = bbd[best->dest->index].start_of_trace;
+             traces[last_trace].last->rbi->next = traces[t].first;
+             connected[t] = true;
+             if (unconnected_hot_trace_count > 0)
+               unconnected_hot_trace_count--;
+             last_trace = t;
+           }
+         else
            {
-             if (root)
+             /* Try to connect the traces by duplication of 1 block.  */
+             edge e2;
+             basic_block next_bb = NULL;
+             bool try_copy = false;
+
+             FOR_EACH_EDGE (e, ei, traces[t].last->succs)
+               if (e->dest != EXIT_BLOCK_PTR
+                   && (e->flags & EDGE_CAN_FALLTHRU)
+                   && !(e->flags & EDGE_COMPLEX)
+                   && (!best || e->probability > best->probability))
+                 {
+                   edge_iterator ei;
+                   edge best2 = NULL;
+                   int best2_len = 0;
+
+                   /* If the destination is a start of a trace which is only
+                      one block long, then no need to search the successor
+                      blocks of the trace.  Accept it.  */
+                   if (bbd[e->dest->index].start_of_trace >= 0
+                       && traces[bbd[e->dest->index].start_of_trace].length
+                          == 1)
+                     {
+                       best = e;
+                       try_copy = true;
+                       continue;
+                     }
+
+                   FOR_EACH_EDGE (e2, ei, e->dest->succs)
+                     {
+                       int di = e2->dest->index;
+
+                       if (e2->dest == EXIT_BLOCK_PTR
+                           || ((e2->flags & EDGE_CAN_FALLTHRU)
+                               && !(e2->flags & EDGE_COMPLEX)
+                               && bbd[di].start_of_trace >= 0
+                               && !connected[bbd[di].start_of_trace]
+                               && (EDGE_FREQUENCY (e2) >= freq_threshold)
+                               && (e2->count >= count_threshold)
+                               && (!best2
+                                   || e2->probability > best2->probability
+                                   || (e2->probability == best2->probability
+                                       && traces[bbd[di].start_of_trace].length
+                                          > best2_len))))
+                         {
+                           best = e;
+                           best2 = e2;
+                           if (e2->dest != EXIT_BLOCK_PTR)
+                             best2_len = traces[bbd[di].start_of_trace].length;
+                           else
+                             best2_len = INT_MAX;
+                           next_bb = e2->dest;
+                           try_copy = true;
+                         }
+                     }
+                 }
+
+             if (flag_reorder_blocks_and_partition)
+               try_copy = false;
+
+             /* Copy tiny blocks always; copy larger blocks only when the
+                edge is traversed frequently enough.  */
+             if (try_copy
+                 && copy_bb_p (best->dest,
+                               !optimize_size
+                               && EDGE_FREQUENCY (best) >= freq_threshold
+                               && best->count >= count_threshold))
                {
-                 scope new_scope;
-                 if (! curr_scope)
-                   abort();
-                 level++;
-                 new_scope = make_new_scope (level, x);
-                 new_scope->outer = curr_scope;
-                 new_scope->next = NULL;
-                 if (! curr_scope->inner)
+                 basic_block new_bb;
+
+                 if (dump_file)
                    {
-                     curr_scope->inner = new_scope;
-                     curr_scope->inner_last = new_scope;
+                     fprintf (dump_file, "Connection: %d %d ",
+                              traces[t].last->index, best->dest->index);
+                     if (!next_bb)
+                       fputc ('\n', dump_file);
+                     else if (next_bb == EXIT_BLOCK_PTR)
+                       fprintf (dump_file, "exit\n");
+                     else
+                       fprintf (dump_file, "%d\n", next_bb->index);
                    }
-                 else
+
+                 new_bb = copy_bb (best->dest, best, traces[t].last, t);
+                 traces[t].last = new_bb;
+                 if (next_bb && next_bb != EXIT_BLOCK_PTR)
                    {
-                     curr_scope->inner_last->next = new_scope;
-                     curr_scope->inner_last = new_scope;
+                     t = bbd[next_bb->index].start_of_trace;
+                     traces[last_trace].last->rbi->next = traces[t].first;
+                     connected[t] = true;
+                     if (unconnected_hot_trace_count > 0)
+                       unconnected_hot_trace_count--;
+                     last_trace = t;
                    }
-                 curr_scope = curr_scope->inner_last;
-               }
-             else
-               {
-                 int ntrees = forest->num_trees;
-                 level++;
-                 curr_scope = make_new_scope (level, x);
-                 root = curr_scope;
-                 if (ntrees == 0)
-                   forest->trees = xcalloc (1, sizeof (scope));
                  else
-                   forest->trees = xrealloc (forest->trees,
-                                             sizeof (scope) * (ntrees + 1));
-                 forest->trees[forest->num_trees++] = root;
+                   break;      /* Stop finding the successor traces.  */
                }
-             curr_scope->bb_beg = curr_bb;
-           }
-         else if (NOTE_LINE_NUMBER (x) == NOTE_INSN_BLOCK_END)
-           {
-             curr_scope->bb_end = curr_bb;
-             curr_scope->note_end = x;
-             level--;
-             curr_scope = curr_scope->outer;
-             if (level == -1)
-               root = NULL;
+             else
+               break;  /* Stop finding the successor traces.  */
            }
-       } /* if note */
-
-      if (curr_bb && curr_bb->end == x)
-       {
-         curr_bb = NULL;
-         bbi++;
        }
+    }
+
+  if (dump_file)
+    {
+      basic_block bb;
 
-    } /* for */
+      fprintf (dump_file, "Final order:\n");
+      for (bb = traces[0].first; bb; bb = bb->rbi->next)
+       fprintf (dump_file, "%d ", bb->index);
+      fprintf (dump_file, "\n");
+      fflush (dump_file);
+    }
 
-  for (i = 0; i < forest->num_trees; i++)
-    relate_bbs_with_scopes (forest->trees[i]);
+  FREE (connected);
+  FREE (cold_traces);
 }
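
Both the predecessor and the successor searches above pick the best
candidate edge the same way: highest probability wins, and ties go to
the longer candidate trace.  A minimal, standalone sketch of that
comparison; the struct and values are invented for illustration and
are not part of this patch:

#include <stdio.h>

struct cand
{
  int probability;   /* Edge probability.  */
  int trace_length;  /* Length of the trace the edge leads into.  */
};

static int
better_p (const struct cand *e, const struct cand *best, int best_len)
{
  return best == NULL
         || e->probability > best->probability
         || (e->probability == best->probability
             && e->trace_length > best_len);
}

int
main (void)
{
  struct cand edges[] = { {60, 2}, {80, 1}, {80, 5}, {40, 9} };
  const struct cand *best = NULL;
  int best_len = 0;
  size_t i;

  for (i = 0; i < sizeof edges / sizeof edges[0]; i++)
    if (better_p (&edges[i], best, best_len))
      {
        best = &edges[i];
        best_len = edges[i].trace_length;
      }

  /* Prints "best: probability 80, trace length 5".  */
  printf ("best: probability %d, trace length %d\n",
          best->probability, best->trace_length);
  return 0;
}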
 
+/* Return true when BB can and should be copied.  CODE_MAY_GROW is true
+   when code size is allowed to grow by duplication.  */
 
-/* Remove all the NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes from
-   the insn chain.  */
-
-static void
-remove_scope_notes ()
+static bool
+copy_bb_p (basic_block bb, int code_may_grow)
 {
-  rtx x, next;
-  basic_block currbb = NULL;
+  int size = 0;
+  int max_size = uncond_jump_length;
+  rtx insn;
+
+  if (!bb->frequency)
+    return false;
+  if (EDGE_COUNT (bb->preds) < 2)
+    return false;
+  if (!can_duplicate_block_p (bb))
+    return false;
+
+  /* Avoid duplicating blocks which have many successors (PR/13430).  */
+  if (EDGE_COUNT (bb->succs) > 8)
+    return false;
+
+  if (code_may_grow && maybe_hot_bb_p (bb))
+    max_size *= 8;
 
-  for (x = get_insns (); x; x = next)
+  for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb));
+       insn = NEXT_INSN (insn))
     {
-      next = NEXT_INSN (x);
-      if (GET_CODE (x) == NOTE
-         && NOTE_LINE_NUMBER (x) == NOTE_INSN_BASIC_BLOCK)
-       currbb = NOTE_BASIC_BLOCK (x);
-
-      if (GET_CODE (x) == NOTE
-         && (NOTE_LINE_NUMBER (x) == NOTE_INSN_BLOCK_BEG
-             || NOTE_LINE_NUMBER (x) == NOTE_INSN_BLOCK_END))
-       {
-         /* Check if the scope note happens to be the end of a bb.  */
-         if (currbb && x == currbb->end)
-           currbb->end = PREV_INSN (x);
-         if (currbb && x == currbb->head)
-           abort ();
+      if (INSN_P (insn))
+       size += get_attr_length (insn);
+    }
 
-         if (PREV_INSN (x))
-           {
-             NEXT_INSN (PREV_INSN (x)) = next;
-             PREV_INSN (next) = PREV_INSN (x);
+  if (size <= max_size)
+    return true;
 
-              NEXT_INSN (x) = NULL;
-              PREV_INSN (x) = NULL;
-           }
-         else
-           abort ();
-       }
+  if (dump_file)
+    {
+      fprintf (dump_file,
+              "Block %d can't be copied because its size = %d.\n",
+              bb->index, size);
     }
+
+  return false;
 }
 
+/* Return the length of an unconditional jump instruction.  */
 
-/* Insert scope note pairs for a contained scope tree S after insn IP.  */
-static void
-insert_intra_1 (s, ip)
-     scope s;
-     rtx *ip;
+static int
+get_uncond_jump_length (void)
 {
-  scope p;
+  rtx label, jump;
+  int length;
 
-  if (NOTE_BLOCK (s->note_beg))
-    {  
-      *ip = emit_note_after (NOTE_INSN_BLOCK_BEG, *ip);
-      NOTE_BLOCK (*ip) = NOTE_BLOCK (s->note_beg);
-    } 
+  label = emit_label_before (gen_label_rtx (), get_insns ());
+  jump = emit_jump_insn (gen_jump (label));
 
-  for (p = s->inner; p; p = p->next)
-    insert_intra_1 (p, ip);
+  length = get_attr_length (jump);
 
-  if (NOTE_BLOCK (s->note_beg))
-    {  
-      *ip = emit_note_after (NOTE_INSN_BLOCK_END, *ip);
-      NOTE_BLOCK (*ip) = NOTE_BLOCK (s->note_end);
-    }
+  delete_insn (jump);
+  delete_insn (label);
+  return length;
 }
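
The caller below (reorder_basic_blocks) invokes this only while
uncond_jump_length is still zero, so the measurement is taken once and
cached.  A minimal, standalone sketch of that measure-once pattern,
with an invented stand-in for the emit-and-measure step (not part of
this patch):

#include <stdio.h>

static int cached_length = 0;

/* Stand-in for emitting a scratch label + jump and reading the jump's
   length attribute.  */
static int
measure_jump_length (void)
{
  return 4;
}

static int
get_length (void)
{
  if (cached_length == 0)       /* First call: measure and cache.  */
    cached_length = measure_jump_length ();
  return cached_length;         /* Later calls reuse the result.  */
}

int
main (void)
{
  printf ("%d %d\n", get_length (), get_length ());  /* "4 4" */
  return 0;
}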
 
-
-/* Insert NOTE_INSN_BLOCK_END notes and NOTE_INSN_BLOCK_BEG notes for
-   scopes that are contained within BB.  */
-
 static void
-insert_intra_bb_scope_notes (bb)
-     basic_block bb;
+add_unlikely_executed_notes (void)
 {
-  scope s = REORDER_BLOCK_SCOPE (bb);
-  scope p;
-  rtx ip;
-
-  if (! s)
-    return;
+  basic_block bb;
 
-  ip = bb->head;
-  if (GET_CODE (ip) == CODE_LABEL)
-    ip = NEXT_INSN (ip);
+  /* Add a NOTE_INSN_UNLIKELY_EXECUTED_CODE note to each cold basic
+     block.  */
 
-  for (p = s->inner; p; p = p->next)
-    {
-      if (p->bb_beg != NULL && p->bb_beg == p->bb_end && p->bb_beg == bb)
-       insert_intra_1 (p, &ip);
-    }
+  FOR_EACH_BB (bb)
+    if (BB_PARTITION (bb) == BB_COLD_PARTITION)
+      mark_bb_for_unlikely_executed_section (bb);
 }
 
-
-/* Given two consecutive basic blocks BB1 and BB2 with different scopes,
-   insert NOTE_INSN_BLOCK_END notes after BB1 and NOTE_INSN_BLOCK_BEG
-   notes before BB2 such that the notes are correctly balanced. If BB1 or
-   BB2 is NULL, we are inserting scope notes for the first and last basic
-   blocks, respectively.  */
+/* Find the basic blocks that are rarely executed and need to be moved to
+   a separate section of the .o file (to cut down on paging and improve
+   cache locality).  */
 
 static void
-insert_inter_bb_scope_notes (bb1, bb2)
-     basic_block bb1;
-     basic_block bb2;
+find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, 
+                                                     int *n_crossing_edges, 
+                                                     int *max_idx)
 {
-  rtx ip;
-  scope com;
-
-  /* It is possible that a basic block is not contained in any scope.
-     In that case, we either open or close a scope but not both.  */
-  if (bb1 && bb2)
-    {
-      scope s1 = REORDER_BLOCK_SCOPE (bb1);
-      scope s2 = REORDER_BLOCK_SCOPE (bb2);
-      if (! s1 && ! s2)
-       return;
-      if (! s1)
-       bb1 = NULL;
-      else if (! s2)
-       bb2 = NULL;
-    }
+  basic_block bb;
+  bool has_hot_blocks = false;
+  edge e;
+  int i;
+  edge_iterator ei;
 
-  /* Find common ancestor scope.  */
-  if (bb1 && bb2)
+  /* Mark which partition (hot/cold) each basic block belongs in.  */
+  
+  FOR_EACH_BB (bb)
     {
-      scope s1 = REORDER_BLOCK_SCOPE (bb1);
-      scope s2 = REORDER_BLOCK_SCOPE (bb2);
-      while (s1 != s2)
+      if (probably_never_executed_bb_p (bb))
+       BB_SET_PARTITION (bb, BB_COLD_PARTITION);
+      else
        {
-          if (! (s1 && s2))
-           abort ();
-         if (s1->level > s2->level)
-           s1 = s1->outer;
-         else if (s2->level > s1->level)
-           s2 = s2->outer;
-         else
-           {
-             s1 = s1->outer;
-             s2 = s2->outer;
-           }
+         BB_SET_PARTITION (bb, BB_HOT_PARTITION);
+         has_hot_blocks = true;
        }
-      com = s1;
     }
-  else
-    com = NULL;
 
-  /* Close scopes.  */
-  if (bb1)
-    {
-      scope s = REORDER_BLOCK_SCOPE (bb1);
-      ip = REORDER_BLOCK_EFF_END (bb1);
-      while (s != com)
+  /* Since all "hot" basic blocks will eventually be scheduled before all
+     cold basic blocks, make *sure* the real function entry block is in
+     the hot partition (if there is one).  */
+  
+  if (has_hot_blocks)
+    FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+      if (e->dest->index >= 0)
        {
-         if (NOTE_BLOCK (s->note_beg))
-           {  
-             ip = emit_note_after (NOTE_INSN_BLOCK_END, ip);
-             NOTE_BLOCK (ip) = NOTE_BLOCK (s->note_end);
-           }
-         s = s->outer;
+         BB_SET_PARTITION (e->dest, BB_HOT_PARTITION);
+         break;
        }
-    }
 
-  /* Open scopes.  */
-  if (bb2)
+  /* Mark every edge that crosses between sections.  */
+
+  i = 0;
+  if (targetm.have_named_sections)
     {
-      scope s = REORDER_BLOCK_SCOPE (bb2);
-      ip = bb2->head;
-      while (s != com)
-       {
-         if (NOTE_BLOCK (s->note_beg))
-           {  
-             ip = emit_note_before (NOTE_INSN_BLOCK_BEG, ip);
-             NOTE_BLOCK (ip) = NOTE_BLOCK (s->note_beg);
-           }
-         s = s->outer;
-       }
+      FOR_EACH_BB (bb)
+        FOR_EACH_EDGE (e, ei, bb->succs)
+         {
+           if (e->src != ENTRY_BLOCK_PTR
+               && e->dest != EXIT_BLOCK_PTR
+               && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
+             {
+               e->flags |= EDGE_CROSSING;
+               if (i == *max_idx)
+                 {
+                   *max_idx *= 2;
+                   crossing_edges = xrealloc (crossing_edges,
+                                              (*max_idx) * sizeof (edge));
+                 }
+               crossing_edges[i++] = e;
+             }
+           else
+             e->flags &= ~EDGE_CROSSING;
+         }
     }
+  *n_crossing_edges = i;
 }
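
The crossing-edge collection above grows its result array by doubling
(*max_idx *= 2, then xrealloc) whenever the write index reaches the
current capacity, giving amortized O(1) appends.  A minimal,
standalone sketch of the same pattern, with plain malloc/realloc
standing in for GCC's aborting xmalloc/xrealloc (not part of this
patch):

#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int capacity = 2;
  int n = 0;
  int v;
  int *items = malloc (capacity * sizeof (int));

  if (items == NULL)
    return 1;

  for (v = 0; v < 100; v++)
    {
      if (n == capacity)
        {
          /* Double the capacity; xrealloc would abort on failure.  */
          capacity *= 2;
          items = realloc (items, capacity * sizeof (int));
          if (items == NULL)
            return 1;
        }
      items[n++] = v;
    }

  printf ("stored %d items, capacity %d\n", n, capacity);  /* 100, 128 */
  free (items);
  return 0;
}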
 
-
-/* Rebuild all the NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes based
-   on the scope forest and the newly reordered basic blocks.  */
+/* Add NOTE_INSN_UNLIKELY_EXECUTED_CODE to the top of the basic block.
+   This note is later used to mark the basic block to be put in the
+   unlikely-to-be-executed section of the .o file.  */
 
 static void
-rebuild_scope_notes (forest)
-    scope_forest_info *forest;
+mark_bb_for_unlikely_executed_section (basic_block bb) 
 {
-  int i;
+  rtx cur_insn;
+  rtx insert_insn = NULL;
+  rtx new_note;
+  
+  /* Insert the new note immediately after the NOTE_INSN_BASIC_BLOCK
+     note.  */
 
-  if (forest->num_trees == 0)
-    return;
+  for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb));
+       cur_insn = NEXT_INSN (cur_insn))
+    if (GET_CODE (cur_insn) == NOTE
+       && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_BASIC_BLOCK)
+      {
+       insert_insn = cur_insn;
+       break;
+      }
+    
+  /* If the basic block does not contain a NOTE_INSN_BASIC_BLOCK, there
+     is a major problem.  */
+  gcc_assert (insert_insn);
+
+  /* Insert the note and associate the basic block with it.  */
+  
+  new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE, 
+                             insert_insn);
+  NOTE_BASIC_BLOCK (new_note) = bb;
+}
 
-  /* Start by opening the scopes before the first basic block.  */
-  insert_inter_bb_scope_notes (NULL, BASIC_BLOCK (0));
+/* If any destination of a crossing edge does not have a label, add a
+   label; convert any fall-through crossing edges (for blocks that do
+   not contain a jump) to unconditional jumps.  */
 
-  /* Then, open and close scopes as needed between blocks.  */
-  for (i = 0; i < n_basic_blocks - 1; i++)
+static void 
+add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
+{
+  int i;
+  basic_block src;
+  basic_block dest;
+  rtx label;
+  rtx barrier;
+  rtx new_jump;
+  
+  for (i = 0; i < n_crossing_edges; i++)
     {
-      basic_block bb1 = BASIC_BLOCK (i);
-      basic_block bb2 = BASIC_BLOCK (i + 1);
-      if (REORDER_BLOCK_SCOPE (bb1) != REORDER_BLOCK_SCOPE (bb2))
-       insert_inter_bb_scope_notes (bb1, bb2);
-      insert_intra_bb_scope_notes (bb1);
-    }
+      if (crossing_edges[i]) 
+       {
+         src = crossing_edges[i]->src; 
+         dest = crossing_edges[i]->dest;
+         
+         /* Make sure dest has a label.  */
+         
+         if (dest && (dest != EXIT_BLOCK_PTR))
+           {
+             label = block_label (dest);
+             
+             /* Make sure source block ends with a jump.  */
+             
+             if (src && (src != ENTRY_BLOCK_PTR)) 
+               {
+                 if (!JUMP_P (BB_END (src)))
+                   /* bb just falls through.  */
+                   {
+                     /* Make sure there's only one successor.  */
+                     gcc_assert (EDGE_COUNT (src->succs) == 1);
+                     
+                     /* Find label in dest block.  */
+                     label = block_label (dest);
+                     
+                     new_jump = emit_jump_insn_after (gen_jump (label), 
+                                                      BB_END (src));
+                     barrier = emit_barrier_after (new_jump);
+                     JUMP_LABEL (new_jump) = label;
+                     LABEL_NUSES (label) += 1;
+                     src->rbi->footer = unlink_insn_chain (barrier, barrier);
+                     /* Mark edge as non-fallthru.  */
+                     crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
+                   } /* end: 'if (!JUMP_P ...'  */
+               } /* end: 'if (src && src != ENTRY_BLOCK_PTR ...'  */
+           } /* end: 'if (dest && dest != EXIT_BLOCK_PTR ...'  */
+       } /* end: 'if (crossing_edges[i]...'  */
+    } /* end for loop  */
+}
+
+/* Find any bb's where the fall-through edge is a crossing edge (note that
+   these bb's must also contain a conditional jump; we've already
+   dealt with fall-through edges for blocks that didn't have a
+   conditional jump in the call to add_labels_and_missing_jumps).
+   Convert the fall-through edge to non-crossing edge by inserting a
+   new bb to fall-through into.  The new bb will contain an
+   unconditional jump (crossing edge) to the original fall through
+   destination.  */
+
+static void 
+fix_up_fall_thru_edges (void)
+{
+  basic_block cur_bb;
+  basic_block new_bb;
+  edge succ1;
+  edge succ2;
+  edge fall_thru;
+  edge cond_jump = NULL;
+  edge e;
+  bool cond_jump_crosses;
+  int invert_worked;
+  rtx old_jump;
+  rtx fall_thru_label;
+  rtx barrier;
+  
+  FOR_EACH_BB (cur_bb)
+    {
+      fall_thru = NULL;
+      if (EDGE_COUNT (cur_bb->succs) > 0)
+       succ1 = EDGE_SUCC (cur_bb, 0);
+      else
+       succ1 = NULL;
 
-  /* Finally, close the scopes after the last basic block.  */
-  insert_inter_bb_scope_notes (BASIC_BLOCK (n_basic_blocks - 1), NULL);
-  insert_intra_bb_scope_notes (BASIC_BLOCK (n_basic_blocks - 1));
+      if (EDGE_COUNT (cur_bb->succs) > 1)
+       succ2 = EDGE_SUCC (cur_bb, 1);
+      else
+       succ2 = NULL;
+      
+      /* Find the fall-through edge.  */
+      
+      if (succ1 
+         && (succ1->flags & EDGE_FALLTHRU))
+       {
+         fall_thru = succ1;
+         cond_jump = succ2;
+       }
+      else if (succ2 
+              && (succ2->flags & EDGE_FALLTHRU))
+       {
+         fall_thru = succ2;
+         cond_jump = succ1;
+       }
+      
+      if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
+       {
+         /* Check to see if the fall-thru edge is a crossing edge.  */
+       
+         if (fall_thru->flags & EDGE_CROSSING)
+           {
+             /* The fall_thru edge crosses; now check the cond jump edge, if
+                it exists.  */
+             
+             cond_jump_crosses = true;
+             invert_worked = 0;
+             old_jump = BB_END (cur_bb);
+             
+             /* Find the jump instruction, if there is one.  */
+             
+             if (cond_jump)
+               {
+                 if (!(cond_jump->flags & EDGE_CROSSING))
+                   cond_jump_crosses = false;
+                 
+                 /* We know the fall-thru edge crosses; if the cond
+                    jump edge does NOT cross, and its destination is the
+                    next block in the bb order, invert the jump
+                    (i.e. fix it so the fall thru does not cross and
+                    the cond jump does).  */
+                 
+                 if (!cond_jump_crosses
+                     && cur_bb->rbi->next == cond_jump->dest)
+                   {
+                     /* Find label in fall_thru block. We've already added
+                        any missing labels, so there must be one.  */
+                     
+                     fall_thru_label = block_label (fall_thru->dest);
+
+                     if (old_jump && fall_thru_label)
+                       invert_worked = invert_jump (old_jump, 
+                                                    fall_thru_label, 0);
+                     if (invert_worked)
+                       {
+                         fall_thru->flags &= ~EDGE_FALLTHRU;
+                         cond_jump->flags |= EDGE_FALLTHRU;
+                         update_br_prob_note (cur_bb);
+                         e = fall_thru;
+                         fall_thru = cond_jump;
+                         cond_jump = e;
+                         cond_jump->flags |= EDGE_CROSSING;
+                         fall_thru->flags &= ~EDGE_CROSSING;
+                       }
+                   }
+               }
+             
+             if (cond_jump_crosses || !invert_worked)
+               {
+                 /* This is the case where both edges out of the basic
+                    block are crossing edges. Here we will fix up the
+                    fall through edge. The jump edge will be taken care
+                    of later.  */
+                 
+                 new_bb = force_nonfallthru (fall_thru);  
+                 
+                 if (new_bb)
+                   {
+                     new_bb->rbi->next = cur_bb->rbi->next;
+                     cur_bb->rbi->next = new_bb;
+                     
+                     /* Make sure new fall-through bb is in same 
+                        partition as bb it's falling through from.  */
+
+                     BB_COPY_PARTITION (new_bb, cur_bb);
+                     EDGE_SUCC (new_bb, 0)->flags |= EDGE_CROSSING;
+                   }
+                 
+                 /* Add a barrier after the new jump.  */
+                 
+                 if (new_bb)
+                   {
+                     barrier = emit_barrier_after (BB_END (new_bb));
+                     new_bb->rbi->footer = unlink_insn_chain (barrier, 
+                                                              barrier);
+                   }
+                 else
+                   {
+                     barrier = emit_barrier_after (BB_END (cur_bb));
+                     cur_bb->rbi->footer = unlink_insn_chain (barrier,
+                                                              barrier);
+                   }
+               }
+           }
+       }
+    }
 }
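
The invert_jump path above can be pictured at the source level:
flipping the branch condition exchanges which successor falls through,
so the non-crossing edge can become the fall-through.  A standalone
sketch with plain gotos (invented example, not part of this patch);
both functions compute the same result:

#include <stdio.h>

static int
before (int c)
{
  if (c)          /* Taken edge jumps away; !c falls through.  */
    goto cold;
  return 1;       /* Fall-through successor.  */
cold:
  return 0;
}

static int
after (int c)
{
  if (!c)         /* Inverted condition: now c falls through.  */
    goto hot;
  return 0;       /* The former jump target is now the fall-through.  */
hot:
  return 1;
}

int
main (void)
{
  printf ("%d %d\n", before (0), after (0));  /* "1 1" */
  printf ("%d %d\n", before (1), after (1));  /* "0 0" */
  return 0;
}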
 
+/* This function checks the destination block of a "crossing jump" to
+   see if it has any crossing predecessors that begin with a code label
+   and end with an unconditional jump.  If so, it returns that predecessor
+   block.  (This is to avoid creating lots of new basic blocks that all
+   contain unconditional jumps to the same destination).  */
+
+static basic_block
+find_jump_block (basic_block jump_dest) 
+{ 
+  basic_block source_bb = NULL; 
+  edge e;
+  rtx insn;
+  edge_iterator ei;
+
+  FOR_EACH_EDGE (e, ei, jump_dest->preds)
+    if (e->flags & EDGE_CROSSING)
+      {
+       basic_block src = e->src;
+       
+       /* Check each predecessor to see if it has a label, and contains
+          only one executable instruction, which is an unconditional jump.
+          If so, we can use it.  */
+       
+       if (LABEL_P (BB_HEAD (src)))
+         for (insn = BB_HEAD (src);
+              insn != NEXT_INSN (BB_END (src));
+              insn = NEXT_INSN (insn))
+           if (INSN_P (insn))
+             {
+               /* Stop at the first executable insn; SRC qualifies
+                  only if that insn is also its final, unconditional
+                  jump.  */
+               if (insn == BB_END (src)
+                   && JUMP_P (insn)
+                   && !any_condjump_p (insn))
+                 source_bb = src;
+               break;
+             }
+       
+       if (source_bb)
+         break;
+      }
+
+  return source_bb;
+}
 
-/* Free the storage associated with the scope tree at S.  */
+/* Find all BB's with conditional jumps that are crossing edges;
+   insert a new bb and make the conditional jump branch to the new
+   bb instead (put the new bb in the same partition so the conditional
+   branch won't be a 'crossing' edge).  Insert an unconditional jump from the
+   new bb to the original destination of the conditional jump.  */
 
 static void
-free_scope_forest_1 (s)
-    scope s;
+fix_crossing_conditional_branches (void)
 {
-  scope p, next;
-
-  for (p = s->inner; p; p = next)
+  basic_block cur_bb;
+  basic_block new_bb;
+  basic_block last_bb;
+  basic_block dest;
+  basic_block prev_bb;
+  edge succ1;
+  edge succ2;
+  edge crossing_edge;
+  edge new_edge;
+  rtx old_jump;
+  rtx set_src;
+  rtx old_label = NULL_RTX;
+  rtx new_label;
+  rtx new_jump;
+  rtx barrier;
+
+  last_bb = EXIT_BLOCK_PTR->prev_bb;
+  
+  FOR_EACH_BB (cur_bb)
     {
-      next = p->next;
-      free_scope_forest_1 (p);
-    }
+      crossing_edge = NULL;
+      if (EDGE_COUNT (cur_bb->succs) > 0)
+       succ1 = EDGE_SUCC (cur_bb, 0);
+      else
+       succ1 = NULL;
+    
+      if (EDGE_COUNT (cur_bb->succs) > 1)
+       succ2 = EDGE_SUCC (cur_bb, 1);
+      else
+       succ2 = NULL;
+      
+      /* We already took care of fall-through edges, so only one successor
+        can be a crossing edge.  */
+      
+      if (succ1 && (succ1->flags & EDGE_CROSSING))
+       crossing_edge = succ1;
+      else if (succ2 && (succ2->flags & EDGE_CROSSING))
+       crossing_edge = succ2;
+      
+      if (crossing_edge) 
+       {
+         old_jump = BB_END (cur_bb);
+         
+         /* Check to make sure the jump instruction is a
+            conditional jump.  */
+         
+         set_src = NULL_RTX;
+
+         if (any_condjump_p (old_jump))
+           {
+             if (GET_CODE (PATTERN (old_jump)) == SET)
+               set_src = SET_SRC (PATTERN (old_jump));
+             else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
+               {
+                 set_src = XVECEXP (PATTERN (old_jump), 0, 0);
+                 if (GET_CODE (set_src) == SET)
+                   set_src = SET_SRC (set_src);
+                 else
+                   set_src = NULL_RTX;
+               }
+           }
 
-  if (s->bbs)
-    free (s->bbs);
-  free (s);
+         if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
+           {
+             if (GET_CODE (XEXP (set_src, 1)) == PC)
+               old_label = XEXP (set_src, 2);
+             else if (GET_CODE (XEXP (set_src, 2)) == PC)
+               old_label = XEXP (set_src, 1);
+             
+             /* Check to see if new bb for jumping to that dest has
+                already been created; if so, use it; if not, create
+                a new one.  */
+
+             new_bb = find_jump_block (crossing_edge->dest);
+             
+             if (new_bb)
+               new_label = block_label (new_bb);
+             else
+               {
+                 /* Create new basic block to be dest for
+                    conditional jump.  */
+                 
+                 new_bb = create_basic_block (NULL, NULL, last_bb);
+                 new_bb->rbi->next = last_bb->rbi->next;
+                 last_bb->rbi->next = new_bb;
+                 prev_bb = last_bb;
+                 last_bb = new_bb;
+                 
+                 /* Update register liveness information.  */
+                 
+                 new_bb->global_live_at_start = 
+                   OBSTACK_ALLOC_REG_SET (&flow_obstack);
+                 new_bb->global_live_at_end = 
+                   OBSTACK_ALLOC_REG_SET (&flow_obstack);
+                 COPY_REG_SET (new_bb->global_live_at_end,
+                               prev_bb->global_live_at_end);
+                 COPY_REG_SET (new_bb->global_live_at_start,
+                               prev_bb->global_live_at_end);
+                 
+                 /* Put appropriate instructions in new bb.  */
+                 
+                 new_label = gen_label_rtx ();
+                 emit_label_before (new_label, BB_HEAD (new_bb));
+                 BB_HEAD (new_bb) = new_label;
+                 
+                 if (GET_CODE (old_label) == LABEL_REF)
+                   {
+                     old_label = JUMP_LABEL (old_jump);
+                     new_jump = emit_jump_insn_after (gen_jump 
+                                                      (old_label), 
+                                                      BB_END (new_bb));
+                   }
+                 else
+                   {
+                     gcc_assert (HAVE_return
+                                 && GET_CODE (old_label) == RETURN);
+                     new_jump = emit_jump_insn_after (gen_return (), 
+                                                      BB_END (new_bb));
+                   }
+                 
+                 barrier = emit_barrier_after (new_jump);
+                 JUMP_LABEL (new_jump) = old_label;
+                 new_bb->rbi->footer = unlink_insn_chain (barrier, 
+                                                          barrier);
+                 
+                 /* Make sure new bb is in same partition as source
+                    of conditional branch.  */
+                 BB_COPY_PARTITION (new_bb, cur_bb);
+               }
+             
+             /* Make old jump branch to new bb.  */
+             
+             redirect_jump (old_jump, new_label, 0);
+             
+             /* Remove crossing_edge as predecessor of 'dest'.  */
+             
+             dest = crossing_edge->dest;
+             
+             redirect_edge_succ (crossing_edge, new_bb);
+             
+             /* Make a new edge from new_bb to old dest; new edge
+                will be a successor for new_bb and a predecessor
+                for 'dest'.  */
+             
+             if (EDGE_COUNT (new_bb->succs) == 0)
+               new_edge = make_edge (new_bb, dest, 0);
+             else
+               new_edge = EDGE_SUCC (new_bb, 0);
+             
+             crossing_edge->flags &= ~EDGE_CROSSING;
+             new_edge->flags |= EDGE_CROSSING;
+           }
+       }
+    }
 }
 
-
-/* Free the storage associated with the scope forest.  */
+/* Find any unconditional branches that cross between hot and cold
+   sections.  Convert them into indirect jumps instead.  */
 
 static void
-free_scope_forest (forest)
-    scope_forest_info *forest;
+fix_crossing_unconditional_branches (void)
 {
-  int i;
-  for (i = 0; i < forest->num_trees; i++)
-    free_scope_forest_1 (forest->trees[i]);
-}
+  basic_block cur_bb;
+  rtx last_insn;
+  rtx label;
+  rtx label_addr;
+  rtx indirect_jump_sequence;
+  rtx jump_insn = NULL_RTX;
+  rtx new_reg;
+  rtx cur_insn;
+  edge succ;
+
+  FOR_EACH_BB (cur_bb)
+    {
+      last_insn = BB_END (cur_bb);
+      succ = EDGE_SUCC (cur_bb, 0);
 
+      /* Check to see if bb ends in a crossing (unconditional) jump.  At
+         this point, no crossing jumps should be conditional.  */
 
-/* Visualize the scope forest.  */
+      if (JUMP_P (last_insn)
+         && (succ->flags & EDGE_CROSSING))
+       {
+         rtx label2, table;
 
-void
-dump_scope_forest (forest)
-    scope_forest_info *forest;
-{
-  if (forest->num_trees == 0)
-    fprintf (stderr, "\n< Empty scope forest >\n");
-  else
-    {
-      int i;
-      fprintf (stderr, "\n< Scope forest >\n");
-      for (i = 0; i < forest->num_trees; i++)
-       dump_scope_forest_1 (forest->trees[i], 0);
+         gcc_assert (!any_condjump_p (last_insn));
+
+         /* Make sure the jump is not already an indirect or table jump.  */
+
+         if (!computed_jump_p (last_insn)
+             && !tablejump_p (last_insn, &label2, &table))
+           {
+             /* We have found a "crossing" unconditional branch.  Now
+                we must convert it to an indirect jump.  First create a
+                reference to the label, as the target for the jump.  */
+             
+             label = JUMP_LABEL (last_insn);
+             label_addr = gen_rtx_LABEL_REF (Pmode, label);
+             LABEL_NUSES (label) += 1;
+             
+             /* Get a register to use for the indirect jump.  */
+             
+             new_reg = gen_reg_rtx (Pmode);
+             
+             /* Generate the indirect jump sequence.  */
+             
+             start_sequence ();
+             emit_move_insn (new_reg, label_addr);
+             emit_indirect_jump (new_reg);
+             indirect_jump_sequence = get_insns ();
+             end_sequence ();
+             
+             /* Make sure every instruction in the new jump sequence has
+                its basic block set to be cur_bb.  */
+             
+             for (cur_insn = indirect_jump_sequence; cur_insn;
+                  cur_insn = NEXT_INSN (cur_insn))
+               {
+                 BLOCK_FOR_INSN (cur_insn) = cur_bb;
+                 if (JUMP_P (cur_insn))
+                   jump_insn = cur_insn;
+               }
+             
+             /* Insert the new (indirect) jump sequence immediately before
+                the unconditional jump, then delete the unconditional jump.  */
+             
+             emit_insn_before (indirect_jump_sequence, last_insn);
+             delete_insn (last_insn);
+             
+             /* Make BB_END for cur_bb be the jump instruction (NOT the
+                barrier instruction at the end of the sequence...).  */
+             
+             BB_END (cur_bb) = jump_insn;
+           }
+       }
     }
 }
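
A user-level analogue of the sequence built above, using GNU C's
labels-as-values extension (requires GCC; illustrative only, not part
of this patch): the label's address is loaded into a variable and
jumped through, just as the RTL sequence moves a LABEL_REF into a
register before emit_indirect_jump:

#include <stdio.h>

int
main (void)
{
  void *target = &&far_away;  /* Cf. emit_move_insn (new_reg, label_addr).  */

  goto *target;               /* Cf. emit_indirect_jump (new_reg).  */

far_away:
  printf ("reached the (conceptually distant) label\n");
  return 0;
}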
 
-
-/* Recursive portion of dump_scope_forest.  */
+/* Add REG_CROSSING_JUMP note to all crossing jump insns.  */
 
 static void
-dump_scope_forest_1 (s, indent)
-     scope s;
-     int indent;
+add_reg_crossing_jump_notes (void)
 {
-  scope p;
-  int i;
+  basic_block bb;
+  edge e;
+  edge_iterator ei;
+
+  FOR_EACH_BB (bb)
+    FOR_EACH_EDGE (e, ei, bb->succs)
+      if ((e->flags & EDGE_CROSSING)
+         && JUMP_P (BB_END (e->src)))
+       REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, 
+                                                        NULL_RTX, 
+                                                        REG_NOTES (BB_END 
+                                                                 (e->src)));
+}
 
-  if (s->bb_beg != NULL && s->bb_beg == s->bb_end
-      && REORDER_BLOCK_SCOPE (s->bb_beg)
-      && REORDER_BLOCK_SCOPE (s->bb_beg)->level + 1 == s->level)
+/* Basic blocks containing NOTE_INSN_UNLIKELY_EXECUTED_CODE will be
+   put in a separate section of the .o file, to reduce paging and
+   improve cache performance (hopefully).  This can result in bits of
+   code from the same function being widely separated in the .o file.
+   However, this is not visible to the current bb structure.  Therefore
+   we must take care to ensure that: 1) there are no fall_thru edges
+   that cross between sections; 2) for those architectures which have
+   "short" conditional branches, all conditional branches that attempt
+   to cross between sections are converted to unconditional branches;
+   and 3) for those architectures which have "short" unconditional
+   branches, all unconditional branches that attempt to cross between
+   sections are converted to indirect jumps.
+   
+   The code for fixing up fall_thru edges that cross between hot and
+   cold basic blocks does so by creating new basic blocks containing 
+   unconditional branches to the appropriate label in the "other" 
+   section.  The new basic block is then put in the same (hot or cold)
+   section as the original conditional branch, and the fall_thru edge
+   is modified to fall into the new basic block instead.  By adding
+   this level of indirection we end up with only unconditional branches
+   crossing between hot and cold sections.  
+   
+   Conditional branches are dealt with by adding a level of indirection.
+   A new basic block is added in the same (hot/cold) section as the 
+   conditional branch, and the conditional branch is retargeted to the
+   new basic block.  The new basic block contains an unconditional branch
+   to the original target of the conditional branch (in the other section).
+
+   Unconditional branches are dealt with by converting them into
+   indirect jumps.  */
+
+static void 
+fix_edges_for_rarely_executed_code (edge *crossing_edges, 
+                                   int n_crossing_edges)
+{
+  /* Make sure the source of any crossing edge ends in a jump and the
+     destination of any crossing edge has a label.  */
+  
+  add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);
+  
+  /* Convert all crossing fall_thru edges to non-crossing fall
+     thrus to unconditional jumps (that jump to the original fall
+     thru dest).  */
+  
+  fix_up_fall_thru_edges ();
+  
+  /* Only do the parts necessary for writing separate sections if
+     the target architecture has the ability to write separate sections
+     (i.e. it has named sections).  Otherwise, the hot/cold partitioning
+     information will be used when reordering blocks to try to put all
+     the hot blocks together, then all the cold blocks, but no actual
+     section partitioning will be done.  */
+
+  if (targetm.have_named_sections)
     {
-      fprintf (stderr, "%*s", indent, "");
-      fprintf (stderr, "BB%d:\n", s->bb_beg->index);
-    }
-
-  fprintf (stderr, "%*s", indent, "");
-  fprintf (stderr, "{ level %d (block %p)\n", s->level,
-          NOTE_BLOCK (s->note_beg));
-
-  fprintf (stderr, "%*s%s", indent, "", "bbs:");
-  for (i = 0; i < s->num_bbs; i++)
-    fprintf (stderr, " %d", s->bbs[i]->index);
-  fprintf (stderr, "\n");
+      /* If the architecture does not have conditional branches that can
+        span all of memory, convert crossing conditional branches into
+        crossing unconditional branches.  */
+  
+      if (!HAS_LONG_COND_BRANCH)
+       fix_crossing_conditional_branches ();
   
-  for (p = s->inner; p; p = p->next)
-    dump_scope_forest_1 (p, indent + 2);
+      /* If the architecture does not have unconditional branches that
+        can span all of memory, convert crossing unconditional branches
+        into indirect jumps.  Since adding an indirect jump also adds
+        a new register usage, update the register usage information as
+        well.  */
+      
+      if (!HAS_LONG_UNCOND_BRANCH)
+       {
+         fix_crossing_unconditional_branches ();
+         reg_scan (get_insns (), max_reg_num (), 1);
+       }
 
-  fprintf (stderr, "%*s", indent, "");
-  fprintf (stderr, "}\n");
+      add_reg_crossing_jump_notes ();
+    }
 }
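
At the source level, the conditional-branch fixup described above
amounts to routing the short conditional branch to a nearby stub block
that carries the (arbitrarily long) unconditional jump into the other
section.  An invented, standalone illustration, not part of this
patch:

#include <stdio.h>

static int
with_stub (int error)
{
  if (error)
    goto stub;        /* Short conditional branch, stays in this section.  */
  return 0;           /* Hot path.  */

stub:
  goto cold_handler;  /* Unconditional jump; may span sections.  */

cold_handler:
  printf ("cold path\n");
  return -1;
}

int
main (void)
{
  return (with_stub (1) == -1 && with_stub (0) == 0) ? 0 : 1;
}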
 
-
-/* Reorder basic blocks.  */
+/* Reorder basic blocks.  The main entry point to this file.  FLAGS is
+   the set of flags to pass to cfg_layout_initialize().  */
 
 void
-reorder_basic_blocks ()
+reorder_basic_blocks (unsigned int flags)
 {
-  int i, j;
-  struct loops loops_info;
-  int num_loops;
-  scope_forest_info forest;
-
-  if (profile_arc_flag)
-    return;
+  int n_traces;
+  int i;
+  struct trace *traces;
 
   if (n_basic_blocks <= 1)
     return;
 
-  /* Exception edges are not currently handled.  */
-  for (i = 0; i < n_basic_blocks; i++)
-    {
-      edge e;
-
-      for (e = BASIC_BLOCK (i)->succ; e && ! (e->flags & EDGE_EH);
-          e = e->succ_next)
-       continue;
-
-      if (e && (e->flags & EDGE_EH))
-       return;
-    }
-
-  reorder_index = 0;
-
-  /* Find natural loops using the CFG.  */
-  num_loops = flow_loops_find (&loops_info);
+  if (targetm.cannot_modify_jumps_p ())
+    return;
 
-  /* Dump loop information.  */
-  flow_loops_dump (&loops_info, rtl_dump_file, 0);
+  timevar_push (TV_REORDER_BLOCKS);
 
-  reorder_last_visited = BASIC_BLOCK (0);
+  cfg_layout_initialize (flags);
 
-  for (i = 0; i < n_basic_blocks; i++)
-    {
-      basic_block bbi = BASIC_BLOCK (i);
-      bbi->aux = xcalloc (1, sizeof (struct reorder_block_def));
-      *((struct reorder_block_def *)bbi->aux) = rbd_init;
-    }
+  set_edge_can_fallthru_flag ();
+  mark_dfs_back_edges ();
 
-  build_scope_forest (&forest);
-  remove_scope_notes ();
+  /* We estimate the length of an uncond jump insn only once, since the
+     code for getting the insn length always returns the minimal length
+     now.  */
+  if (uncond_jump_length == 0)
+    uncond_jump_length = get_uncond_jump_length ();
 
-  for (i = 0; i < n_basic_blocks; i++)
+  /* We need to know some information for each basic block.  */
+  array_size = GET_ARRAY_SIZE (last_basic_block);
+  bbd = xmalloc (array_size * sizeof (bbro_basic_block_data));
+  for (i = 0; i < array_size; i++)
     {
-      basic_block bbi = BASIC_BLOCK (i);
-      REORDER_BLOCK_EFF_END (bbi) = skip_insns_after_block (bbi);
-      if (i == 0)
-       REORDER_BLOCK_EFF_HEAD (bbi) = get_insns ();
-      else 
-       {
-         rtx prev_eff_end = REORDER_BLOCK_EFF_END (BASIC_BLOCK (i - 1));
-         REORDER_BLOCK_EFF_HEAD (bbi) = NEXT_INSN (prev_eff_end);
-       }
+      bbd[i].start_of_trace = -1;
+      bbd[i].end_of_trace = -1;
+      bbd[i].heap = NULL;
+      bbd[i].node = NULL;
     }
 
-  /* If we've not got epilogue in RTL, we must fallthru to the exit.
-     Force the last block to be at the end.  */
-  /* ??? Some ABIs (e.g. MIPS) require the return insn to be at the
-     end of the function for stack unwinding purposes.  */
+  traces = xmalloc (n_basic_blocks * sizeof (struct trace));
+  n_traces = 0;
+  find_traces (&n_traces, traces);
+  connect_traces (n_traces, traces);
+  FREE (traces);
+  FREE (bbd);
 
-#ifndef HAVE_epilogue
-#define HAVE_epilogue 0
-#endif
+  if (dump_file)
+    dump_flow_info (dump_file);
 
-  if (! HAVE_epilogue)
-    {
-      basic_block last = BASIC_BLOCK (n_basic_blocks - 1);
-      REORDER_BLOCK_INDEX (last) = n_basic_blocks - 1;
-      REORDER_BLOCK_FLAGS (last) |= REORDER_BLOCK_VISITED;
-    }
-      
-  make_reorder_chain (BASIC_BLOCK (0));
-
-  fixup_reorder_chain ();
+  if (flag_reorder_blocks_and_partition
+      && targetm.have_named_sections)
+    add_unlikely_executed_notes ();
 
-#ifdef ENABLE_CHECKING
-  verify_insn_chain ();
-#endif
+  cfg_layout_finalize ();
 
-  /* Put basic_block_info in new order.  */
-  for (i = 0; i < n_basic_blocks - 1; i++)
-    {
-      for (j = i; i != REORDER_BLOCK_INDEX (BASIC_BLOCK (j)); j++)
-       continue;
-
-      if (REORDER_BLOCK_INDEX (BASIC_BLOCK (j)) == i
-         && i != j)
-       {
-         basic_block tempbb;
-         int temprbi;
-         int rbi = REORDER_BLOCK_INDEX (BASIC_BLOCK (j));
-
-         temprbi = BASIC_BLOCK (rbi)->index;
-         BASIC_BLOCK (rbi)->index = BASIC_BLOCK (j)->index;
-         BASIC_BLOCK (j)->index = temprbi;
-         tempbb = BASIC_BLOCK (rbi);
-         BASIC_BLOCK (rbi) = BASIC_BLOCK (j);
-         BASIC_BLOCK (j) = tempbb;
-       }
-    }
+  timevar_pop (TV_REORDER_BLOCKS);
+}
 
-  rebuild_scope_notes (&forest);
-  free_scope_forest (&forest);
-  reorder_blocks ();
+/* This function is the main 'entrance' for the optimization that
+   partitions hot and cold basic blocks into separate sections of the
+   .o file (to improve performance and cache locality).  Ideally it
+   would be called after all optimizations that rearrange the CFG have
+   been called.  However part of this optimization may introduce new
+   register usage, so it must be called before register allocation has
+   occurred.  This means that this optimization is actually called
+   well before the optimization that reorders basic blocks (see
+   function above).
+
+   This optimization checks the feedback information to determine
+   which basic blocks are hot/cold and causes reorder_basic_blocks to
+   add NOTE_INSN_UNLIKELY_EXECUTED_CODE to non-hot basic blocks.  The
+   presence or absence of this note is later used for writing out
+   sections in the .o file.  Because hot and cold sections can be
+   arbitrarily large (within the bounds of memory), far beyond the
+   size of a single function, it is necessary to fix up all edges that
+   cross section boundaries, to make sure the instructions used can
+   actually span the required distance.  The fixes are described
+   below.
+
+   Fall-through edges must be changed into jumps; it is not safe or
+   legal to fall through across a section boundary.  Whenever a
+   fall-through edge crossing a section boundary is encountered, a new
+   basic block is inserted (in the same section as the fall-through
+   source), and the fall through edge is redirected to the new basic
+   block.  The new basic block contains an unconditional jump to the
+   original fall-through target.  (If the unconditional jump is
+   insufficient to cross section boundaries, that is dealt with a
+   little later, see below).
+
+   In order to deal with architectures that have short conditional
+   branches (which cannot span all of memory) we take any conditional
+   jump that attempts to cross a section boundary and add a level of
+   indirection: it becomes a conditional jump to a new basic block, in
+   the same section.  The new basic block contains an unconditional
+   jump to the original target, in the other section.
+
+   For those architectures whose unconditional branch is also
+   incapable of reaching all of memory, those unconditional jumps are
+   converted into indirect jumps, through a register.
+
+   IMPORTANT NOTE: This optimization causes some messy interactions
+   with the cfg cleanup optimizations; those optimizations want to
+   merge blocks wherever possible, and to collapse indirect jump
+   sequences (change "A jumps to B jumps to C" directly into "A jumps
+   to C").  Those optimizations can undo the jump fixes that
+   partitioning is required to make (see above), in order to ensure
+   that jumps attempting to cross section boundaries are really able
+   to cover whatever distance the jump requires (on many architectures
+   conditional or unconditional jumps are not able to reach all of
+   memory).  Therefore tests have to be inserted into each such
+   optimization to make sure that it does not undo the fixes necessary
+   for crossing partition boundaries.  This would be much less of a problem
+   if we could perform this optimization later in the compilation, but
+   unfortunately the fact that we may need to create indirect jumps
+   (through registers) requires that this optimization be performed
+   before register allocation.  */
 
-#ifdef ENABLE_CHECKING
-  verify_flow_info ();
-#endif
+void
+partition_hot_cold_basic_blocks (void)
+{
+  basic_block cur_bb;
+  edge *crossing_edges;
+  int n_crossing_edges;
+  int max_edges = 2 * last_basic_block;
+  
+  if (n_basic_blocks <= 1)
+    return;
+  
+  crossing_edges = xcalloc (max_edges, sizeof (edge));
 
-  for (i = 0; i < n_basic_blocks; i++)
-    free (BASIC_BLOCK (i)->aux);
+  cfg_layout_initialize (0);
+  
+  FOR_EACH_BB (cur_bb)
+    if (cur_bb->index >= 0
+       && cur_bb->next_bb->index >= 0)
+      cur_bb->rbi->next = cur_bb->next_bb;
+  
+  find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges, 
+                                                       &n_crossing_edges, 
+                                                       &max_edges);
 
-  /* Free loop information.  */
-  flow_loops_free (&loops_info);
+  if (n_crossing_edges > 0)
+    fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);
+  
+  free (crossing_edges);
 
+  cfg_layout_finalize ();
 }
-
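
For context, the hot/cold split decided here ultimately rests on
profile or prediction data (probably_never_executed_bb_p above).  At
the source level a branch can be nudged toward the cold partition with
GCC's __builtin_expect; an illustrative sketch, not part of this
patch:

#include <stdio.h>
#include <stdlib.h>

#define unlikely(x) __builtin_expect ((x), 0)

static void *
checked_malloc (size_t n)
{
  void *p = malloc (n);

  /* Predicted almost-never-taken: a natural candidate for the
     unlikely-executed section.  */
  if (unlikely (p == NULL))
    {
      fprintf (stderr, "out of memory\n");
      exit (1);
    }
  return p;  /* Hot fall-through path.  */
}

int
main (void)
{
  free (checked_malloc (16));
  return 0;
}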