X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fbb-reorder.c;h=412f78a24da80eaa586d78253ef6c0151c6769fd;hp=72d3902e7fc6f49eade319ff4ff865fc91d0817c;hb=2166ab0e59a31376f980893ddeb85a998c9cd9cd;hpb=63f390bf43d4c01f4fb7011eb92aef4be0b1fd40 diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c index 72d3902e7fc..412f78a24da 100644 --- a/gcc/bb-reorder.c +++ b/gcc/bb-reorder.c @@ -1,5 +1,5 @@ /* Basic block reordering routines for the GNU compiler. - Copyright (C) 2000 Free Software Foundation, Inc. + Copyright (C) 2000, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. This file is part of GCC. @@ -15,1421 +15,2287 @@ You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 59 Temple Place - Suite 330, Boston, MA - 02111-1307, USA. */ - -/* References: - - "Profile Guided Code Positioning" - Pettis and Hanson; PLDI '90. - - TODO: - - (1) Consider: - - if (p) goto A; // predict taken - foo (); - A: - if (q) goto B; // predict taken - bar (); - B: - baz (); - return; - - We'll currently reorder this as - - if (!p) goto C; - A: - if (!q) goto D; - B: - baz (); - return; - D: - bar (); - goto B; - C: - foo (); - goto A; - - A better ordering is - - if (!p) goto C; - if (!q) goto D; - B: - baz (); - return; - C: - foo (); - if (q) goto B; - D: - bar (); - goto B; - - This requires that we be able to duplicate the jump at A, and - adjust the graph traversal such that greedy placement doesn't - fix D before C is considered. - - (2) Coordinate with shorten_branches to minimize the number of - long branches. - - (3) Invent a method by which sufficiently non-predicted code can - be moved to either the end of the section or another section - entirely. Some sort of NOTE_INSN note would work fine. - - This completely scroggs all debugging formats, so the user - would have to explicitly ask for it. + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* This (greedy) algorithm constructs traces in several rounds. + The construction starts from "seeds". The seed for the first round + is the entry point of function. When there are more than one seed + that one is selected first that has the lowest key in the heap + (see function bb_to_key). Then the algorithm repeatedly adds the most + probable successor to the end of a trace. Finally it connects the traces. + + There are two parameters: Branch Threshold and Exec Threshold. + If the edge to a successor of the actual basic block is lower than + Branch Threshold or the frequency of the successor is lower than + Exec Threshold the successor will be the seed in one of the next rounds. + Each round has these parameters lower than the previous one. + The last round has to have these parameters set to zero + so that the remaining blocks are picked up. + + The algorithm selects the most probable successor from all unvisited + successors and successors that have been added to this trace. + The other successors (that has not been "sent" to the next round) will be + other seeds for this round and the secondary traces will start in them. + If the successor has not been visited in this trace it is added to the trace + (however, there is some heuristic for simple branches). + If the successor has been visited in this trace the loop has been found. 
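   (Editorial illustration: suppose the trace built so far is
   A -> B -> C and the most probable successor of C is A, which is
   already part of this trace; the loop A-B-C has been found, and the
   two cases described next decide whether the loop is rotated or its
   header duplicated.)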
+ If the loop has many iterations the loop is rotated so that the + source block of the most probable edge going out from the loop + is the last block of the trace. + If the loop has few iterations and there is no edge from the last block of + the loop going out from loop the loop header is duplicated. + Finally, the construction of the trace is terminated. + + When connecting traces it first checks whether there is an edge from the + last block of one trace to the first block of another trace. + When there are still some unconnected traces it checks whether there exists + a basic block BB such that BB is a successor of the last bb of one trace + and BB is a predecessor of the first block of another trace. In this case, + BB is duplicated and the traces are connected through this duplicate. + The rest of traces are simply connected so there will be a jump to the + beginning of the rest of trace. + + + References: + + "Software Trace Cache" + A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999 + http://citeseer.nj.nec.com/15361.html + */ #include "config.h" #include "system.h" -#include "tree.h" +#include "coretypes.h" +#include "tm.h" #include "rtl.h" -#include "tm_p.h" -#include "hard-reg-set.h" -#include "basic-block.h" -#include "insn-config.h" #include "regs.h" #include "flags.h" +#include "timevar.h" #include "output.h" +#include "cfglayout.h" +#include "fibheap.h" +#include "target.h" #include "function.h" -#include "toplev.h" -#include "recog.h" -#include "expr.h" +#include "tm_p.h" #include "obstack.h" +#include "expr.h" +#include "params.h" +#include "toplev.h" +#include "tree-pass.h" - -#ifndef HAVE_epilogue -#define HAVE_epilogue 0 +#ifndef HAVE_conditional_execution +#define HAVE_conditional_execution 0 #endif +/* The number of rounds. In most cases there will only be 4 rounds, but + when partitioning hot and cold basic blocks into separate sections of + the .o file there will be an extra round.*/ +#define N_ROUNDS 5 -/* The contents of the current function definition are allocated - in this obstack, and all are freed at the end of the function. - For top-level functions, this is temporary_obstack. - Separate obstacks are made for nested functions. */ - -extern struct obstack flow_obstack; +/* Stubs in case we don't have a return insn. + We have to check at runtime too, not only compiletime. */ +#ifndef HAVE_return +#define HAVE_return 0 +#define gen_return() NULL_RTX +#endif -/* Structure to hold information about lexical scopes. */ -typedef struct scope_def -{ - int level; - /* The NOTE_INSN_BLOCK_BEG that started this scope. */ - rtx note_beg; +/* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */ +static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0}; - /* The NOTE_INSN_BLOCK_END that ended this scope. */ - rtx note_end; +/* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */ +static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0}; - /* The bb containing note_beg (if any). */ - basic_block bb_beg; +/* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry + block the edge destination is not duplicated while connecting traces. */ +#define DUPLICATION_THRESHOLD 100 - /* The bb containing note_end (if any). */ - basic_block bb_end; +/* Length of unconditional jump instruction. */ +static int uncond_jump_length; - /* List of basic blocks contained within this scope. */ - basic_block *bbs; +/* Structure to hold needed information for each basic block. 
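   An editorial illustration of how these fields are used (the names are
   the ones declared just below): after find_traces has run, the first
   block BB of trace number T has bbd[BB->index].start_of_trace == T and
   bbd[BB->index].in_trace == T, the last block of the trace has
   end_of_trace == T, and a block in the middle keeps start_of_trace and
   end_of_trace at -1 while still recording T in in_trace.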
*/ +typedef struct bbro_basic_block_data_def +{ + /* Which trace is the bb start of (-1 means it is not a start of a trace). */ + int start_of_trace; - /* Number of blocks contained within this scope. */ - int num_bbs; + /* Which trace is the bb end of (-1 means it is not an end of a trace). */ + int end_of_trace; - /* The outer scope or NULL if outermost scope. */ - struct scope_def *outer; + /* Which trace is the bb in? */ + int in_trace; - /* The first inner scope or NULL if innermost scope. */ - struct scope_def *inner; + /* Which heap is BB in (if any)? */ + fibheap_t heap; - /* The last inner scope or NULL if innermost scope. */ - struct scope_def *inner_last; + /* Which heap node is BB in (if any)? */ + fibnode_t node; +} bbro_basic_block_data; - /* Link to the next (sibling) scope. */ - struct scope_def *next; -} *scope; +/* The current size of the following dynamic array. */ +static int array_size; +/* The array which holds needed information for basic blocks. */ +static bbro_basic_block_data *bbd; -/* Structure to hold information about the scope forest. */ -typedef struct -{ - /* Number of trees in forest. */ - int num_trees; +/* To avoid frequent reallocation the size of arrays is greater than needed, + the number of elements is (not less than) 1.25 * size_wanted. */ +#define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5) - /* List of tree roots. */ - scope *trees; -} scope_forest_info; +/* Free the memory and set the pointer to NULL. */ +#define FREE(P) (gcc_assert (P), free (P), P = 0) -/* Structure to hold information about the blocks during reordering. */ -typedef struct reorder_block_def +/* Structure for holding information about a trace. */ +struct trace { - rtx eff_head; - rtx eff_end; - scope scope; - basic_block next; - int index; - int visited; -} *reorder_block_def; + /* First and last basic block of the trace. */ + basic_block first, last; -#define RBI(BB) ((reorder_block_def) (BB)->aux) + /* The round of the STC creation which this trace was found in. */ + int round; -/* Holds the interesting trailing notes for the function. */ -static rtx function_tail_eff_head; + /* The length (i.e. the number of basic blocks) of the trace. */ + int length; +}; +/* Maximum frequency and count of one of the entry blocks. */ +static int max_entry_frequency; +static gcov_type max_entry_count; /* Local function prototypes. 
*/ -static rtx skip_insns_after_block PARAMS ((basic_block)); -static void record_effective_endpoints PARAMS ((void)); -static void make_reorder_chain PARAMS ((void)); -static basic_block make_reorder_chain_1 PARAMS ((basic_block, basic_block)); -static rtx label_for_bb PARAMS ((basic_block)); -static rtx emit_jump_to_block_after PARAMS ((basic_block, rtx)); -static void fixup_reorder_chain PARAMS ((void)); -static void relate_bbs_with_scopes PARAMS ((scope)); -static scope make_new_scope PARAMS ((int, rtx)); -static void build_scope_forest PARAMS ((scope_forest_info *)); -static void remove_scope_notes PARAMS ((void)); -static void insert_intra_1 PARAMS ((scope, rtx *)); -static void insert_intra_bb_scope_notes PARAMS ((basic_block)); -static void insert_inter_bb_scope_notes PARAMS ((basic_block, basic_block)); -static void rebuild_scope_notes PARAMS ((scope_forest_info *)); -static void free_scope_forest_1 PARAMS ((scope)); -static void free_scope_forest PARAMS ((scope_forest_info *)); -void dump_scope_forest PARAMS ((scope_forest_info *)); -static void dump_scope_forest_1 PARAMS ((scope, int)); -static rtx get_next_bb_note PARAMS ((rtx)); -static rtx get_prev_bb_note PARAMS ((rtx)); - -void verify_insn_chain PARAMS ((void)); +static void find_traces (int *, struct trace *); +static basic_block rotate_loop (edge, struct trace *, int); +static void mark_bb_visited (basic_block, int); +static void find_traces_1_round (int, int, gcov_type, struct trace *, int *, + int, fibheap_t *, int); +static basic_block copy_bb (basic_block, edge, basic_block, int); +static fibheapkey_t bb_to_key (basic_block); +static bool better_edge_p (basic_block, edge, int, int, int, int, edge); +static void connect_traces (int, struct trace *); +static bool copy_bb_p (basic_block, int); +static int get_uncond_jump_length (void); +static bool push_to_next_round_p (basic_block, int, int, int, gcov_type); +static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *, + int *, + int *); +static void add_labels_and_missing_jumps (edge *, int); +static void add_reg_crossing_jump_notes (void); +static void fix_up_fall_thru_edges (void); +static void fix_edges_for_rarely_executed_code (edge *, int); +static void fix_crossing_conditional_branches (void); +static void fix_crossing_unconditional_branches (void); -/* Skip over inter-block insns occurring after BB which are typically - associated with BB (e.g., barriers). If there are any such insns, - we return the last one. Otherwise, we return the end of BB. */ - -static rtx -skip_insns_after_block (bb) - basic_block bb; +/* Check to see if bb should be pushed into the next round of trace + collections or not. Reasons for pushing the block forward are 1). + If the block is cold, we are doing partitioning, and there will be + another round (cold partition blocks are not supposed to be + collected into traces until the very last round); or 2). There will + be another round, and the basic block is not "hot enough" for the + current round of trace collection. 
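   Editorial worked example, paraphrasing the computation in find_traces
   (ignoring its overflow guard for counts):

     exec_th  = max_entry_frequency * exec_threshold[round] / 1000;
     count_th = max_entry_count * exec_threshold[round] / 1000;

   With exec_threshold[0] == 500, a block whose frequency is below half
   of the hottest entry block's frequency is deferred in the first round;
   in the final round both thresholds are zero, so nothing is deferred.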
*/ + +static bool +push_to_next_round_p (basic_block bb, int round, int number_of_rounds, + int exec_th, gcov_type count_th) { - rtx insn, last_insn, next_head, prev; + bool there_exists_another_round; + bool block_not_hot_enough; - next_head = NULL_RTX; - if (bb->index + 1 != n_basic_blocks) - next_head = BASIC_BLOCK (bb->index + 1)->head; - - for (last_insn = insn = bb->end; (insn = NEXT_INSN (insn)); ) - { - if (insn == next_head) - break; + there_exists_another_round = round < number_of_rounds - 1; - switch (GET_CODE (insn)) - { - case BARRIER: - last_insn = insn; - continue; - - case NOTE: - switch (NOTE_LINE_NUMBER (insn)) - { - case NOTE_INSN_LOOP_END: - case NOTE_INSN_BLOCK_END: - last_insn = insn; - continue; - case NOTE_INSN_DELETED: - case NOTE_INSN_DELETED_LABEL: - continue; - - default: - continue; - break; - } - break; - - case CODE_LABEL: - if (NEXT_INSN (insn) - && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN - && (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC - || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC)) - { - insn = NEXT_INSN (insn); - last_insn = insn; - continue; - } - break; - - default: - break; - } - - break; - } - /* It is possible to hit contradicting sequence. For instance: - - jump_insn - NOTE_INSN_LOOP_BEG - barrier + block_not_hot_enough = (bb->frequency < exec_th + || bb->count < count_th + || probably_never_executed_bb_p (bb)); - Where barrier belongs to jump_insn, but the note does not. - This can be created by removing the basic block originally - following NOTE_INSN_LOOP_BEG. - - In such case reorder the notes. */ - for (insn = last_insn; insn != bb->end; insn = prev) - { - prev = PREV_INSN (insn); - if (GET_CODE (insn) == NOTE) - switch (NOTE_LINE_NUMBER (insn)) - { - case NOTE_INSN_LOOP_END: - case NOTE_INSN_BLOCK_END: - case NOTE_INSN_DELETED: - case NOTE_INSN_DELETED_LABEL: - continue; - default: - reorder_insns (insn, insn, last_insn); - } - } - - return last_insn; + if (there_exists_another_round + && block_not_hot_enough) + return true; + else + return false; } - -/* Locate the effective beginning and end of the insn chain for each - block, as defined by skip_insns_after_block above. */ +/* Find the traces for Software Trace Cache. Chain each trace through + RBI()->next. Store the number of traces to N_TRACES and description of + traces to TRACES. */ static void -record_effective_endpoints () +find_traces (int *n_traces, struct trace *traces) { - rtx next_insn = get_insns (); int i; - - for (i = 0; i < n_basic_blocks; ++i) - { - basic_block bb = BASIC_BLOCK (i); - rtx end; - - RBI (bb)->eff_head = next_insn; - end = skip_insns_after_block (bb); - RBI (bb)->eff_end = end; - next_insn = NEXT_INSN (end); - } - function_tail_eff_head = next_insn; -} + int number_of_rounds; + edge e; + edge_iterator ei; + fibheap_t heap; + /* Add one extra round of trace collection when partitioning hot/cold + basic blocks into separate sections. The last round is for all the + cold blocks (and ONLY the cold blocks). */ -/* Compute an ordering for a subgraph beginning with block BB. Record the - ordering in RBI()->index and chained through RBI()->next. */ + number_of_rounds = N_ROUNDS - 1; -static void -make_reorder_chain () -{ - basic_block last_block = NULL; - basic_block prev = NULL; - int nbb_m1 = n_basic_blocks - 1; - - /* If we've not got epilogue in RTL, we must fallthru to the exit. - Force the last block to be at the end. */ - /* ??? Some ABIs (e.g. MIPS) require the return insn to be at the - end of the function for stack unwinding purposes. 
*/ - if (! HAVE_epilogue) + /* Insert entry points of function into heap. */ + heap = fibheap_new (); + max_entry_frequency = 0; + max_entry_count = 0; + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { - last_block = BASIC_BLOCK (nbb_m1); - RBI (last_block)->visited = 1; - nbb_m1 -= 1; + bbd[e->dest->index].heap = heap; + bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest), + e->dest); + if (e->dest->frequency > max_entry_frequency) + max_entry_frequency = e->dest->frequency; + if (e->dest->count > max_entry_count) + max_entry_count = e->dest->count; } - /* Loop until we've placed every block. */ - do + /* Find the traces. */ + for (i = 0; i < number_of_rounds; i++) { - int i; - basic_block next = NULL; + gcov_type count_threshold; - /* Find the next unplaced block. */ - /* ??? Get rid of this loop, and track which blocks are not yet - placed more directly, so as to avoid the O(N^2) worst case. - Perhaps keep a doubly-linked list of all to-be-placed blocks; - remove from the list as we place. The head of that list is - what we're looking for here. */ + if (dump_file) + fprintf (dump_file, "STC - round %d\n", i + 1); - for (i = 0; i <= nbb_m1; ++i) - { - basic_block bb = BASIC_BLOCK (i); - if (! RBI (bb)->visited) - { - next = bb; - break; - } - } - if (! next) - abort (); + if (max_entry_count < INT_MAX / 1000) + count_threshold = max_entry_count * exec_threshold[i] / 1000; + else + count_threshold = max_entry_count / 1000 * exec_threshold[i]; - prev = make_reorder_chain_1 (next, prev); + find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000, + max_entry_frequency * exec_threshold[i] / 1000, + count_threshold, traces, n_traces, i, &heap, + number_of_rounds); } - while (RBI (prev)->index < nbb_m1); + fibheap_delete (heap); - /* Terminate the chain. */ - if (! HAVE_epilogue) + if (dump_file) { - RBI (prev)->next = last_block; - RBI (last_block)->index = RBI (prev)->index + 1; - prev = last_block; + for (i = 0; i < *n_traces; i++) + { + basic_block bb; + fprintf (dump_file, "Trace %d (round %d): ", i + 1, + traces[i].round + 1); + for (bb = traces[i].first; bb != traces[i].last; bb = bb->aux) + fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency); + fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency); + } + fflush (dump_file); } - RBI (prev)->next = NULL; } -/* A helper function for make_reorder_chain. - - We do not follow EH edges, or non-fallthru edges to noreturn blocks. - These are assumed to be the error condition and we wish to cluster - all of them at the very end of the function for the benefit of cache - locality for the rest of the function. - - ??? We could do slightly better by noticing earlier that some subgraph - has all paths leading to noreturn functions, but for there to be more - than one block in such a subgraph is rare. */ +/* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE + (with sequential number TRACE_N). */ static basic_block -make_reorder_chain_1 (bb, prev) - basic_block bb; - basic_block prev; +rotate_loop (edge back_edge, struct trace *trace, int trace_n) { - edge e; - basic_block next; - rtx note; - - /* Mark this block visited. */ - if (prev) + basic_block bb; + + /* Information about the best end (end after rotation) of the loop. */ + basic_block best_bb = NULL; + edge best_edge = NULL; + int best_freq = -1; + gcov_type best_count = -1; + /* The best edge is preferred when its destination is not visited yet + or is a start block of some trace. 
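     Editorial illustration of the rotation performed here: if the whole
     trace is the loop H -> X -> Y with back edge Y -> H, and the most
     frequent suitable edge leaving the loop starts in X, the chain is
     relinked as Y -> H -> X, so X (the block returned to the caller)
     becomes the last block of the trace.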
*/ + bool is_preferred = false; + + /* Find the most frequent edge that goes out from current trace. */ + bb = back_edge->dest; + do { - int new_index; - - restart: - RBI (prev)->next = bb; - new_index = RBI (prev)->index + 1; - RBI (bb)->index = new_index; - - if (rtl_dump_file && prev->index + 1 != bb->index) - fprintf (rtl_dump_file, "Reordering block %d (%d) after %d (%d)\n", - bb->index, RBI (bb)->index, prev->index, RBI (prev)->index); + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->dest != EXIT_BLOCK_PTR + && e->dest->il.rtl->visited != trace_n + && (e->flags & EDGE_CAN_FALLTHRU) + && !(e->flags & EDGE_COMPLEX)) + { + if (is_preferred) + { + /* The best edge is preferred. */ + if (!e->dest->il.rtl->visited + || bbd[e->dest->index].start_of_trace >= 0) + { + /* The current edge E is also preferred. */ + int freq = EDGE_FREQUENCY (e); + if (freq > best_freq || e->count > best_count) + { + best_freq = freq; + best_count = e->count; + best_edge = e; + best_bb = bb; + } + } + } + else + { + if (!e->dest->il.rtl->visited + || bbd[e->dest->index].start_of_trace >= 0) + { + /* The current edge E is preferred. */ + is_preferred = true; + best_freq = EDGE_FREQUENCY (e); + best_count = e->count; + best_edge = e; + best_bb = bb; + } + else + { + int freq = EDGE_FREQUENCY (e); + if (!best_edge || freq > best_freq || e->count > best_count) + { + best_freq = freq; + best_count = e->count; + best_edge = e; + best_bb = bb; + } + } + } + } + bb = bb->aux; } - else - RBI (bb)->index = 0; - RBI (bb)->visited = 1; - prev = bb; - - if (bb->succ == NULL) - return prev; + while (bb != back_edge->dest); - /* Find the most probable block. */ - - next = NULL; - if (any_condjump_p (bb->end) - && (note = find_reg_note (bb->end, REG_BR_PROB, 0)) != NULL) + if (best_bb) { - int taken, probability; - edge e_taken, e_fall; - - probability = INTVAL (XEXP (note, 0)); - taken = probability > REG_BR_PROB_BASE / 2; - - /* Find the normal taken edge and the normal fallthru edge. - - Note, conditional jumps with other side effects may not - be fully optimized. In this case it is possible for - the conditional jump to branch to the same location as - the fallthru path. - - We should probably work to improve optimization of that - case; however, it seems silly not to also deal with such - problems here if they happen to occur. */ - - e_taken = e_fall = NULL; - for (e = bb->succ; e ; e = e->succ_next) + /* Rotate the loop so that the BEST_EDGE goes out from the last block of + the trace. */ + if (back_edge->dest == trace->first) { - if (e->flags & EDGE_FALLTHRU) - e_fall = e; - else if (! (e->flags & EDGE_EH)) - e_taken = e; + trace->first = best_bb->aux; } + else + { + basic_block prev_bb; - next = (taken ? e_taken : e_fall)->dest; - } + for (prev_bb = trace->first; + prev_bb->aux != back_edge->dest; + prev_bb = prev_bb->aux) + ; + prev_bb->aux = best_bb->aux; - /* In the absence of a prediction, disturb things as little as possible - by selecting the old "next" block from the list of successors. If - there had been a fallthru edge, that will be the one. */ - if (! next) - { - for (e = bb->succ; e ; e = e->succ_next) - if (e->dest->index == bb->index + 1) - { - if ((e->flags & EDGE_FALLTHRU) - || (e->dest->succ - && ! (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))) - next = e->dest; - break; - } + /* Try to get rid of uncond jump to cond jump. 
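          Editorial note: after the relinking above, PREV_BB no longer
          falls through to the old loop header in the layout; if that
          header is a small block ending in a conditional jump, copying
          it after PREV_BB (below) lets PREV_BB fall into the copy
          instead of jumping unconditionally to a block that branches
          again immediately.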
*/ + if (single_succ_p (prev_bb)) + { + basic_block header = single_succ (prev_bb); + + /* Duplicate HEADER if it is a small block containing cond jump + in the end. */ + if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0) + && !find_reg_note (BB_END (header), REG_CROSSING_JUMP, + NULL_RTX)) + copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n); + } + } } - - /* Make sure we didn't select a silly next block. */ - if (! next || next == EXIT_BLOCK_PTR || RBI (next)->visited) - next = NULL; - - /* Recurse on the successors. Unroll the last call, as the normal - case is exactly one or two edges, and we can tail recurse. */ - for (e = bb->succ; e; e = e->succ_next) - if (e->dest != EXIT_BLOCK_PTR - && ! RBI (e->dest)->visited - && e->dest->succ - && ! (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))) - { - if (next) - { - prev = make_reorder_chain_1 (next, prev); - next = RBI (e->dest)->visited ? NULL : e->dest; - } - else - next = e->dest; - } - if (next) + else { - bb = next; - goto restart; + /* We have not found suitable loop tail so do no rotation. */ + best_bb = back_edge->src; } - - return prev; + best_bb->aux = NULL; + return best_bb; } +/* This function marks BB that it was visited in trace number TRACE. */ -/* Locate or create a label for a given basic block. */ - -static rtx -label_for_bb (bb) - basic_block bb; +static void +mark_bb_visited (basic_block bb, int trace) { - rtx label = bb->head; - - if (GET_CODE (label) != CODE_LABEL) + bb->il.rtl->visited = trace; + if (bbd[bb->index].heap) { - if (rtl_dump_file) - fprintf (rtl_dump_file, "Emitting label for block %d (%d)\n", - bb->index, RBI (bb)->index); - - label = emit_label_before (gen_label_rtx (), label); - if (bb->head == RBI (bb)->eff_head) - RBI (bb)->eff_head = label; - bb->head = label; + fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node); + bbd[bb->index].heap = NULL; + bbd[bb->index].node = NULL; } - - return label; } +/* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH i.e. do + not include basic blocks their probability is lower than BRANCH_TH or their + frequency is lower than EXEC_TH into traces (or count is lower than + COUNT_TH). It stores the new traces into TRACES and modifies the number of + traces *N_TRACES. Sets the round (which the trace belongs to) to ROUND. It + expects that starting basic blocks are in *HEAP and at the end it deletes + *HEAP and stores starting points for the next round into new *HEAP. */ -/* Emit a jump to BB after insn AFTER. */ - -static rtx -emit_jump_to_block_after (bb, after) - basic_block bb; - rtx after; +static void +find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, + struct trace *traces, int *n_traces, int round, + fibheap_t *heap, int number_of_rounds) { - rtx jump; + /* Heap for discarded basic blocks which are possible starting points for + the next round. */ + fibheap_t new_heap = fibheap_new (); - if (bb != EXIT_BLOCK_PTR) - { - rtx label = label_for_bb (bb); - jump = emit_jump_insn_after (gen_jump (label), after); - JUMP_LABEL (jump) = label; - LABEL_NUSES (label) += 1; - if (basic_block_for_insn) - set_block_for_new_insns (jump, bb); - - if (rtl_dump_file) - fprintf (rtl_dump_file, "Emitting jump to block %d (%d)\n", - bb->index, RBI (bb)->index); - } - else + while (!fibheap_empty (*heap)) { -#ifdef HAVE_return - if (! 
HAVE_return) - abort (); - jump = emit_jump_insn_after (gen_return (), after); - - if (rtl_dump_file) - fprintf (rtl_dump_file, "Emitting return\n"); -#else - abort (); -#endif - } - - return jump; -} - + basic_block bb; + struct trace *trace; + edge best_edge, e; + fibheapkey_t key; + edge_iterator ei; + + bb = fibheap_extract_min (*heap); + bbd[bb->index].heap = NULL; + bbd[bb->index].node = NULL; + + if (dump_file) + fprintf (dump_file, "Getting bb %d\n", bb->index); + + /* If the BB's frequency is too low send BB to the next round. When + partitioning hot/cold blocks into separate sections, make sure all + the cold blocks (and ONLY the cold blocks) go into the (extra) final + round. */ + + if (push_to_next_round_p (bb, round, number_of_rounds, exec_th, + count_th)) + { + int key = bb_to_key (bb); + bbd[bb->index].heap = new_heap; + bbd[bb->index].node = fibheap_insert (new_heap, key, bb); + + if (dump_file) + fprintf (dump_file, + " Possible start point of next round: %d (key: %d)\n", + bb->index, key); + continue; + } -/* Given a reorder chain, rearrange the code to match. */ + trace = traces + *n_traces; + trace->first = bb; + trace->round = round; + trace->length = 0; + bbd[bb->index].in_trace = *n_traces; + (*n_traces)++; -static void -fixup_reorder_chain () -{ - basic_block bb, last_bb; + do + { + int prob, freq; + bool ends_in_call; - /* First do the bulk reordering -- rechain the blocks without regard to - the needed changes to jumps and labels. */ + /* The probability and frequency of the best edge. */ + int best_prob = INT_MIN / 2; + int best_freq = INT_MIN / 2; - last_bb = BASIC_BLOCK (0); - bb = RBI (last_bb)->next; - while (bb) - { - rtx last_e = RBI (last_bb)->eff_end; - rtx curr_h = RBI (bb)->eff_head; + best_edge = NULL; + mark_bb_visited (bb, *n_traces); + trace->length++; - NEXT_INSN (last_e) = curr_h; - PREV_INSN (curr_h) = last_e; + if (dump_file) + fprintf (dump_file, "Basic block %d was visited in trace %d\n", + bb->index, *n_traces - 1); - last_bb = bb; - bb = RBI (bb)->next; - } + ends_in_call = block_ends_with_call_p (bb); - { - rtx insn = RBI (last_bb)->eff_end; + /* Select the successor that will be placed after BB. */ + FOR_EACH_EDGE (e, ei, bb->succs) + { + gcc_assert (!(e->flags & EDGE_FAKE)); - NEXT_INSN (insn) = function_tail_eff_head; - if (function_tail_eff_head) - PREV_INSN (function_tail_eff_head) = insn; + if (e->dest == EXIT_BLOCK_PTR) + continue; - while (NEXT_INSN (insn)) - insn = NEXT_INSN (insn); - set_last_insn (insn); - } + if (e->dest->il.rtl->visited + && e->dest->il.rtl->visited != *n_traces) + continue; - /* Now add jumps and labels as needed to match the blocks new - outgoing edges. */ + if (BB_PARTITION (e->dest) != BB_PARTITION (bb)) + continue; - for (bb = BASIC_BLOCK (0); bb ; bb = RBI (bb)->next) - { - edge e_fall, e_taken, e; - rtx jump_insn, barrier_insn, bb_end_insn; - basic_block nb; + prob = e->probability; + freq = e->dest->frequency; - if (bb->succ == NULL) - continue; + /* The only sensible preference for a call instruction is the + fallthru edge. Don't bother selecting anything else. */ + if (ends_in_call) + { + if (e->flags & EDGE_CAN_FALLTHRU) + { + best_edge = e; + best_prob = prob; + best_freq = freq; + } + continue; + } - /* Find the old fallthru edge, and another non-EH edge for - a taken jump. */ - e_taken = e_fall = NULL; - for (e = bb->succ; e ; e = e->succ_next) - if (e->flags & EDGE_FALLTHRU) - e_fall = e; - else if (! 
(e->flags & EDGE_EH)) - e_taken = e; - - bb_end_insn = bb->end; - if (GET_CODE (bb_end_insn) == JUMP_INSN) - { - if (any_uncondjump_p (bb_end_insn)) - { - /* If the destination is still not next, nothing to do. */ - if (RBI (bb)->index + 1 != RBI (e_taken->dest)->index) + /* Edge that cannot be fallthru or improbable or infrequent + successor (i.e. it is unsuitable successor). */ + if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX) + || prob < branch_th || EDGE_FREQUENCY (e) < exec_th + || e->count < count_th) continue; - /* Otherwise, we can remove the jump and cleanup the edge. */ - tidy_fallthru_edge (e_taken, bb, e_taken->dest); - RBI (bb)->eff_end = skip_insns_after_block (bb); - RBI (e_taken->dest)->eff_head = NEXT_INSN (RBI (bb)->eff_end); + /* If partitioning hot/cold basic blocks, don't consider edges + that cross section boundaries. */ - if (rtl_dump_file) - fprintf (rtl_dump_file, "Removing jump in block %d (%d)\n", - bb->index, RBI (bb)->index); - continue; + if (better_edge_p (bb, e, prob, freq, best_prob, best_freq, + best_edge)) + { + best_edge = e; + best_prob = prob; + best_freq = freq; + } } - else if (any_condjump_p (bb_end_insn)) + + /* If the best destination has multiple predecessors, and can be + duplicated cheaper than a jump, don't allow it to be added + to a trace. We'll duplicate it when connecting traces. */ + if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2 + && copy_bb_p (best_edge->dest, 0)) + best_edge = NULL; + + /* Add all non-selected successors to the heaps. */ + FOR_EACH_EDGE (e, ei, bb->succs) { - /* If the old fallthru is still next, nothing to do. */ - if (RBI (bb)->index + 1 == RBI (e_fall->dest)->index - || (RBI (bb)->index == n_basic_blocks - 1 - && e_fall->dest == EXIT_BLOCK_PTR)) + if (e == best_edge + || e->dest == EXIT_BLOCK_PTR + || e->dest->il.rtl->visited) continue; - /* There is one special case: if *neither* block is next, - such as happens at the very end of a function, then we'll - need to add a new unconditional jump. Choose the taken - edge based on known or assumed probability. */ - if (RBI (bb)->index + 1 != RBI (e_taken->dest)->index) + key = bb_to_key (e->dest); + + if (bbd[e->dest->index].heap) { - rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0); - if (note - && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2 - && invert_jump (bb_end_insn, - label_for_bb (e_fall->dest), 0)) + /* E->DEST is already in some heap. */ + if (key != bbd[e->dest->index].node->key) { - e_fall->flags &= ~EDGE_FALLTHRU; - e_taken->flags |= EDGE_FALLTHRU; - e = e_fall, e_fall = e_taken, e_taken = e; + if (dump_file) + { + fprintf (dump_file, + "Changing key for bb %d from %ld to %ld.\n", + e->dest->index, + (long) bbd[e->dest->index].node->key, + key); + } + fibheap_replace_key (bbd[e->dest->index].heap, + bbd[e->dest->index].node, key); } } - - /* Otherwise we can try to invert the jump. This will - basically never fail, however, keep up the pretense. */ - else if (invert_jump (bb_end_insn, - label_for_bb (e_fall->dest), 0)) + else { - e_fall->flags &= ~EDGE_FALLTHRU; - e_taken->flags |= EDGE_FALLTHRU; - continue; + fibheap_t which_heap = *heap; + + prob = e->probability; + freq = EDGE_FREQUENCY (e); + + if (!(e->flags & EDGE_CAN_FALLTHRU) + || (e->flags & EDGE_COMPLEX) + || prob < branch_th || freq < exec_th + || e->count < count_th) + { + /* When partitioning hot/cold basic blocks, make sure + the cold blocks (and only the cold blocks) all get + pushed to the last round of trace collection. 
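            Editorial note: a cold successor is deferred one round at a
            time; push_to_next_round_p keeps returning true for it while
            another round remains, so it is re-inserted into each
            successive round's heap and is only picked up in the last
            round, whose thresholds are zero.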
*/ + + if (push_to_next_round_p (e->dest, round, + number_of_rounds, + exec_th, count_th)) + which_heap = new_heap; + } + + bbd[e->dest->index].heap = which_heap; + bbd[e->dest->index].node = fibheap_insert (which_heap, + key, e->dest); + + if (dump_file) + { + fprintf (dump_file, + " Possible start of %s round: %d (key: %ld)\n", + (which_heap == new_heap) ? "next" : "this", + e->dest->index, (long) key); + } + } } - else if (returnjump_p (bb_end_insn)) - continue; - else + + if (best_edge) /* Suitable successor was found. */ { - /* Otherwise we have some switch or computed jump. In the - 99% case, there should not have been a fallthru edge. */ - if (! e_fall) - continue; -#ifdef CASE_DROPS_THROUGH - /* Except for VAX. Since we didn't have predication for the - tablejump, the fallthru block should not have moved. */ - if (RBI (bb)->index + 1 == RBI (e_fall->dest)->index) - continue; - bb_end_insn = skip_insns_after_block (bb); -#else - abort (); -#endif + if (best_edge->dest->il.rtl->visited == *n_traces) + { + /* We do nothing with one basic block loops. */ + if (best_edge->dest != bb) + { + if (EDGE_FREQUENCY (best_edge) + > 4 * best_edge->dest->frequency / 5) + { + /* The loop has at least 4 iterations. If the loop + header is not the first block of the function + we can rotate the loop. */ + + if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb) + { + if (dump_file) + { + fprintf (dump_file, + "Rotating loop %d - %d\n", + best_edge->dest->index, bb->index); + } + bb->aux = best_edge->dest; + bbd[best_edge->dest->index].in_trace = + (*n_traces) - 1; + bb = rotate_loop (best_edge, trace, *n_traces); + } + } + else + { + /* The loop has less than 4 iterations. */ + + if (single_succ_p (bb) + && copy_bb_p (best_edge->dest, !optimize_size)) + { + bb = copy_bb (best_edge->dest, best_edge, bb, + *n_traces); + trace->length++; + } + } + } + + /* Terminate the trace. */ + break; + } + else + { + /* Check for a situation + + A + /| + B | + \| + C + + where + EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC) + >= EDGE_FREQUENCY (AC). + (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) ) + Best ordering is then A B C. + + This situation is created for example by: + + if (A) B; + C; + + */ + + FOR_EACH_EDGE (e, ei, bb->succs) + if (e != best_edge + && (e->flags & EDGE_CAN_FALLTHRU) + && !(e->flags & EDGE_COMPLEX) + && !e->dest->il.rtl->visited + && single_pred_p (e->dest) + && !(e->flags & EDGE_CROSSING) + && single_succ_p (e->dest) + && (single_succ_edge (e->dest)->flags + & EDGE_CAN_FALLTHRU) + && !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX) + && single_succ (e->dest) == best_edge->dest + && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)) + { + best_edge = e; + if (dump_file) + fprintf (dump_file, "Selecting BB %d\n", + best_edge->dest->index); + break; + } + + bb->aux = best_edge->dest; + bbd[best_edge->dest->index].in_trace = (*n_traces) - 1; + bb = best_edge->dest; + } } } - else + while (best_edge); + trace->last = bb; + bbd[trace->first->index].start_of_trace = *n_traces - 1; + bbd[trace->last->index].end_of_trace = *n_traces - 1; + + /* The trace is terminated so we have to recount the keys in heap + (some block can have a lower key because now one of its predecessors + is an end of the trace). */ + FOR_EACH_EDGE (e, ei, bb->succs) { - /* No fallthru implies a noreturn function with EH edges, or - something similarly bizarre. In any case, we don't need to - do anything. */ - if (! e_fall) - continue; - - /* If the fallthru block is still next, nothing to do. 
*/ - if (RBI (bb)->index + 1 == RBI (e_fall->dest)->index - || (RBI (bb)->index == n_basic_blocks - 1 - && e_fall->dest == EXIT_BLOCK_PTR)) + if (e->dest == EXIT_BLOCK_PTR + || e->dest->il.rtl->visited) continue; - /* We need a new jump insn. If the block has only one outgoing - edge, then we can stuff the new jump insn in directly. */ - if (bb->succ->succ_next == NULL) + if (bbd[e->dest->index].heap) { - e_fall->flags &= ~EDGE_FALLTHRU; - - jump_insn = emit_jump_to_block_after (e_fall->dest, bb_end_insn); - bb->end = jump_insn; - barrier_insn = emit_barrier_after (jump_insn); - RBI (bb)->eff_end = barrier_insn; - continue; + key = bb_to_key (e->dest); + if (key != bbd[e->dest->index].node->key) + { + if (dump_file) + { + fprintf (dump_file, + "Changing key for bb %d from %ld to %ld.\n", + e->dest->index, + (long) bbd[e->dest->index].node->key, key); + } + fibheap_replace_key (bbd[e->dest->index].heap, + bbd[e->dest->index].node, + key); + } } } - - /* We got here if we need to add a new jump insn in a new block - across the edge e_fall. */ - - jump_insn = emit_jump_to_block_after (e_fall->dest, bb_end_insn); - barrier_insn = emit_barrier_after (jump_insn); - - VARRAY_GROW (basic_block_info, ++n_basic_blocks); - create_basic_block (n_basic_blocks - 1, jump_insn, jump_insn, NULL); - - nb = BASIC_BLOCK (n_basic_blocks - 1); - nb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); - nb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); - nb->local_set = 0; - nb->count = e_fall->count; - nb->frequency = EDGE_FREQUENCY (e_fall); - - COPY_REG_SET (nb->global_live_at_start, bb->global_live_at_start); - COPY_REG_SET (nb->global_live_at_end, bb->global_live_at_start); - - nb->aux = xmalloc (sizeof (struct reorder_block_def)); - RBI (nb)->eff_head = nb->head; - RBI (nb)->eff_end = barrier_insn; - RBI (nb)->scope = RBI (bb)->scope; - RBI (nb)->index = RBI (bb)->index + 1; - RBI (nb)->visited = 1; - RBI (nb)->next = RBI (bb)->next; - RBI (bb)->next = nb; - - /* Link to new block. */ - make_edge (NULL, nb, e_fall->dest, 0); - redirect_edge_succ (e_fall, nb); - nb->succ->count = e_fall->count; - nb->succ->probability = REG_BR_PROB_BASE; - - /* Don't process this new block. */ - bb = nb; - - /* Fix subsequent reorder block indices to reflect new block. */ - while ((nb = RBI (nb)->next) != NULL) - RBI (nb)->index += 1; } - /* Put basic_block_info in the new order. */ - for (bb = BASIC_BLOCK (0); bb ; bb = RBI (bb)->next) - { - bb->index = RBI (bb)->index; - BASIC_BLOCK (bb->index) = bb; - } -} + fibheap_delete (*heap); + /* "Return" the new heap. */ + *heap = new_heap; +} -/* Perform sanity checks on the insn chain. - 1. Check that next/prev pointers are consistent in both the forward and - reverse direction. - 2. Count insns in chain, going both directions, and check if equal. - 3. Check that get_last_insn () returns the actual end of chain. */ +/* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add + it to trace after BB, mark OLD_BB visited and update pass' data structures + (TRACE is a number of trace which OLD_BB is duplicated to). 
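   Editorial note on the array growth done below: duplicate_block gives
   the copy a fresh block index, so the BBD array may need to grow.
   GET_ARRAY_SIZE keeps at least 25% slack, e.g.
   GET_ARRAY_SIZE (10) == ((10 / 4) + 1) * 5 == 15, and each newly added
   entry is reset to the -1 / NULL defaults.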
*/ -void -verify_insn_chain () +static basic_block +copy_bb (basic_block old_bb, edge e, basic_block bb, int trace) { - rtx x, - prevx, - nextx; - int insn_cnt1, - insn_cnt2; - - prevx = NULL; - insn_cnt1 = 1; - for (x = get_insns (); x; x = NEXT_INSN (x)) + basic_block new_bb; + + new_bb = duplicate_block (old_bb, e, bb); + BB_COPY_PARTITION (new_bb, old_bb); + + gcc_assert (e->dest == new_bb); + gcc_assert (!e->dest->il.rtl->visited); + + if (dump_file) + fprintf (dump_file, + "Duplicated bb %d (created bb %d)\n", + old_bb->index, new_bb->index); + new_bb->il.rtl->visited = trace; + new_bb->aux = bb->aux; + bb->aux = new_bb; + + if (new_bb->index >= array_size || last_basic_block > array_size) { - if (PREV_INSN (x) != prevx) + int i; + int new_size; + + new_size = MAX (last_basic_block, new_bb->index + 1); + new_size = GET_ARRAY_SIZE (new_size); + bbd = xrealloc (bbd, new_size * sizeof (bbro_basic_block_data)); + for (i = array_size; i < new_size; i++) { - fprintf (stderr, "Forward traversal: insn chain corrupt.\n"); - fprintf (stderr, "previous insn:\n"); - debug_rtx (prevx); - fprintf (stderr, "current insn:\n"); - debug_rtx (x); - abort (); + bbd[i].start_of_trace = -1; + bbd[i].in_trace = -1; + bbd[i].end_of_trace = -1; + bbd[i].heap = NULL; + bbd[i].node = NULL; } - ++insn_cnt1; - prevx = x; - } - - if (prevx != get_last_insn ()) - { - fprintf (stderr, "last_insn corrupt.\n"); - abort (); - } + array_size = new_size; - nextx = NULL; - insn_cnt2 = 1; - for (x = get_last_insn (); x; x = PREV_INSN (x)) - { - if (NEXT_INSN (x) != nextx) + if (dump_file) { - fprintf (stderr, "Reverse traversal: insn chain corrupt.\n"); - fprintf (stderr, "current insn:\n"); - debug_rtx (x); - fprintf (stderr, "next insn:\n"); - debug_rtx (nextx); - abort (); + fprintf (dump_file, + "Growing the dynamic array to %d elements.\n", + array_size); } - ++insn_cnt2; - nextx = x; } - if (insn_cnt1 != insn_cnt2) - { - fprintf (stderr, "insn_cnt1 (%d) not equal to insn_cnt2 (%d).\n", - insn_cnt1, insn_cnt2); - abort (); - } + bbd[new_bb->index].in_trace = trace; + + return new_bb; } -static rtx -get_next_bb_note (x) - rtx x; +/* Compute and return the key (for the heap) of the basic block BB. */ + +static fibheapkey_t +bb_to_key (basic_block bb) { - while (x) + edge e; + edge_iterator ei; + int priority = 0; + + /* Do not start in probably never executed blocks. */ + + if (BB_PARTITION (bb) == BB_COLD_PARTITION + || probably_never_executed_bb_p (bb)) + return BB_FREQ_MAX; + + /* Prefer blocks whose predecessor is an end of some trace + or whose predecessor edge is EDGE_DFS_BACK. */ + FOR_EACH_EDGE (e, ei, bb->preds) { - if (NOTE_INSN_BASIC_BLOCK_P (x)) - return x; - x = NEXT_INSN (x); + if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0) + || (e->flags & EDGE_DFS_BACK)) + { + int edge_freq = EDGE_FREQUENCY (e); + + if (edge_freq > priority) + priority = edge_freq; + } } - return NULL; + + if (priority) + /* The block with priority should have significantly lower key. */ + return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency); + return -bb->frequency; } +/* Return true when the edge E from basic block BB is better than the temporary + best edge (details are in function). The probability of edge E is PROB. The + frequency of the successor is FREQ. The current best probability is + BEST_PROB, the best frequency is BEST_FREQ. + The edge is considered to be equivalent when PROB does not differ much from + BEST_PROB; similarly for frequency. 
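   Editorial worked example (assuming the usual REG_BR_PROB_BASE of
   10000): with BEST_PROB == 7000 the tolerance DIFF_PROB is 700, so an
   edge with probability 7600 counts as equivalent and the decision falls
   through to the frequency test, where the successor with the lower
   frequency wins, since a higher-frequency successor must have other
   incoming edges.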
*/ -static rtx -get_prev_bb_note (x) - rtx x; +static bool +better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob, + int best_freq, edge cur_best_edge) { - while (x) - { - if (NOTE_INSN_BASIC_BLOCK_P (x)) - return x; - x = PREV_INSN (x); - } - return NULL; -} + bool is_better_edge; + + /* The BEST_* values do not have to be best, but can be a bit smaller than + maximum values. */ + int diff_prob = best_prob / 10; + int diff_freq = best_freq / 10; + + if (prob > best_prob + diff_prob) + /* The edge has higher probability than the temporary best edge. */ + is_better_edge = true; + else if (prob < best_prob - diff_prob) + /* The edge has lower probability than the temporary best edge. */ + is_better_edge = false; + else if (freq < best_freq - diff_freq) + /* The edge and the temporary best edge have almost equivalent + probabilities. The higher frequency of a successor now means + that there is another edge going into that successor. + This successor has lower frequency so it is better. */ + is_better_edge = true; + else if (freq > best_freq + diff_freq) + /* This successor has higher frequency so it is worse. */ + is_better_edge = false; + else if (e->dest->prev_bb == bb) + /* The edges have equivalent probabilities and the successors + have equivalent frequencies. Select the previous successor. */ + is_better_edge = true; + else + is_better_edge = false; + + /* If we are doing hot/cold partitioning, make sure that we always favor + non-crossing edges over crossing edges. */ + if (!is_better_edge + && flag_reorder_blocks_and_partition + && cur_best_edge + && (cur_best_edge->flags & EDGE_CROSSING) + && !(e->flags & EDGE_CROSSING)) + is_better_edge = true; -/* Determine and record the relationships between basic blocks and - scopes in scope tree S. */ + return is_better_edge; +} + +/* Connect traces in array TRACES, N_TRACES is the count of traces. */ static void -relate_bbs_with_scopes (s) - scope s; +connect_traces (int n_traces, struct trace *traces) { - scope p; - int i, bbi1, bbi2, bbs_spanned; - rtx bbnote; + int i; + bool *connected; + bool two_passes; + int last_trace; + int current_pass; + int current_partition; + int freq_threshold; + gcov_type count_threshold; + + freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000; + if (max_entry_count < INT_MAX / 1000) + count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000; + else + count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD; - for (p = s->inner; p; p = p->next) - relate_bbs_with_scopes (p); + connected = XCNEWVEC (bool, n_traces); + last_trace = -1; + current_pass = 1; + current_partition = BB_PARTITION (traces[0].first); + two_passes = false; - bbi1 = bbi2 = -1; - bbs_spanned = 0; + if (flag_reorder_blocks_and_partition) + for (i = 0; i < n_traces && !two_passes; i++) + if (BB_PARTITION (traces[0].first) + != BB_PARTITION (traces[i].first)) + two_passes = true; - /* If the begin and end notes are both inside the same basic block, - or if they are both outside of basic blocks, then we know immediately - how they are related. Otherwise, we need to poke around to make the - determination. */ - if (s->bb_beg != s->bb_end) + for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++) { - if (s->bb_beg && s->bb_end) - { - /* Both notes are in different bbs. This implies that all the - basic blocks spanned by the pair of notes are contained in - this scope. 
*/ - bbi1 = s->bb_beg->index; - bbi2 = s->bb_end->index; - bbs_spanned = 1; + int t = i; + int t2; + edge e, best; + int best_len; + + if (i >= n_traces) + { + gcc_assert (two_passes && current_pass == 1); + i = 0; + t = i; + current_pass = 2; + if (current_partition == BB_HOT_PARTITION) + current_partition = BB_COLD_PARTITION; + else + current_partition = BB_HOT_PARTITION; } - else if (! s->bb_beg) - { - /* First note is outside of a bb. If the scope spans more than - one basic block, then they all are contained within this - scope. Otherwise, this scope is contained within the basic - block. */ - bbnote = get_next_bb_note (s->note_beg); - if (! bbnote) - abort (); - if (NOTE_BASIC_BLOCK (bbnote) == s->bb_end) + + if (connected[t]) + continue; + + if (two_passes + && BB_PARTITION (traces[t].first) != current_partition) + continue; + + connected[t] = true; + + /* Find the predecessor traces. */ + for (t2 = t; t2 > 0;) + { + edge_iterator ei; + best = NULL; + best_len = 0; + FOR_EACH_EDGE (e, ei, traces[t2].first->preds) { - bbs_spanned = 0; - s->bb_beg = NOTE_BASIC_BLOCK (bbnote); + int si = e->src->index; + + if (e->src != ENTRY_BLOCK_PTR + && (e->flags & EDGE_CAN_FALLTHRU) + && !(e->flags & EDGE_COMPLEX) + && bbd[si].end_of_trace >= 0 + && !connected[bbd[si].end_of_trace] + && (BB_PARTITION (e->src) == current_partition) + && (!best + || e->probability > best->probability + || (e->probability == best->probability + && traces[bbd[si].end_of_trace].length > best_len))) + { + best = e; + best_len = traces[bbd[si].end_of_trace].length; + } } - else + if (best) { - bbi1 = NOTE_BASIC_BLOCK (bbnote)->index; - bbi2 = s->bb_end->index; - s->bb_end = NULL; - bbs_spanned = 1; + best->src->aux = best->dest; + t2 = bbd[best->src->index].end_of_trace; + connected[t2] = true; + + if (dump_file) + { + fprintf (dump_file, "Connection: %d %d\n", + best->src->index, best->dest->index); + } } + else + break; } - else /* ! s->bb_end */ - { - /* Second note is outside of a bb. If the scope spans more than - one basic block, then they all are contained within this - scope. Otherwise, this scope is contained within the basic - block. */ - bbnote = get_prev_bb_note (s->note_end); - if (! bbnote) - abort (); - if (NOTE_BASIC_BLOCK (bbnote) == s->bb_beg) + + if (last_trace >= 0) + traces[last_trace].last->aux = traces[t2].first; + last_trace = t; + + /* Find the successor traces. */ + while (1) + { + /* Find the continuation of the chain. */ + edge_iterator ei; + best = NULL; + best_len = 0; + FOR_EACH_EDGE (e, ei, traces[t].last->succs) { - bbs_spanned = 0; - s->bb_end = NOTE_BASIC_BLOCK (bbnote); + int di = e->dest->index; + + if (e->dest != EXIT_BLOCK_PTR + && (e->flags & EDGE_CAN_FALLTHRU) + && !(e->flags & EDGE_COMPLEX) + && bbd[di].start_of_trace >= 0 + && !connected[bbd[di].start_of_trace] + && (BB_PARTITION (e->dest) == current_partition) + && (!best + || e->probability > best->probability + || (e->probability == best->probability + && traces[bbd[di].start_of_trace].length > best_len))) + { + best = e; + best_len = traces[bbd[di].start_of_trace].length; + } + } + + if (best) + { + if (dump_file) + { + fprintf (dump_file, "Connection: %d %d\n", + best->src->index, best->dest->index); + } + t = bbd[best->dest->index].start_of_trace; + traces[last_trace].last->aux = traces[t].first; + connected[t] = true; + last_trace = t; } else { - bbi1 = s->bb_beg->index; - bbi2 = NOTE_BASIC_BLOCK (bbnote)->index; - s->bb_beg = NULL; - bbs_spanned = 1; + /* Try to connect the traces by duplication of 1 block. 
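              Editorial illustration: if this trace ends in block A, some
              still unconnected trace starts in block C, and there is a
              block B with edges A -> B and B -> C, then B may be copied
              (subject to the frequency, count and copy_bb_p checks below)
              so that the layout becomes ... A B' C ... and the two traces
              are joined through the duplicate B'.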
*/ + edge e2; + basic_block next_bb = NULL; + bool try_copy = false; + + FOR_EACH_EDGE (e, ei, traces[t].last->succs) + if (e->dest != EXIT_BLOCK_PTR + && (e->flags & EDGE_CAN_FALLTHRU) + && !(e->flags & EDGE_COMPLEX) + && (!best || e->probability > best->probability)) + { + edge_iterator ei; + edge best2 = NULL; + int best2_len = 0; + + /* If the destination is a start of a trace which is only + one block long, then no need to search the successor + blocks of the trace. Accept it. */ + if (bbd[e->dest->index].start_of_trace >= 0 + && traces[bbd[e->dest->index].start_of_trace].length + == 1) + { + best = e; + try_copy = true; + continue; + } + + FOR_EACH_EDGE (e2, ei, e->dest->succs) + { + int di = e2->dest->index; + + if (e2->dest == EXIT_BLOCK_PTR + || ((e2->flags & EDGE_CAN_FALLTHRU) + && !(e2->flags & EDGE_COMPLEX) + && bbd[di].start_of_trace >= 0 + && !connected[bbd[di].start_of_trace] + && (BB_PARTITION (e2->dest) == current_partition) + && (EDGE_FREQUENCY (e2) >= freq_threshold) + && (e2->count >= count_threshold) + && (!best2 + || e2->probability > best2->probability + || (e2->probability == best2->probability + && traces[bbd[di].start_of_trace].length + > best2_len)))) + { + best = e; + best2 = e2; + if (e2->dest != EXIT_BLOCK_PTR) + best2_len = traces[bbd[di].start_of_trace].length; + else + best2_len = INT_MAX; + next_bb = e2->dest; + try_copy = true; + } + } + } + + if (flag_reorder_blocks_and_partition) + try_copy = false; + + /* Copy tiny blocks always; copy larger blocks only when the + edge is traversed frequently enough. */ + if (try_copy + && copy_bb_p (best->dest, + !optimize_size + && EDGE_FREQUENCY (best) >= freq_threshold + && best->count >= count_threshold)) + { + basic_block new_bb; + + if (dump_file) + { + fprintf (dump_file, "Connection: %d %d ", + traces[t].last->index, best->dest->index); + if (!next_bb) + fputc ('\n', dump_file); + else if (next_bb == EXIT_BLOCK_PTR) + fprintf (dump_file, "exit\n"); + else + fprintf (dump_file, "%d\n", next_bb->index); + } + + new_bb = copy_bb (best->dest, best, traces[t].last, t); + traces[t].last = new_bb; + if (next_bb && next_bb != EXIT_BLOCK_PTR) + { + t = bbd[next_bb->index].start_of_trace; + traces[last_trace].last->aux = traces[t].first; + connected[t] = true; + last_trace = t; + } + else + break; /* Stop finding the successor traces. */ + } + else + break; /* Stop finding the successor traces. */ } } } - else + + if (dump_file) { - if (s->bb_beg) - /* Both notes are in the same bb, which implies the block - contains this scope. */ - bbs_spanned = 0; + basic_block bb; + + fprintf (dump_file, "Final order:\n"); + for (bb = traces[0].first; bb; bb = bb->aux) + fprintf (dump_file, "%d ", bb->index); + fprintf (dump_file, "\n"); + fflush (dump_file); + } + + FREE (connected); +} + +/* Return true when BB can and should be copied. CODE_MAY_GROW is true + when code size is allowed to grow by duplication. */ + +static bool +copy_bb_p (basic_block bb, int code_may_grow) +{ + int size = 0; + int max_size = uncond_jump_length; + rtx insn; + + if (!bb->frequency) + return false; + if (EDGE_COUNT (bb->preds) < 2) + return false; + if (!can_duplicate_block_p (bb)) + return false; + + /* Avoid duplicating blocks which have many successors (PR/13430). 
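   Editorial note: a block ending in a tablejump or computed jump can have
   one successor per case label, and duplicating the block also duplicates
   every outgoing edge, hence the hard limit of 8 successors enforced
   below.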
*/ + if (EDGE_COUNT (bb->succs) > 8) + return false; + + if (code_may_grow && maybe_hot_bb_p (bb)) + max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS); + + FOR_BB_INSNS (bb, insn) + { + if (INSN_P (insn)) + size += get_attr_min_length (insn); + } + + if (size <= max_size) + return true; + + if (dump_file) + { + fprintf (dump_file, + "Block %d can't be copied because its size = %d.\n", + bb->index, size); + } + + return false; +} + +/* Return the length of unconditional jump instruction. */ + +static int +get_uncond_jump_length (void) +{ + rtx label, jump; + int length; + + label = emit_label_before (gen_label_rtx (), get_insns ()); + jump = emit_jump_insn (gen_jump (label)); + + length = get_attr_min_length (jump); + + delete_insn (jump); + delete_insn (label); + return length; +} + +/* Find the basic blocks that are rarely executed and need to be moved to + a separate section of the .o file (to cut down on paging and improve + cache locality). */ + +static void +find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, + int *n_crossing_edges, + int *max_idx) +{ + basic_block bb; + bool has_hot_blocks = false; + edge e; + int i; + edge_iterator ei; + + /* Mark which partition (hot/cold) each basic block belongs in. */ + + FOR_EACH_BB (bb) + { + if (probably_never_executed_bb_p (bb)) + BB_SET_PARTITION (bb, BB_COLD_PARTITION); else { - rtx x1, x2; - /* Both notes are outside of any bbs. This implies that all the - basic blocks spanned by the pair of notes are contained in - this scope. - There is a degenerate case to consider. If the notes do not - span any basic blocks, then it is an empty scope that can - safely be deleted or ignored. Mark these with level = -1. */ - - x1 = get_next_bb_note (s->note_beg); - x2 = get_prev_bb_note (s->note_end); - if (! (x1 && x2)) - { - s->level = -1; - bbs_spanned = 0; - } - else - { - bbi1 = NOTE_BASIC_BLOCK (x1)->index; - bbi2 = NOTE_BASIC_BLOCK (x2)->index; - bbs_spanned = 1; - } + BB_SET_PARTITION (bb, BB_HOT_PARTITION); + has_hot_blocks = true; } } - /* If the scope spans one or more basic blocks, we record them. We - only record the bbs that are immediately contained within this - scope. Note that if a scope is contained within a bb, we can tell - by checking that bb_beg = bb_end and that they are non-null. */ - if (bbs_spanned) - { - int j = 0; + /* Mark every edge that crosses between sections. */ - s->num_bbs = 0; - for (i = bbi1; i <= bbi2; i++) - if (! RBI (BASIC_BLOCK (i))->scope) - s->num_bbs++; - - s->bbs = xmalloc (s->num_bbs * sizeof (basic_block)); - for (i = bbi1; i <= bbi2; i++) + i = 0; + FOR_EACH_BB (bb) + FOR_EACH_EDGE (e, ei, bb->succs) + { + if (e->src != ENTRY_BLOCK_PTR + && e->dest != EXIT_BLOCK_PTR + && BB_PARTITION (e->src) != BB_PARTITION (e->dest)) { - basic_block curr_bb = BASIC_BLOCK (i); - if (! RBI (curr_bb)->scope) + e->flags |= EDGE_CROSSING; + if (i == *max_idx) { - s->bbs[j++] = curr_bb; - RBI (curr_bb)->scope = s; + *max_idx *= 2; + crossing_edges = xrealloc (crossing_edges, + (*max_idx) * sizeof (edge)); } + crossing_edges[i++] = e; } + else + e->flags &= ~EDGE_CROSSING; } - else - s->num_bbs = 0; + *n_crossing_edges = i; } +/* If any destination of a crossing edge does not have a label, add label; + Convert any fall-through crossing edges (for blocks that do not contain + a jump) to unconditional jumps. */ -/* Allocate and initialize a new scope structure with scope level LEVEL, - and record the NOTE beginning the scope. 
*/ +static void +add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges) +{ + int i; + basic_block src; + basic_block dest; + rtx label; + rtx barrier; + rtx new_jump; + + for (i=0; i < n_crossing_edges; i++) + { + if (crossing_edges[i]) + { + src = crossing_edges[i]->src; + dest = crossing_edges[i]->dest; + + /* Make sure dest has a label. */ + + if (dest && (dest != EXIT_BLOCK_PTR)) + { + label = block_label (dest); + + /* Make sure source block ends with a jump. */ + + if (src && (src != ENTRY_BLOCK_PTR)) + { + if (!JUMP_P (BB_END (src))) + /* bb just falls through. */ + { + /* make sure there's only one successor */ + gcc_assert (single_succ_p (src)); + + /* Find label in dest block. */ + label = block_label (dest); + + new_jump = emit_jump_insn_after (gen_jump (label), + BB_END (src)); + barrier = emit_barrier_after (new_jump); + JUMP_LABEL (new_jump) = label; + LABEL_NUSES (label) += 1; + src->il.rtl->footer = unlink_insn_chain (barrier, barrier); + /* Mark edge as non-fallthru. */ + crossing_edges[i]->flags &= ~EDGE_FALLTHRU; + } /* end: 'if (GET_CODE ... ' */ + } /* end: 'if (src && src->index...' */ + } /* end: 'if (dest && dest->index...' */ + } /* end: 'if (crossing_edges[i]...' */ + } /* end for loop */ +} -static scope -make_new_scope (level, note) - int level; - rtx note; +/* Find any bb's where the fall-through edge is a crossing edge (note that + these bb's must also contain a conditional jump; we've already + dealt with fall-through edges for blocks that didn't have a + conditional jump in the call to add_labels_and_missing_jumps). + Convert the fall-through edge to non-crossing edge by inserting a + new bb to fall-through into. The new bb will contain an + unconditional jump (crossing edge) to the original fall through + destination. */ + +static void +fix_up_fall_thru_edges (void) { - scope new_scope = xcalloc (1, sizeof (struct scope_def)); - new_scope->level = level; - new_scope->note_beg = note; - return new_scope; + basic_block cur_bb; + basic_block new_bb; + edge succ1; + edge succ2; + edge fall_thru; + edge cond_jump = NULL; + edge e; + bool cond_jump_crosses; + int invert_worked; + rtx old_jump; + rtx fall_thru_label; + rtx barrier; + + FOR_EACH_BB (cur_bb) + { + fall_thru = NULL; + if (EDGE_COUNT (cur_bb->succs) > 0) + succ1 = EDGE_SUCC (cur_bb, 0); + else + succ1 = NULL; + + if (EDGE_COUNT (cur_bb->succs) > 1) + succ2 = EDGE_SUCC (cur_bb, 1); + else + succ2 = NULL; + + /* Find the fall-through edge. */ + + if (succ1 + && (succ1->flags & EDGE_FALLTHRU)) + { + fall_thru = succ1; + cond_jump = succ2; + } + else if (succ2 + && (succ2->flags & EDGE_FALLTHRU)) + { + fall_thru = succ2; + cond_jump = succ1; + } + + if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR)) + { + /* Check to see if the fall-thru edge is a crossing edge. */ + + if (fall_thru->flags & EDGE_CROSSING) + { + /* The fall_thru edge crosses; now check the cond jump edge, if + it exists. */ + + cond_jump_crosses = true; + invert_worked = 0; + old_jump = BB_END (cur_bb); + + /* Find the jump instruction, if there is one. */ + + if (cond_jump) + { + if (!(cond_jump->flags & EDGE_CROSSING)) + cond_jump_crosses = false; + + /* We know the fall-thru edge crosses; if the cond + jump edge does NOT cross, and its destination is the + next block in the bb order, invert the jump + (i.e. fix it so the fall thru does not cross and + the cond jump does). */ + + if (!cond_jump_crosses + && cur_bb->aux == cond_jump->dest) + { + /* Find label in fall_thru block. 
We've already added + any missing labels, so there must be one. */ + + fall_thru_label = block_label (fall_thru->dest); + + if (old_jump && fall_thru_label) + invert_worked = invert_jump (old_jump, + fall_thru_label,0); + if (invert_worked) + { + fall_thru->flags &= ~EDGE_FALLTHRU; + cond_jump->flags |= EDGE_FALLTHRU; + update_br_prob_note (cur_bb); + e = fall_thru; + fall_thru = cond_jump; + cond_jump = e; + cond_jump->flags |= EDGE_CROSSING; + fall_thru->flags &= ~EDGE_CROSSING; + } + } + } + + if (cond_jump_crosses || !invert_worked) + { + /* This is the case where both edges out of the basic + block are crossing edges. Here we will fix up the + fall through edge. The jump edge will be taken care + of later. */ + + new_bb = force_nonfallthru (fall_thru); + + if (new_bb) + { + new_bb->aux = cur_bb->aux; + cur_bb->aux = new_bb; + + /* Make sure new fall-through bb is in same + partition as bb it's falling through from. */ + + BB_COPY_PARTITION (new_bb, cur_bb); + single_succ_edge (new_bb)->flags |= EDGE_CROSSING; + } + + /* Add barrier after new jump */ + + if (new_bb) + { + barrier = emit_barrier_after (BB_END (new_bb)); + new_bb->il.rtl->footer = unlink_insn_chain (barrier, + barrier); + } + else + { + barrier = emit_barrier_after (BB_END (cur_bb)); + cur_bb->il.rtl->footer = unlink_insn_chain (barrier, + barrier); + } + } + } + } + } } +/* This function checks the destination blockof a "crossing jump" to + see if it has any crossing predecessors that begin with a code label + and end with an unconditional jump. If so, it returns that predecessor + block. (This is to avoid creating lots of new basic blocks that all + contain unconditional jumps to the same destination). */ + +static basic_block +find_jump_block (basic_block jump_dest) +{ + basic_block source_bb = NULL; + edge e; + rtx insn; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, jump_dest->preds) + if (e->flags & EDGE_CROSSING) + { + basic_block src = e->src; + + /* Check each predecessor to see if it has a label, and contains + only one executable instruction, which is an unconditional jump. + If so, we can use it. */ + + if (LABEL_P (BB_HEAD (src))) + for (insn = BB_HEAD (src); + !INSN_P (insn) && insn != NEXT_INSN (BB_END (src)); + insn = NEXT_INSN (insn)) + { + if (INSN_P (insn) + && insn == BB_END (src) + && JUMP_P (insn) + && !any_condjump_p (insn)) + { + source_bb = src; + break; + } + } + + if (source_bb) + break; + } + + return source_bb; +} -/* Build a forest representing the scope structure of the function. - Return a pointer to a structure describing the forest. */ +/* Find all BB's with conditional jumps that are crossing edges; + insert a new bb and make the conditional jump branch to the new + bb instead (make the new bb same color so conditional branch won't + be a 'crossing' edge). Insert an unconditional jump from the + new bb to the original destination of the conditional jump. 
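+
+   For illustration only (the labels and the two-section layout below
+   are invented, not taken from any particular function), a crossing
+   conditional branch
+
+       hot section:    ... if (cond) goto L_cold; ...
+       cold section:   L_cold: ...
+
+   becomes a conditional branch to a new block in the hot section, and
+   that new block performs the actual section crossing with an
+   unconditional jump:
+
+       hot section:    ... if (cond) goto L_stub; ...
+                       L_stub: goto L_cold;
+       cold section:   L_cold: ...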
*/ static void -build_scope_forest (forest) - scope_forest_info *forest; +fix_crossing_conditional_branches (void) { - rtx x; - int level, bbi, i; - basic_block curr_bb; - scope root, curr_scope = 0; - - forest->num_trees = 0; - forest->trees = NULL; - level = -1; - root = NULL; - curr_bb = NULL; - bbi = 0; - for (x = get_insns (); x; x = NEXT_INSN (x)) + basic_block cur_bb; + basic_block new_bb; + basic_block last_bb; + basic_block dest; + basic_block prev_bb; + edge succ1; + edge succ2; + edge crossing_edge; + edge new_edge; + rtx old_jump; + rtx set_src; + rtx old_label = NULL_RTX; + rtx new_label; + rtx new_jump; + rtx barrier; + + last_bb = EXIT_BLOCK_PTR->prev_bb; + + FOR_EACH_BB (cur_bb) { - if (bbi < n_basic_blocks && x == BASIC_BLOCK (bbi)->head) - curr_bb = BASIC_BLOCK (bbi); + crossing_edge = NULL; + if (EDGE_COUNT (cur_bb->succs) > 0) + succ1 = EDGE_SUCC (cur_bb, 0); + else + succ1 = NULL; + + if (EDGE_COUNT (cur_bb->succs) > 1) + succ2 = EDGE_SUCC (cur_bb, 1); + else + succ2 = NULL; + + /* We already took care of fall-through edges, so only one successor + can be a crossing edge. */ + + if (succ1 && (succ1->flags & EDGE_CROSSING)) + crossing_edge = succ1; + else if (succ2 && (succ2->flags & EDGE_CROSSING)) + crossing_edge = succ2; + + if (crossing_edge) + { + old_jump = BB_END (cur_bb); + + /* Check to make sure the jump instruction is a + conditional jump. */ + + set_src = NULL_RTX; + + if (any_condjump_p (old_jump)) + { + if (GET_CODE (PATTERN (old_jump)) == SET) + set_src = SET_SRC (PATTERN (old_jump)); + else if (GET_CODE (PATTERN (old_jump)) == PARALLEL) + { + set_src = XVECEXP (PATTERN (old_jump), 0,0); + if (GET_CODE (set_src) == SET) + set_src = SET_SRC (set_src); + else + set_src = NULL_RTX; + } + } - if (GET_CODE (x) == NOTE) - { - if (NOTE_LINE_NUMBER (x) == NOTE_INSN_BLOCK_BEG) + if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE)) { - if (root) + if (GET_CODE (XEXP (set_src, 1)) == PC) + old_label = XEXP (set_src, 2); + else if (GET_CODE (XEXP (set_src, 2)) == PC) + old_label = XEXP (set_src, 1); + + /* Check to see if new bb for jumping to that dest has + already been created; if so, use it; if not, create + a new one. */ + + new_bb = find_jump_block (crossing_edge->dest); + + if (new_bb) + new_label = block_label (new_bb); + else { - scope new_scope; - if (! curr_scope) - abort(); - level++; - new_scope = make_new_scope (level, x); - new_scope->outer = curr_scope; - new_scope->next = NULL; - if (! curr_scope->inner) + /* Create new basic block to be dest for + conditional jump. */ + + new_bb = create_basic_block (NULL, NULL, last_bb); + new_bb->aux = last_bb->aux; + last_bb->aux = new_bb; + prev_bb = last_bb; + last_bb = new_bb; + + /* Update register liveness information. */ + + new_bb->il.rtl->global_live_at_start = ALLOC_REG_SET (®_obstack); + new_bb->il.rtl->global_live_at_end = ALLOC_REG_SET (®_obstack); + COPY_REG_SET (new_bb->il.rtl->global_live_at_end, + prev_bb->il.rtl->global_live_at_end); + COPY_REG_SET (new_bb->il.rtl->global_live_at_start, + prev_bb->il.rtl->global_live_at_end); + + /* Put appropriate instructions in new bb. 
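+                 A fresh code label is emitted first, then an unconditional
+                 jump (or a return, when the original branch targeted
+                 RETURN) to the old destination.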
*/ + + new_label = gen_label_rtx (); + emit_label_before (new_label, BB_HEAD (new_bb)); + BB_HEAD (new_bb) = new_label; + + if (GET_CODE (old_label) == LABEL_REF) { - curr_scope->inner = new_scope; - curr_scope->inner_last = new_scope; + old_label = JUMP_LABEL (old_jump); + new_jump = emit_jump_insn_after (gen_jump + (old_label), + BB_END (new_bb)); } else { - curr_scope->inner_last->next = new_scope; - curr_scope->inner_last = new_scope; + gcc_assert (HAVE_return + && GET_CODE (old_label) == RETURN); + new_jump = emit_jump_insn_after (gen_return (), + BB_END (new_bb)); } - curr_scope = curr_scope->inner_last; + + barrier = emit_barrier_after (new_jump); + JUMP_LABEL (new_jump) = old_label; + new_bb->il.rtl->footer = unlink_insn_chain (barrier, + barrier); + + /* Make sure new bb is in same partition as source + of conditional branch. */ + BB_COPY_PARTITION (new_bb, cur_bb); } + + /* Make old jump branch to new bb. */ + + redirect_jump (old_jump, new_label, 0); + + /* Remove crossing_edge as predecessor of 'dest'. */ + + dest = crossing_edge->dest; + + redirect_edge_succ (crossing_edge, new_bb); + + /* Make a new edge from new_bb to old dest; new edge + will be a successor for new_bb and a predecessor + for 'dest'. */ + + if (EDGE_COUNT (new_bb->succs) == 0) + new_edge = make_edge (new_bb, dest, 0); else - { - int ntrees = forest->num_trees; - level++; - curr_scope = make_new_scope (level, x); - root = curr_scope; - forest->trees = xrealloc (forest->trees, - sizeof (scope) * (ntrees + 1)); - forest->trees[forest->num_trees++] = root; - } - curr_scope->bb_beg = curr_bb; - } - else if (NOTE_LINE_NUMBER (x) == NOTE_INSN_BLOCK_END) - { - curr_scope->bb_end = curr_bb; - curr_scope->note_end = x; - level--; - curr_scope = curr_scope->outer; - if (level == -1) - root = NULL; + new_edge = EDGE_SUCC (new_bb, 0); + + crossing_edge->flags &= ~EDGE_CROSSING; + new_edge->flags |= EDGE_CROSSING; } - } /* if note */ - - if (curr_bb && curr_bb->end == x) - { - curr_bb = NULL; - bbi++; - } - - } /* for */ - - for (i = 0; i < forest->num_trees; i++) - relate_bbs_with_scopes (forest->trees[i]); + } + } } - -/* Remove all the NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes from - the insn chain. */ +/* Find any unconditional branches that cross between hot and cold + sections. Convert them into indirect jumps instead. */ static void -remove_scope_notes () +fix_crossing_unconditional_branches (void) { - rtx x, next; - basic_block currbb = NULL; - - for (x = get_insns (); x; x = next) + basic_block cur_bb; + rtx last_insn; + rtx label; + rtx label_addr; + rtx indirect_jump_sequence; + rtx jump_insn = NULL_RTX; + rtx new_reg; + rtx cur_insn; + edge succ; + + FOR_EACH_BB (cur_bb) { - next = NEXT_INSN (x); - if (NOTE_INSN_BASIC_BLOCK_P (x)) - currbb = NOTE_BASIC_BLOCK (x); + last_insn = BB_END (cur_bb); - if (GET_CODE (x) == NOTE - && (NOTE_LINE_NUMBER (x) == NOTE_INSN_BLOCK_BEG - || NOTE_LINE_NUMBER (x) == NOTE_INSN_BLOCK_END)) + if (EDGE_COUNT (cur_bb->succs) < 1) + continue; + + succ = EDGE_SUCC (cur_bb, 0); + + /* Check to see if bb ends in a crossing (unconditional) jump. At + this point, no crossing jumps should be conditional. */ + + if (JUMP_P (last_insn) + && (succ->flags & EDGE_CROSSING)) { - /* Check if the scope note happens to be the end of a bb. 
*/ - if (currbb && x == currbb->end) - currbb->end = PREV_INSN (x); - if (currbb && x == currbb->head) - abort (); + rtx label2, table; - if (PREV_INSN (x)) - { - NEXT_INSN (PREV_INSN (x)) = next; - PREV_INSN (next) = PREV_INSN (x); + gcc_assert (!any_condjump_p (last_insn)); + + /* Make sure the jump is not already an indirect or table jump. */ - NEXT_INSN (x) = NULL; - PREV_INSN (x) = NULL; + if (!computed_jump_p (last_insn) + && !tablejump_p (last_insn, &label2, &table)) + { + /* We have found a "crossing" unconditional branch. Now + we must convert it to an indirect jump. First create + reference of label, as target for jump. */ + + label = JUMP_LABEL (last_insn); + label_addr = gen_rtx_LABEL_REF (Pmode, label); + LABEL_NUSES (label) += 1; + + /* Get a register to use for the indirect jump. */ + + new_reg = gen_reg_rtx (Pmode); + + /* Generate indirect the jump sequence. */ + + start_sequence (); + emit_move_insn (new_reg, label_addr); + emit_indirect_jump (new_reg); + indirect_jump_sequence = get_insns (); + end_sequence (); + + /* Make sure every instruction in the new jump sequence has + its basic block set to be cur_bb. */ + + for (cur_insn = indirect_jump_sequence; cur_insn; + cur_insn = NEXT_INSN (cur_insn)) + { + if (!BARRIER_P (cur_insn)) + BLOCK_FOR_INSN (cur_insn) = cur_bb; + if (JUMP_P (cur_insn)) + jump_insn = cur_insn; + } + + /* Insert the new (indirect) jump sequence immediately before + the unconditional jump, then delete the unconditional jump. */ + + emit_insn_before (indirect_jump_sequence, last_insn); + delete_insn (last_insn); + + /* Make BB_END for cur_bb be the jump instruction (NOT the + barrier instruction at the end of the sequence...). */ + + BB_END (cur_bb) = jump_insn; } - else - abort (); } } } - -/* Insert scope note pairs for a contained scope tree S after insn IP. */ +/* Add REG_CROSSING_JUMP note to all crossing jump insns. */ static void -insert_intra_1 (s, ip) - scope s; - rtx *ip; +add_reg_crossing_jump_notes (void) { - scope p; - - if (NOTE_BLOCK (s->note_beg)) - { - *ip = emit_note_after (NOTE_INSN_BLOCK_BEG, *ip); - NOTE_BLOCK (*ip) = NOTE_BLOCK (s->note_beg); - } - - for (p = s->inner; p; p = p->next) - insert_intra_1 (p, ip); - - if (NOTE_BLOCK (s->note_beg)) - { - *ip = emit_note_after (NOTE_INSN_BLOCK_END, *ip); - NOTE_BLOCK (*ip) = NOTE_BLOCK (s->note_end); - } + basic_block bb; + edge e; + edge_iterator ei; + + FOR_EACH_BB (bb) + FOR_EACH_EDGE (e, ei, bb->succs) + if ((e->flags & EDGE_CROSSING) + && JUMP_P (BB_END (e->src))) + REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, + NULL_RTX, + REG_NOTES (BB_END + (e->src))); } - -/* Insert NOTE_INSN_BLOCK_END notes and NOTE_INSN_BLOCK_BEG notes for - scopes that are contained within BB. */ - -static void -insert_intra_bb_scope_notes (bb) - basic_block bb; +/* Hot and cold basic blocks are partitioned and put in separate + sections of the .o file, to reduce paging and improve cache + performance (hopefully). This can result in bits of code from the + same function being widely separated in the .o file. However this + is not obvious to the current bb structure. Therefore we must take + care to ensure that: 1). There are no fall_thru edges that cross + between sections; 2). For those architectures which have "short" + conditional branches, all conditional branches that attempt to + cross between sections are converted to unconditional branches; + and, 3). 
For those architectures which have "short" unconditional + branches, all unconditional branches that attempt to cross between + sections are converted to indirect jumps. + + The code for fixing up fall_thru edges that cross between hot and + cold basic blocks does so by creating new basic blocks containing + unconditional branches to the appropriate label in the "other" + section. The new basic block is then put in the same (hot or cold) + section as the original conditional branch, and the fall_thru edge + is modified to fall into the new basic block instead. By adding + this level of indirection we end up with only unconditional branches + crossing between hot and cold sections. + + Conditional branches are dealt with by adding a level of indirection. + A new basic block is added in the same (hot/cold) section as the + conditional branch, and the conditional branch is retargeted to the + new basic block. The new basic block contains an unconditional branch + to the original target of the conditional branch (in the other section). + + Unconditional branches are dealt with by converting them into + indirect jumps. */ + +static void +fix_edges_for_rarely_executed_code (edge *crossing_edges, + int n_crossing_edges) { - scope s = RBI (bb)->scope; - scope p; - rtx ip; - - if (! s) - return; - - ip = bb->head; - if (GET_CODE (ip) == CODE_LABEL) - ip = NEXT_INSN (ip); - - for (p = s->inner; p; p = p->next) + /* Make sure the source of any crossing edge ends in a jump and the + destination of any crossing edge has a label. */ + + add_labels_and_missing_jumps (crossing_edges, n_crossing_edges); + + /* Convert all crossing fall_thru edges to non-crossing fall + thrus to unconditional jumps (that jump to the original fall + thru dest). */ + + fix_up_fall_thru_edges (); + + /* If the architecture does not have conditional branches that can + span all of memory, convert crossing conditional branches into + crossing unconditional branches. */ + + if (!HAS_LONG_COND_BRANCH) + fix_crossing_conditional_branches (); + + /* If the architecture does not have unconditional branches that + can span all of memory, convert crossing unconditional branches + into indirect jumps. Since adding an indirect jump also adds + a new register usage, update the register usage information as + well. */ + + if (!HAS_LONG_UNCOND_BRANCH) { - if (p->bb_beg != NULL && p->bb_beg == p->bb_end && p->bb_beg == bb) - insert_intra_1 (p, &ip); + fix_crossing_unconditional_branches (); + reg_scan (get_insns(), max_reg_num ()); } + + add_reg_crossing_jump_notes (); } - -/* Given two consecutive basic blocks BB1 and BB2 with different scopes, - insert NOTE_INSN_BLOCK_END notes after BB1 and NOTE_INSN_BLOCK_BEG - notes before BB2 such that the notes are correctly balanced. If BB1 or - BB2 is NULL, we are inserting scope notes for the first and last basic - blocks, respectively. */ +/* Verify, in the basic block chain, that there is at most one switch + between hot/cold partitions. This is modelled on + rtl_verify_flow_info_1, but it cannot go inside that function + because this condition will not be true until after + reorder_basic_blocks is called. */ static void -insert_inter_bb_scope_notes (bb1, bb2) - basic_block bb1; - basic_block bb2; +verify_hot_cold_block_grouping (void) { - rtx ip; - scope com; - - /* It is possible that a basic block is not contained in any scope. - In that case, we either open or close a scope but not both. */ - if (bb1 && bb2) - { - scope s1 = RBI (bb1)->scope; - scope s2 = RBI (bb2)->scope; - if (! s1 && ! 
s2) - return; - if (! s1) - bb1 = NULL; - else if (! s2) - bb2 = NULL; - } - - /* Find common ancestor scope. */ - if (bb1 && bb2) + basic_block bb; + int err = 0; + bool switched_sections = false; + int current_partition = 0; + + FOR_EACH_BB (bb) { - scope s1 = RBI (bb1)->scope; - scope s2 = RBI (bb2)->scope; - while (s1 != s2) + if (!current_partition) + current_partition = BB_PARTITION (bb); + if (BB_PARTITION (bb) != current_partition) { - if (! (s1 && s2)) - abort (); - if (s1->level > s2->level) - s1 = s1->outer; - else if (s2->level > s1->level) - s2 = s2->outer; - else + if (switched_sections) { - s1 = s1->outer; - s2 = s2->outer; - } - } - com = s1; - } - else - com = NULL; - - /* Close scopes. */ - if (bb1) - { - scope s = RBI (bb1)->scope; - ip = RBI (bb1)->eff_end; - while (s != com) - { - if (NOTE_BLOCK (s->note_beg)) - { - ip = emit_note_after (NOTE_INSN_BLOCK_END, ip); - NOTE_BLOCK (ip) = NOTE_BLOCK (s->note_end); + error ("multiple hot/cold transitions found (bb %i)", + bb->index); + err = 1; } - s = s->outer; - } - } - - /* Open scopes. */ - if (bb2) - { - scope s = RBI (bb2)->scope; - ip = bb2->head; - while (s != com) - { - if (NOTE_BLOCK (s->note_beg)) - { - ip = emit_note_before (NOTE_INSN_BLOCK_BEG, ip); - NOTE_BLOCK (ip) = NOTE_BLOCK (s->note_beg); + else + { + switched_sections = true; + current_partition = BB_PARTITION (bb); } - s = s->outer; } } + + gcc_assert(!err); } +/* Reorder basic blocks. The main entry point to this file. FLAGS is + the set of flags to pass to cfg_layout_initialize(). */ -/* Rebuild all the NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes based - on the scope forest and the newly reordered basic blocks. */ - -static void -rebuild_scope_notes (forest) - scope_forest_info *forest; +void +reorder_basic_blocks (unsigned int flags) { + int n_traces; int i; + struct trace *traces; - if (forest->num_trees == 0) + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1) return; - /* Start by opening the scopes before the first basic block. */ - insert_inter_bb_scope_notes (NULL, BASIC_BLOCK (0)); + if (targetm.cannot_modify_jumps_p ()) + return; + + cfg_layout_initialize (flags); + + set_edge_can_fallthru_flag (); + mark_dfs_back_edges (); + + /* We are estimating the length of uncond jump insn only once since the code + for getting the insn length always returns the minimal length now. */ + if (uncond_jump_length == 0) + uncond_jump_length = get_uncond_jump_length (); - /* Then, open and close scopes as needed between blocks. */ - for (i = 0; i < n_basic_blocks - 1; i++) + /* We need to know some information for each basic block. */ + array_size = GET_ARRAY_SIZE (last_basic_block); + bbd = XNEWVEC (bbro_basic_block_data, array_size); + for (i = 0; i < array_size; i++) { - basic_block bb1 = BASIC_BLOCK (i); - basic_block bb2 = BASIC_BLOCK (i + 1); - if (RBI (bb1)->scope != RBI (bb2)->scope) - insert_inter_bb_scope_notes (bb1, bb2); - insert_intra_bb_scope_notes (bb1); + bbd[i].start_of_trace = -1; + bbd[i].in_trace = -1; + bbd[i].end_of_trace = -1; + bbd[i].heap = NULL; + bbd[i].node = NULL; } - /* Finally, close the scopes after the last basic block. 
*/ - insert_inter_bb_scope_notes (BASIC_BLOCK (n_basic_blocks - 1), NULL); - insert_intra_bb_scope_notes (BASIC_BLOCK (n_basic_blocks - 1)); -} + traces = XNEWVEC (struct trace, n_basic_blocks); + n_traces = 0; + find_traces (&n_traces, traces); + connect_traces (n_traces, traces); + FREE (traces); + FREE (bbd); + + if (dump_file) + dump_flow_info (dump_file, dump_flags); + cfg_layout_finalize (); + if (flag_reorder_blocks_and_partition) + verify_hot_cold_block_grouping (); +} -/* Free the storage associated with the scope tree at S. */ +/* Determine which partition the first basic block in the function + belongs to, then find the first basic block in the current function + that belongs to a different section, and insert a + NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the + instruction stream. When writing out the assembly code, + encountering this note will make the compiler switch between the + hot and cold text sections. */ static void -free_scope_forest_1 (s) - scope s; +insert_section_boundary_note (void) { - scope p, next; - - for (p = s->inner; p; p = next) + basic_block bb; + rtx new_note; + int first_partition = 0; + + if (flag_reorder_blocks_and_partition) + FOR_EACH_BB (bb) { - next = p->next; - free_scope_forest_1 (p); + if (!first_partition) + first_partition = BB_PARTITION (bb); + if (BB_PARTITION (bb) != first_partition) + { + new_note = emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS, + BB_HEAD (bb)); + break; + } } - - if (s->bbs) - free (s->bbs); - free (s); } +/* Duplicate the blocks containing computed gotos. This basically unfactors + computed gotos that were factored early on in the compilation process to + speed up edge based data flow. We used to not unfactoring them again, + which can seriously pessimize code with many computed jumps in the source + code, such as interpreters. See e.g. PR15242. */ -/* Free the storage associated with the scope forest. */ - -static void -free_scope_forest (forest) - scope_forest_info *forest; +static bool +gate_duplicate_computed_gotos (void) { - int i; - for (i = 0; i < forest->num_trees; i++) - free_scope_forest_1 (forest->trees[i]); + return (optimize > 0 && flag_expensive_optimizations && !optimize_size); } -/* Visualize the scope forest. */ - -void -dump_scope_forest (forest) - scope_forest_info *forest; +static unsigned int +duplicate_computed_gotos (void) { - if (forest->num_trees == 0) - fprintf (stderr, "\n< Empty scope forest >\n"); - else - { - int i; - fprintf (stderr, "\n< Scope forest >\n"); - for (i = 0; i < forest->num_trees; i++) - dump_scope_forest_1 (forest->trees[i], 0); - } -} + basic_block bb, new_bb; + bitmap candidates; + int max_size; + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1) + return 0; -/* Recursive portion of dump_scope_forest. */ + if (targetm.cannot_modify_jumps_p ()) + return 0; -static void -dump_scope_forest_1 (s, indent) - scope s; - int indent; -{ - scope p; - int i; + cfg_layout_initialize (0); + + /* We are estimating the length of uncond jump insn only once + since the code for getting the insn length always returns + the minimal length now. */ + if (uncond_jump_length == 0) + uncond_jump_length = get_uncond_jump_length (); - if (s->bb_beg != NULL && s->bb_beg == s->bb_end - && RBI (s->bb_beg)->scope - && RBI (s->bb_beg)->scope->level + 1 == s->level) + max_size = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS); + candidates = BITMAP_ALLOC (NULL); + + /* Look for blocks that end in a computed jump, and see if such blocks + are suitable for unfactoring. 
If a block is a candidate for unfactoring, + mark it in the candidates. */ + FOR_EACH_BB (bb) { - fprintf (stderr, "%*s", indent, ""); - fprintf (stderr, "BB%d:\n", s->bb_beg->index); + rtx insn; + edge e; + edge_iterator ei; + int size, all_flags; + + /* Build the reorder chain for the original order of blocks. */ + if (bb->next_bb != EXIT_BLOCK_PTR) + bb->aux = bb->next_bb; + + /* Obviously the block has to end in a computed jump. */ + if (!computed_jump_p (BB_END (bb))) + continue; + + /* Only consider blocks that can be duplicated. */ + if (find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX) + || !can_duplicate_block_p (bb)) + continue; + + /* Make sure that the block is small enough. */ + size = 0; + FOR_BB_INSNS (bb, insn) + if (INSN_P (insn)) + { + size += get_attr_min_length (insn); + if (size > max_size) + break; + } + if (size > max_size) + continue; + + /* Final check: there must not be any incoming abnormal edges. */ + all_flags = 0; + FOR_EACH_EDGE (e, ei, bb->preds) + all_flags |= e->flags; + if (all_flags & EDGE_COMPLEX) + continue; + + bitmap_set_bit (candidates, bb->index); } - fprintf (stderr, "%*s", indent, ""); - fprintf (stderr, "{ level %d (block %p)\n", s->level, - (PTR) NOTE_BLOCK (s->note_beg)); + /* Nothing to do if there is no computed jump here. */ + if (bitmap_empty_p (candidates)) + goto done; - fprintf (stderr, "%*s%s", indent, "", "bbs:"); - for (i = 0; i < s->num_bbs; i++) - fprintf (stderr, " %d", s->bbs[i]->index); - fprintf (stderr, "\n"); - - for (p = s->inner; p; p = p->next) - dump_scope_forest_1 (p, indent + 2); + /* Duplicate computed gotos. */ + FOR_EACH_BB (bb) + { + if (bb->il.rtl->visited) + continue; - fprintf (stderr, "%*s", indent, ""); - fprintf (stderr, "}\n"); -} + bb->il.rtl->visited = 1; + /* BB must have one outgoing edge. That edge must not lead to + the exit block or the next block. + The destination must have more than one predecessor. */ + if (!single_succ_p (bb) + || single_succ (bb) == EXIT_BLOCK_PTR + || single_succ (bb) == bb->next_bb + || single_pred_p (single_succ (bb))) + continue; -/* Reorder basic blocks. The main entry point to this file. */ + /* The successor block has to be a duplication candidate. */ + if (!bitmap_bit_p (candidates, single_succ (bb)->index)) + continue; -void -reorder_basic_blocks () + new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb); + new_bb->aux = bb->aux; + bb->aux = new_bb; + new_bb->il.rtl->visited = 1; + } + +done: + cfg_layout_finalize (); + + BITMAP_FREE (candidates); + return 0; +} + +struct tree_opt_pass pass_duplicate_computed_gotos = { - scope_forest_info forest; - int i; + "compgotos", /* name */ + gate_duplicate_computed_gotos, /* gate */ + duplicate_computed_gotos, /* execute */ + NULL, /* sub */ + NULL, /* next */ + 0, /* static_pass_number */ + TV_REORDER_BLOCKS, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + TODO_dump_func, /* todo_flags_finish */ + 0 /* letter */ +}; + + +/* This function is the main 'entrance' for the optimization that + partitions hot and cold basic blocks into separate sections of the + .o file (to improve performance and cache locality). Ideally it + would be called after all optimizations that rearrange the CFG have + been called. However part of this optimization may introduce new + register usage, so it must be called before register allocation has + occurred. 
This means that this optimization is actually called + well before the optimization that reorders basic blocks (see + function above). + + This optimization checks the feedback information to determine + which basic blocks are hot/cold, updates flags on the basic blocks + to indicate which section they belong in. This information is + later used for writing out sections in the .o file. Because hot + and cold sections can be arbitrarily large (within the bounds of + memory), far beyond the size of a single function, it is necessary + to fix up all edges that cross section boundaries, to make sure the + instructions used can actually span the required distance. The + fixes are described below. + + Fall-through edges must be changed into jumps; it is not safe or + legal to fall through across a section boundary. Whenever a + fall-through edge crossing a section boundary is encountered, a new + basic block is inserted (in the same section as the fall-through + source), and the fall through edge is redirected to the new basic + block. The new basic block contains an unconditional jump to the + original fall-through target. (If the unconditional jump is + insufficient to cross section boundaries, that is dealt with a + little later, see below). + + In order to deal with architectures that have short conditional + branches (which cannot span all of memory) we take any conditional + jump that attempts to cross a section boundary and add a level of + indirection: it becomes a conditional jump to a new basic block, in + the same section. The new basic block contains an unconditional + jump to the original target, in the other section. + + For those architectures whose unconditional branch is also + incapable of reaching all of memory, those unconditional jumps are + converted into indirect jumps, through a register. + + IMPORTANT NOTE: This optimization causes some messy interactions + with the cfg cleanup optimizations; those optimizations want to + merge blocks wherever possible, and to collapse indirect jump + sequences (change "A jumps to B jumps to C" directly into "A jumps + to C"). Those optimizations can undo the jump fixes that + partitioning is required to make (see above), in order to ensure + that jumps attempting to cross section boundaries are really able + to cover whatever distance the jump requires (on many architectures + conditional or unconditional jumps are not able to reach all of + memory). Therefore tests have to be inserted into each such + optimization to make sure that it does not undo stuff necessary to + cross partition boundaries. This would be much less of a problem + if we could perform this optimization later in the compilation, but + unfortunately the fact that we may need to create indirect jumps + (through registers) requires that this optimization be performed + before register allocation. 
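+
+   As a purely illustrative example of the last fix (GNU C computed-goto
+   syntax is used here only as shorthand for the RTL that is really
+   emitted), a crossing unconditional jump
+
+       goto L_cold;
+
+   is replaced by a move of the label's address into a fresh pseudo
+   register followed by an indirect jump through that register,
+
+       reg = &&L_cold;
+       goto *reg;
+
+   which is where the new register usage comes from, and hence why this
+   pass has to run while new pseudos can still be created.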
*/ - if (n_basic_blocks <= 1) +static void +partition_hot_cold_basic_blocks (void) +{ + basic_block cur_bb; + edge *crossing_edges; + int n_crossing_edges; + int max_edges = 2 * last_basic_block; + + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1) return; + + crossing_edges = XCNEWVEC (edge, max_edges); - for (i = 0; i < n_basic_blocks; i++) - BASIC_BLOCK (i)->aux = xcalloc (1, sizeof (struct reorder_block_def)); + cfg_layout_initialize (0); + + FOR_EACH_BB (cur_bb) + if (cur_bb->index >= NUM_FIXED_BLOCKS + && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS) + cur_bb->aux = cur_bb->next_bb; + + find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges, + &n_crossing_edges, + &max_edges); + + if (n_crossing_edges > 0) + fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges); + + free (crossing_edges); - EXIT_BLOCK_PTR->aux = xcalloc (1, sizeof (struct reorder_block_def)); + cfg_layout_finalize(); +} + +static bool +gate_handle_reorder_blocks (void) +{ + return (optimize > 0); +} - build_scope_forest (&forest); - remove_scope_notes (); - record_effective_endpoints (); - make_reorder_chain (); - fixup_reorder_chain (); +/* Reorder basic blocks. */ +static unsigned int +rest_of_handle_reorder_blocks (void) +{ + bool changed; + unsigned int liveness_flags; -#ifdef ENABLE_CHECKING - verify_insn_chain (); -#endif + /* Last attempt to optimize CFG, as scheduling, peepholing and insn + splitting possibly introduced more crossjumping opportunities. */ + liveness_flags = (!HAVE_conditional_execution ? CLEANUP_UPDATE_LIFE : 0); + changed = cleanup_cfg (CLEANUP_EXPENSIVE | liveness_flags); - rebuild_scope_notes (&forest); - free_scope_forest (&forest); - reorder_blocks (); + if (flag_sched2_use_traces && flag_schedule_insns_after_reload) + { + timevar_push (TV_TRACER); + tracer (liveness_flags); + timevar_pop (TV_TRACER); + } - for (i = 0; i < n_basic_blocks; i++) - free (BASIC_BLOCK (i)->aux); + if (flag_reorder_blocks || flag_reorder_blocks_and_partition) + reorder_basic_blocks (liveness_flags); + if (flag_reorder_blocks || flag_reorder_blocks_and_partition + || (flag_sched2_use_traces && flag_schedule_insns_after_reload)) + changed |= cleanup_cfg (CLEANUP_EXPENSIVE | liveness_flags); + + /* On conditional execution targets we can not update the life cheaply, so + we deffer the updating to after both cleanups. This may lose some cases + but should not be terribly bad. */ + if (changed && HAVE_conditional_execution) + update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, + PROP_DEATH_NOTES); + + /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */ + insert_section_boundary_note (); + return 0; +} - free (EXIT_BLOCK_PTR->aux); +struct tree_opt_pass pass_reorder_blocks = +{ + "bbro", /* name */ + gate_handle_reorder_blocks, /* gate */ + rest_of_handle_reorder_blocks, /* execute */ + NULL, /* sub */ + NULL, /* next */ + 0, /* static_pass_number */ + TV_REORDER_BLOCKS, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + TODO_dump_func, /* todo_flags_finish */ + 'B' /* letter */ +}; + +static bool +gate_handle_partition_blocks (void) +{ + /* The optimization to partition hot/cold basic blocks into separate + sections of the .o file does not work well with linkonce or with + user defined section attributes. Don't call it if either case + arises. 
*/ + + return (flag_reorder_blocks_and_partition + && !DECL_ONE_ONLY (current_function_decl) + && !user_defined_section_attribute); +} -#ifdef ENABLE_CHECKING - verify_flow_info (); -#endif +/* Partition hot and cold basic blocks. */ +static unsigned int +rest_of_handle_partition_blocks (void) +{ + no_new_pseudos = 0; + partition_hot_cold_basic_blocks (); + allocate_reg_life_data (); + update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, + PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES); + no_new_pseudos = 1; + return 0; } + +struct tree_opt_pass pass_partition_blocks = +{ + "bbpart", /* name */ + gate_handle_partition_blocks, /* gate */ + rest_of_handle_partition_blocks, /* execute */ + NULL, /* sub */ + NULL, /* next */ + 0, /* static_pass_number */ + TV_REORDER_BLOCKS, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + TODO_dump_func, /* todo_flags_finish */ + 0 /* letter */ +}; + +
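The hot/cold machinery added by this patch ultimately rests on two small
decisions: which partition a basic block belongs to, and which edges cross
between partitions.  The self-contained sketch below restates just those two
decisions.  The types and helper names are simplified stand-ins invented for
illustration; they are not GCC's basic_block/edge structures, and the real
work (setting EDGE_CROSSING, emitting section notes, fixing up branches) is
done by the functions above.

    #include <stdbool.h>
    #include <stddef.h>

    /* Invented, simplified stand-ins for GCC's basic_block and edge.  */
    enum partition { HOT_PARTITION, COLD_PARTITION };

    struct bb
    {
      enum partition part;
      bool never_executed;   /* what probably_never_executed_bb_p () reports */
      bool is_entry, is_exit;
    };

    struct bb_edge
    {
      struct bb *src, *dest;
    };

    /* Put every block into the hot or cold partition, in the spirit of
       find_rarely_executed_basic_blocks_and_crossing_edges.  */
    static void
    assign_partitions (struct bb *blocks, size_t n)
    {
      size_t i;
      for (i = 0; i < n; i++)
        blocks[i].part = blocks[i].never_executed
                         ? COLD_PARTITION : HOT_PARTITION;
    }

    /* An edge "crosses" when both endpoints are real blocks that ended up
       in different partitions; only such edges need the fall-through,
       conditional-branch and unconditional-branch fix-ups above.  */
    static bool
    edge_crosses_p (const struct bb_edge *e)
    {
      return !e->src->is_entry
             && !e->dest->is_exit
             && e->src->part != e->dest->part;
    }

Everything bbpart does after this classification (add_labels_and_missing_jumps,
fix_up_fall_thru_edges, fix_crossing_conditional_branches,
fix_crossing_unconditional_branches) exists only to make the edges for which
edge_crosses_p would return true legal to emit.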