X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fpredict.c;h=f338789729ec23a030c7e46b6351a36613afc29b;hb=e561fc1cb16687a153589e70e16ede8de11116af;hp=ce8ed2d04496c4a0813f871448fc03b97e12da97;hpb=345ac34a19ee8fefc1d767f4eb9103a781c641d3;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/predict.c b/gcc/predict.c index ce8ed2d0449..f338789729e 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -1,5 +1,5 @@ /* Branch prediction routines for the GNU compiler. - Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc. + Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. @@ -30,6 +30,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "config.h" #include "system.h" +#include "coretypes.h" +#include "tm.h" #include "tree.h" #include "rtl.h" #include "tm_p.h" @@ -45,41 +47,40 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "recog.h" #include "expr.h" #include "predict.h" -#include "profile.h" -#include "real.h" +#include "coverage.h" +#include "sreal.h" #include "params.h" #include "target.h" - -/* real constants: 0, 1, 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE, 0.5, - REAL_BB_FREQ_MAX. */ -static REAL_VALUE_TYPE real_zero, real_one, real_almost_one, real_br_prob_base, - real_one_half, real_bb_freq_max; +#include "cfgloop.h" +#include "tree-flow.h" +#include "ggc.h" +#include "tree-dump.h" +#include "tree-pass.h" +#include "timevar.h" +#include "tree-scalar-evolution.h" +#include "cfgloop.h" + +/* real constants: 0, 1, 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE, + 1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX. */ +static sreal real_zero, real_one, real_almost_one, real_br_prob_base, + real_inv_br_prob_base, real_one_half, real_bb_freq_max; /* Random guesstimation given names. 
*/ -#define PROB_NEVER (0) #define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 10 - 1) -#define PROB_UNLIKELY (REG_BR_PROB_BASE * 4 / 10 - 1) #define PROB_EVEN (REG_BR_PROB_BASE / 2) -#define PROB_LIKELY (REG_BR_PROB_BASE - PROB_UNLIKELY) #define PROB_VERY_LIKELY (REG_BR_PROB_BASE - PROB_VERY_UNLIKELY) #define PROB_ALWAYS (REG_BR_PROB_BASE) -static bool predicted_by_p PARAMS ((basic_block, - enum br_predictor)); -static void combine_predictions_for_insn PARAMS ((rtx, basic_block)); -static void dump_prediction PARAMS ((enum br_predictor, int, - basic_block, int)); -static void estimate_loops_at_level PARAMS ((struct loop *loop)); -static void propagate_freq PARAMS ((basic_block)); -static void estimate_bb_frequencies PARAMS ((struct loops *)); -static void counts_to_freqs PARAMS ((void)); -static void process_note_predictions PARAMS ((basic_block, int *, int *, - sbitmap *)); -static void process_note_prediction PARAMS ((basic_block, int *, int *, - sbitmap *, int, int)); -static bool last_basic_block_p PARAMS ((basic_block)); -static void compute_function_frequency PARAMS ((void)); -static void choose_function_section PARAMS ((void)); +static void combine_predictions_for_insn (rtx, basic_block); +static void dump_prediction (FILE *, enum br_predictor, int, basic_block, int); +static void estimate_loops_at_level (struct loop *loop); +static void propagate_freq (struct loop *); +static void estimate_bb_frequencies (struct loops *); +static void predict_paths_leading_to (basic_block, int *, enum br_predictor, enum prediction); +static bool last_basic_block_p (basic_block); +static void compute_function_frequency (void); +static void choose_function_section (void); +static bool can_predict_insn_p (rtx); /* Information we hold about each branch predictor. Filled using information from predict.def. */ @@ -110,17 +111,14 @@ static const struct predictor_info predictor_info[]= { #undef DEF_PREDICTOR /* Return true in case BB can be CPU intensive and should be optimized - for maximal perofmrance. */ + for maximal performance. */ bool -maybe_hot_bb_p (bb) - basic_block bb; +maybe_hot_bb_p (basic_block bb) { - if (profile_info.count_profiles_merged - && flag_branch_probabilities + if (profile_info && flag_branch_probabilities && (bb->count - < profile_info.max_counter_in_program - / PARAM_VALUE (HOT_BB_COUNT_FRACTION))) + < profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION))) return false; if (bb->frequency < BB_FREQ_MAX / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)) return false; @@ -130,14 +128,11 @@ maybe_hot_bb_p (bb) /* Return true in case BB is cold and should be optimized for size. */ bool -probably_cold_bb_p (bb) - basic_block bb; +probably_cold_bb_p (basic_block bb) { - if (profile_info.count_profiles_merged - && flag_branch_probabilities + if (profile_info && flag_branch_probabilities && (bb->count - < profile_info.max_counter_in_program - / PARAM_VALUE (HOT_BB_COUNT_FRACTION))) + < profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION))) return true; if (bb->frequency < BB_FREQ_MAX / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)) return true; @@ -146,42 +141,49 @@ probably_cold_bb_p (bb) /* Return true in case BB is probably never executed. 
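A standalone sketch of the hot/cold test implemented by maybe_hot_bb_p above, with the two PARAM_VALUE lookups replaced by illustrative constants (the real defaults live in params.def); it only shows how the profile-count and frequency thresholds interact.

#include <stdio.h>

#define BB_FREQ_MAX 10000
/* Illustrative stand-ins for PARAM_VALUE (HOT_BB_COUNT_FRACTION) and
   PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION).  */
#define HOT_BB_COUNT_FRACTION 10000
#define HOT_BB_FREQUENCY_FRACTION 1000

/* A block counts as hot unless its profile count or its estimated
   frequency falls below a small fraction of the function maximum.  */
static int
maybe_hot (long long count, long long sum_max, int frequency, int have_profile)
{
  if (have_profile && count < sum_max / HOT_BB_COUNT_FRACTION)
    return 0;
  if (frequency < BB_FREQ_MAX / HOT_BB_FREQUENCY_FRACTION)
    return 0;
  return 1;
}

int
main (void)
{
  printf ("%d\n", maybe_hot (5, 1000000, 5000, 1));    /* 0: count too low */
  printf ("%d\n", maybe_hot (5000, 1000000, 5000, 1)); /* 1: hot block     */
  return 0;
}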
*/ bool -probably_never_executed_bb_p (bb) - basic_block bb; +probably_never_executed_bb_p (basic_block bb) { - if (profile_info.count_profiles_merged - && flag_branch_probabilities) - return ((bb->count + profile_info.count_profiles_merged / 2) - / profile_info.count_profiles_merged) == 0; + if (profile_info && flag_branch_probabilities) + return ((bb->count + profile_info->runs / 2) / profile_info->runs) == 0; return false; } /* Return true if the one of outgoing edges is already predicted by PREDICTOR. */ -static bool -predicted_by_p (bb, predictor) - basic_block bb; - enum br_predictor predictor; +bool +rtl_predicted_by_p (basic_block bb, enum br_predictor predictor) { rtx note; - if (!INSN_P (bb->end)) + if (!INSN_P (BB_END (bb))) return false; - for (note = REG_NOTES (bb->end); note; note = XEXP (note, 1)) + for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_BR_PRED && INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor) return true; return false; } +/* Return true if the one of outgoing edges is already predicted by + PREDICTOR. */ + +bool +tree_predicted_by_p (basic_block bb, enum br_predictor predictor) +{ + struct edge_prediction *i = bb_ann (bb)->predictions; + for (i = bb_ann (bb)->predictions; i; i = i->next) + if (i->predictor == predictor) + return true; + return false; +} + void -predict_insn (insn, predictor, probability) - rtx insn; - int probability; - enum br_predictor predictor; +predict_insn (rtx insn, enum br_predictor predictor, int probability) { if (!any_condjump_p (insn)) abort (); + if (!flag_guess_branch_prob) + return; REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_BR_PRED, @@ -194,10 +196,8 @@ predict_insn (insn, predictor, probability) /* Predict insn by given predictor. */ void -predict_insn_def (insn, predictor, taken) - rtx insn; - enum br_predictor predictor; - enum prediction taken; +predict_insn_def (rtx insn, enum br_predictor predictor, + enum prediction taken) { int probability = predictor_info[(int) predictor].hitrate; @@ -210,13 +210,10 @@ predict_insn_def (insn, predictor, taken) /* Predict edge E with given probability if possible. */ void -predict_edge (e, predictor, probability) - edge e; - int probability; - enum br_predictor predictor; +rtl_predict_edge (edge e, enum br_predictor predictor, int probability) { rtx last_insn; - last_insn = e->src->end; + last_insn = BB_END (e->src); /* We can store the branch prediction information only about conditional jumps. */ @@ -230,13 +227,35 @@ predict_edge (e, predictor, probability) predict_insn (last_insn, predictor, probability); } +/* Predict edge E with the given PROBABILITY. */ +void +tree_predict_edge (edge e, enum br_predictor predictor, int probability) +{ + struct edge_prediction *i = ggc_alloc (sizeof (struct edge_prediction)); + + i->next = bb_ann (e->src)->predictions; + bb_ann (e->src)->predictions = i; + i->probability = probability; + i->predictor = predictor; + i->edge = e; +} + +/* Return true when we can store prediction on insn INSN. + At the moment we represent predictions only on conditional + jumps, not at computed jump or other complicated cases. */ +static bool +can_predict_insn_p (rtx insn) +{ + return (JUMP_P (insn) + && any_condjump_p (insn) + && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2); +} + /* Predict edge E by given predictor if possible. 
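A minimal, self-contained sketch of the per-block prediction list that tree_predict_edge builds and tree_predicted_by_p scans above; ggc_alloc and the bb_ann annotation are replaced by malloc and a bare list head purely for illustration.

#include <stdlib.h>

struct edge_prediction
{
  struct edge_prediction *next;
  void *edge;        /* the predicted outgoing edge       */
  int predictor;     /* enum br_predictor value           */
  int probability;   /* out of REG_BR_PROB_BASE           */
};

/* Push a new prediction onto the block's list, newest first.  */
void
record_prediction (struct edge_prediction **head, void *e,
                   int predictor, int probability)
{
  struct edge_prediction *i = malloc (sizeof *i);
  i->next = *head;
  i->edge = e;
  i->predictor = predictor;
  i->probability = probability;
  *head = i;
}

/* True if some prediction from PREDICTOR is already recorded.  */
int
predicted_by (struct edge_prediction *head, int predictor)
{
  struct edge_prediction *i;
  for (i = head; i; i = i->next)
    if (i->predictor == predictor)
      return 1;
  return 0;
}

combine_predictions_for_bb later walks the same list, folds the entries into a single probability, and clears it.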
*/ void -predict_edge_def (e, predictor, taken) - edge e; - enum br_predictor predictor; - enum prediction taken; +predict_edge_def (edge e, enum br_predictor predictor, + enum prediction taken) { int probability = predictor_info[(int) predictor].hitrate; @@ -250,8 +269,7 @@ predict_edge_def (e, predictor, taken) to be done each time we invert the condition used by the jump. */ void -invert_br_probabilities (insn) - rtx insn; +invert_br_probabilities (rtx insn) { rtx note; @@ -266,49 +284,65 @@ invert_br_probabilities (insn) /* Dump information about the branch prediction to the output file. */ static void -dump_prediction (predictor, probability, bb, used) - enum br_predictor predictor; - int probability; - basic_block bb; - int used; +dump_prediction (FILE *file, enum br_predictor predictor, int probability, + basic_block bb, int used) { - edge e = bb->succ; + edge e; + edge_iterator ei; - if (!rtl_dump_file) + if (!file) return; - while (e && (e->flags & EDGE_FALLTHRU)) - e = e->succ_next; + FOR_EACH_EDGE (e, ei, bb->succs) + if (! (e->flags & EDGE_FALLTHRU)) + break; - fprintf (rtl_dump_file, " %s heuristics%s: %.1f%%", + fprintf (file, " %s heuristics%s: %.1f%%", predictor_info[predictor].name, used ? "" : " (ignored)", probability * 100.0 / REG_BR_PROB_BASE); if (bb->count) { - fprintf (rtl_dump_file, " exec "); - fprintf (rtl_dump_file, HOST_WIDEST_INT_PRINT_DEC, bb->count); + fprintf (file, " exec "); + fprintf (file, HOST_WIDEST_INT_PRINT_DEC, bb->count); if (e) { - fprintf (rtl_dump_file, " hit "); - fprintf (rtl_dump_file, HOST_WIDEST_INT_PRINT_DEC, e->count); - fprintf (rtl_dump_file, " (%.1f%%)", e->count * 100.0 / bb->count); + fprintf (file, " hit "); + fprintf (file, HOST_WIDEST_INT_PRINT_DEC, e->count); + fprintf (file, " (%.1f%%)", e->count * 100.0 / bb->count); } } - fprintf (rtl_dump_file, "\n"); + fprintf (file, "\n"); +} + +/* We can not predict the probabilities of outgoing edges of bb. Set them + evenly and hope for the best. */ +static void +set_even_probabilities (basic_block bb) +{ + int nedges = 0; + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) + if (!(e->flags & (EDGE_EH | EDGE_FAKE))) + nedges ++; + FOR_EACH_EDGE (e, ei, bb->succs) + if (!(e->flags & (EDGE_EH | EDGE_FAKE))) + e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; + else + e->probability = 0; } /* Combine all REG_BR_PRED notes into single probability and attach REG_BR_PROB note if not already present. Remove now useless REG_BR_PRED notes. */ static void -combine_predictions_for_insn (insn, bb) - rtx insn; - basic_block bb; +combine_predictions_for_insn (rtx insn, basic_block bb) { - rtx prob_note = find_reg_note (insn, REG_BR_PROB, 0); - rtx *pnote = ®_NOTES (insn); + rtx prob_note; + rtx *pnote; rtx note; int best_probability = PROB_EVEN; int best_predictor = END_PREDICTORS; @@ -317,13 +351,20 @@ combine_predictions_for_insn (insn, bb) bool first_match = false; bool found = false; - if (rtl_dump_file) - fprintf (rtl_dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn), + if (!can_predict_insn_p (insn)) + { + set_even_probabilities (bb); + return; + } + + prob_note = find_reg_note (insn, REG_BR_PROB, 0); + pnote = ®_NOTES (insn); + if (dump_file) + fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn), bb->index); /* We implement "first match" heuristics and use probability guessed - by predictor with smallest index. In the future we will use better - probability combination techniques. */ + by predictor with smallest index. 
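A quick check of the even-split arithmetic used by set_even_probabilities above: REG_BR_PROB_BASE is divided among the non-EH, non-fake successor edges, rounding to nearest.

#include <stdio.h>

#define REG_BR_PROB_BASE 10000

int
main (void)
{
  int nedges;

  /* Each eligible edge receives (REG_BR_PROB_BASE + nedges / 2) / nedges.  */
  for (nedges = 1; nedges <= 3; nedges++)
    printf ("%d edge(s) -> %d per edge\n",
            nedges, (REG_BR_PROB_BASE + nedges / 2) / nedges);
  /* Prints 10000, 5000 and 3333 respectively.  */
  return 0;
}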
*/ for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_BR_PRED) { @@ -355,16 +396,19 @@ combine_predictions_for_insn (insn, bb) first_match = true; if (!found) - dump_prediction (PRED_NO_PREDICTION, combined_probability, bb, true); + dump_prediction (dump_file, PRED_NO_PREDICTION, + combined_probability, bb, true); else { - dump_prediction (PRED_DS_THEORY, combined_probability, bb, !first_match); - dump_prediction (PRED_FIRST_MATCH, best_probability, bb, first_match); + dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, + bb, !first_match); + dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, + bb, first_match); } if (first_match) combined_probability = best_probability; - dump_prediction (PRED_COMBINED, combined_probability, bb, true); + dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true); while (*pnote) { @@ -373,7 +417,7 @@ combine_predictions_for_insn (insn, bb) int predictor = INTVAL (XEXP (XEXP (*pnote, 0), 0)); int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1)); - dump_prediction (predictor, probability, bb, + dump_prediction (dump_file, predictor, probability, bb, !first_match || best_predictor == predictor); *pnote = XEXP (*pnote, 1); } @@ -389,111 +433,416 @@ combine_predictions_for_insn (insn, bb) /* Save the prediction into CFG in case we are seeing non-degenerated conditional jump. */ - if (bb->succ->succ_next) + if (EDGE_COUNT (bb->succs) > 1) { BRANCH_EDGE (bb)->probability = combined_probability; FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - combined_probability; } } + else if (EDGE_COUNT (bb->succs) > 1) + { + int prob = INTVAL (XEXP (prob_note, 0)); + + BRANCH_EDGE (bb)->probability = prob; + FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob; + } + else + EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE; } -/* Statically estimate the probability that a branch will be taken. - ??? In the next revision there will be a number of other predictors added - from the above references. Further, each heuristic will be factored out - into its own function for clarity (and to facilitate the combination of - predictions). */ +/* Combine predictions into single probability and store them into CFG. + Remove now useless prediction entries. */ -void -estimate_probability (loops_info) - struct loops *loops_info; +static void +combine_predictions_for_bb (FILE *file, basic_block bb) +{ + int best_probability = PROB_EVEN; + int best_predictor = END_PREDICTORS; + int combined_probability = REG_BR_PROB_BASE / 2; + int d; + bool first_match = false; + bool found = false; + struct edge_prediction *pred; + int nedges = 0; + edge e, first = NULL, second = NULL; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) + if (!(e->flags & (EDGE_EH | EDGE_FAKE))) + { + nedges ++; + if (first && !second) + second = e; + if (!first) + first = e; + } + + /* When there is no successor or only one choice, prediction is easy. + + We are lazy for now and predict only basic blocks with two outgoing + edges. It is possible to predict generic case too, but we have to + ignore first match heuristics and do more involved combining. Implement + this later. 
*/ + if (nedges != 2) + { + if (!bb->count) + set_even_probabilities (bb); + bb_ann (bb)->predictions = NULL; + if (file) + fprintf (file, "%i edges in bb %i predicted to even probabilities\n", + nedges, bb->index); + return; + } + + if (file) + fprintf (file, "Predictions for bb %i\n", bb->index); + + /* We implement "first match" heuristics and use probability guessed + by predictor with smallest index. */ + for (pred = bb_ann (bb)->predictions; pred; pred = pred->next) + { + int predictor = pred->predictor; + int probability = pred->probability; + + if (pred->edge != first) + probability = REG_BR_PROB_BASE - probability; + + found = true; + if (best_predictor > predictor) + best_probability = probability, best_predictor = predictor; + + d = (combined_probability * probability + + (REG_BR_PROB_BASE - combined_probability) + * (REG_BR_PROB_BASE - probability)); + + /* Use FP math to avoid overflows of 32bit integers. */ + if (d == 0) + /* If one probability is 0% and one 100%, avoid division by zero. */ + combined_probability = REG_BR_PROB_BASE / 2; + else + combined_probability = (((double) combined_probability) * probability + * REG_BR_PROB_BASE / d + 0.5); + } + + /* Decide which heuristic to use. In case we didn't match anything, + use no_prediction heuristic, in case we did match, use either + first match or Dempster-Shaffer theory depending on the flags. */ + + if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH) + first_match = true; + + if (!found) + dump_prediction (file, PRED_NO_PREDICTION, combined_probability, bb, true); + else + { + dump_prediction (file, PRED_DS_THEORY, combined_probability, bb, + !first_match); + dump_prediction (file, PRED_FIRST_MATCH, best_probability, bb, + first_match); + } + + if (first_match) + combined_probability = best_probability; + dump_prediction (file, PRED_COMBINED, combined_probability, bb, true); + + for (pred = bb_ann (bb)->predictions; pred; pred = pred->next) + { + int predictor = pred->predictor; + int probability = pred->probability; + + if (pred->edge != EDGE_SUCC (bb, 0)) + probability = REG_BR_PROB_BASE - probability; + dump_prediction (file, predictor, probability, bb, + !first_match || best_predictor == predictor); + } + bb_ann (bb)->predictions = NULL; + + if (!bb->count) + { + first->probability = combined_probability; + second->probability = REG_BR_PROB_BASE - combined_probability; + } +} + +/* Predict edge probabilities by exploiting loop structure. + When RTLSIMPLELOOPS is set, attempt to count number of iterations by analyzing + RTL otherwise use tree based approach. */ +static void +predict_loops (struct loops *loops_info, bool rtlsimpleloops) { - sbitmap *dominators, *post_dominators; - int i; + unsigned i; - dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks); - post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks); - calculate_dominance_info (NULL, dominators, CDI_DOMINATORS); - calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS); + if (!rtlsimpleloops) + scev_initialize (loops_info); /* Try to predict out blocks in a loop that are not part of a natural loop. 
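A worked instance of the combination formula in combine_predictions_for_bb above, rewritten as a standalone function: two independent predictions of 80% and 60% for the same edge combine to about 85.7%, while agreeing 50% predictions leave the estimate at one half.

#include <stdio.h>

#define REG_BR_PROB_BASE 10000

/* Same arithmetic as the loop above, applied to two fixed-point
   probabilities (REG_BR_PROB_BASE == 100%).  */
static int
combine (int p1, int p2)
{
  double d = (double) p1 * p2
             + (double) (REG_BR_PROB_BASE - p1) * (REG_BR_PROB_BASE - p2);

  if (d == 0)
    return REG_BR_PROB_BASE / 2;
  return (double) p1 * p2 * REG_BR_PROB_BASE / d + 0.5;
}

int
main (void)
{
  printf ("%d\n", combine (8000, 6000));  /* 8571, i.e. ~85.7%  */
  printf ("%d\n", combine (5000, 5000));  /* 5000, unchanged    */
  return 0;
}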
*/ - for (i = 0; i < loops_info->num; i++) + for (i = 1; i < loops_info->num; i++) { - int j; + basic_block bb, *bbs; + unsigned j; int exits; - struct loop *loop = &loops_info->array[i]; + struct loop *loop = loops_info->parray[i]; + struct niter_desc desc; + unsigned HOST_WIDE_INT niter; - flow_loop_scan (loops_info, loop, LOOP_EXIT_EDGES); + flow_loop_scan (loop, LOOP_EXIT_EDGES); exits = loop->num_exits; - for (j = loop->first->index; j <= loop->last->index; ++j) - if (TEST_BIT (loop->nodes, j)) - { - int header_found = 0; - edge e; + if (rtlsimpleloops) + { + iv_analysis_loop_init (loop); + find_simple_exit (loop, &desc); + + if (desc.simple_p && desc.const_iter) + { + int prob; + niter = desc.niter + 1; + if (niter == 0) /* We might overflow here. */ + niter = desc.niter; + + prob = (REG_BR_PROB_BASE + - (REG_BR_PROB_BASE + niter /2) / niter); + /* Branch prediction algorithm gives 0 frequency for everything + after the end of loop for loop having 0 probability to finish. */ + if (prob == REG_BR_PROB_BASE) + prob = REG_BR_PROB_BASE - 1; + predict_edge (desc.in_edge, PRED_LOOP_ITERATIONS, + prob); + } + } + else + { + edge *exits; + unsigned j, n_exits; + struct tree_niter_desc niter_desc; + + exits = get_loop_exit_edges (loop, &n_exits); + for (j = 0; j < n_exits; j++) + { + tree niter = NULL; + + if (number_of_iterations_exit (loop, exits[j], &niter_desc)) + niter = niter_desc.niter; + if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST) + niter = loop_niter_by_eval (loop, exits[j]); + + if (TREE_CODE (niter) == INTEGER_CST) + { + int probability; + if (host_integerp (niter, 1) + && tree_int_cst_lt (niter, + build_int_cstu (NULL_TREE, + REG_BR_PROB_BASE - 1))) + { + HOST_WIDE_INT nitercst = tree_low_cst (niter, 1) + 1; + probability = (REG_BR_PROB_BASE + nitercst / 2) / nitercst; + } + else + probability = 1; + + predict_edge (exits[j], PRED_LOOP_ITERATIONS, probability); + } + } + + free (exits); + } + + bbs = get_loop_body (loop); + + for (j = 0; j < loop->num_nodes; j++) + { + int header_found = 0; + edge e; + edge_iterator ei; + + bb = bbs[j]; /* Bypass loop heuristics on continue statement. These statements construct loops via "non-loop" constructs in the source language and are better to be handled separately. */ - if (predicted_by_p (BASIC_BLOCK (j), PRED_CONTINUE)) + if ((rtlsimpleloops && !can_predict_insn_p (BB_END (bb))) + || predicted_by_p (bb, PRED_CONTINUE)) continue; - /* Loop branch heuristics - predict an edge back to a - loop's head as taken. */ - for (e = BASIC_BLOCK(j)->succ; e; e = e->succ_next) - if (e->dest == loop->header - && e->src == loop->latch) - { - header_found = 1; - predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN); - } + /* Loop branch heuristics - predict an edge back to a + loop's head as taken. */ + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->dest == loop->header + && e->src == loop->latch) + { + header_found = 1; + predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN); + } - /* Loop exit heuristics - predict an edge exiting the loop if the - conditinal has no loop header successors as not taken. */ - if (!header_found) - for (e = BASIC_BLOCK(j)->succ; e; e = e->succ_next) - if (e->dest->index < 0 - || !TEST_BIT (loop->nodes, e->dest->index)) - predict_edge - (e, PRED_LOOP_EXIT, - (REG_BR_PROB_BASE - - predictor_info [(int) PRED_LOOP_EXIT].hitrate) - / exits); - } + /* Loop exit heuristics - predict an edge exiting the loop if the + conditional has no loop header successors as not taken. 
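The iteration-count heuristic above converts a known trip count into a branch probability: a loop expected to run N times continues with probability roughly 1 - 1/N. A small check of the fixed-point rounding, for illustration only:

#include <stdio.h>

#define REG_BR_PROB_BASE 10000

int
main (void)
{
  unsigned long long niter;

  for (niter = 1; niter <= 1000; niter *= 10)
    {
      /* The value handed to PRED_LOOP_ITERATIONS in the RTL path above;
         the tree path computes the complementary exit probability.  */
      int prob = REG_BR_PROB_BASE - (REG_BR_PROB_BASE + niter / 2) / niter;

      if (prob == REG_BR_PROB_BASE)
        prob = REG_BR_PROB_BASE - 1;
      printf ("niter %4llu -> %d\n", niter, prob);
    }
  /* 1 -> 0, 10 -> 9000, 100 -> 9900, 1000 -> 9990.  */
  return 0;
}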
*/ + if (!header_found) + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->dest->index < 0 + || !flow_bb_inside_loop_p (loop, e->dest)) + predict_edge + (e, PRED_LOOP_EXIT, + (REG_BR_PROB_BASE + - predictor_info [(int) PRED_LOOP_EXIT].hitrate) + / exits); + } + + /* Free basic blocks from get_loop_body. */ + free (bbs); + } + + if (!rtlsimpleloops) + scev_finalize (); +} + +/* Attempt to predict probabilities of BB outgoing edges using local + properties. */ +static void +bb_estimate_probability_locally (basic_block bb) +{ + rtx last_insn = BB_END (bb); + rtx cond; + + if (! can_predict_insn_p (last_insn)) + return; + cond = get_condition (last_insn, NULL, false, false); + if (! cond) + return; + + /* Try "pointer heuristic." + A comparison ptr == 0 is predicted as false. + Similarly, a comparison ptr1 == ptr2 is predicted as false. */ + if (COMPARISON_P (cond) + && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0))) + || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1))))) + { + if (GET_CODE (cond) == EQ) + predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN); + else if (GET_CODE (cond) == NE) + predict_insn_def (last_insn, PRED_POINTER, TAKEN); } + else + + /* Try "opcode heuristic." + EQ tests are usually false and NE tests are usually true. Also, + most quantities are positive, so we can make the appropriate guesses + about signed comparisons against zero. */ + switch (GET_CODE (cond)) + { + case CONST_INT: + /* Unconditional branch. */ + predict_insn_def (last_insn, PRED_UNCONDITIONAL, + cond == const0_rtx ? NOT_TAKEN : TAKEN); + break; + + case EQ: + case UNEQ: + /* Floating point comparisons appears to behave in a very + unpredictable way because of special role of = tests in + FP code. */ + if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0)))) + ; + /* Comparisons with 0 are often used for booleans and there is + nothing useful to predict about them. */ + else if (XEXP (cond, 1) == const0_rtx + || XEXP (cond, 0) == const0_rtx) + ; + else + predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN); + break; + + case NE: + case LTGT: + /* Floating point comparisons appears to behave in a very + unpredictable way because of special role of = tests in + FP code. */ + if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0)))) + ; + /* Comparisons with 0 are often used for booleans and there is + nothing useful to predict about them. */ + else if (XEXP (cond, 1) == const0_rtx + || XEXP (cond, 0) == const0_rtx) + ; + else + predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN); + break; + + case ORDERED: + predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN); + break; + + case UNORDERED: + predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN); + break; + + case LE: + case LT: + if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx + || XEXP (cond, 1) == constm1_rtx) + predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN); + break; + + case GE: + case GT: + if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx + || XEXP (cond, 1) == constm1_rtx) + predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN); + break; + + default: + break; + } +} + +/* Statically estimate the probability that a branch will be taken and produce + estimated profile. When profile feedback is present never executed portions + of function gets estimated. 
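What the pointer and opcode heuristics implemented by bb_estimate_probability_locally above mean at the source level; the comments note the prediction each condition would receive, assuming no stronger predictor already fired on that jump.

#include <stddef.h>
#include <stdio.h>

static void error_path (void)  { fprintf (stderr, "error\n"); }
static void common_path (void) { }

void
example (int *p, int n)
{
  if (p == NULL)   /* pointer heuristic: EQ on a pointer -> NOT_TAKEN   */
    error_path ();

  if (n < 0)       /* opcode heuristic: LT against 0 -> NOT_TAKEN       */
    error_path ();

  if (n != 100)    /* opcode heuristic: NE, non-zero constant -> TAKEN  */
    common_path ();
}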
*/ + +void +estimate_probability (struct loops *loops_info) +{ + basic_block bb; + + connect_infinite_loops_to_exit (); + calculate_dominance_info (CDI_DOMINATORS); + calculate_dominance_info (CDI_POST_DOMINATORS); + + predict_loops (loops_info, true); + + iv_analysis_done (); /* Attempt to predict conditional jumps using a number of heuristics. */ - for (i = 0; i < n_basic_blocks; i++) + FOR_EACH_BB (bb) { - basic_block bb = BASIC_BLOCK (i); - rtx last_insn = bb->end; - rtx cond, earliest; + rtx last_insn = BB_END (bb); edge e; + edge_iterator ei; - if (GET_CODE (last_insn) != JUMP_INSN || ! any_condjump_p (last_insn)) + if (! can_predict_insn_p (last_insn)) continue; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { /* Predict early returns to be probable, as we've already taken care for error returns and other are often used for fast paths trought function. */ if ((e->dest == EXIT_BLOCK_PTR - || (e->dest->succ && !e->dest->succ->succ_next - && e->dest->succ->dest == EXIT_BLOCK_PTR)) + || (EDGE_COUNT (e->dest->succs) == 1 + && EDGE_SUCC (e->dest, 0)->dest == EXIT_BLOCK_PTR)) && !predicted_by_p (bb, PRED_NULL_RETURN) && !predicted_by_p (bb, PRED_CONST_RETURN) && !predicted_by_p (bb, PRED_NEGATIVE_RETURN) && !last_basic_block_p (e->dest)) predict_edge_def (e, PRED_EARLY_RETURN, TAKEN); - /* Look for block we are guarding (ie we dominate it, + /* Look for block we are guarding (i.e. we dominate it, but it doesn't postdominate us). */ if (e->dest != EXIT_BLOCK_PTR && e->dest != bb - && TEST_BIT (dominators[e->dest->index], e->src->index) - && !TEST_BIT (post_dominators[e->src->index], e->dest->index)) + && dominated_by_p (CDI_DOMINATORS, e->dest, e->src) + && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest)) { rtx insn; @@ -501,9 +850,9 @@ estimate_probability (loops_info) is improbable. This is because such calls are often used to signal exceptional situations such as printing error messages. */ - for (insn = e->dest->head; insn != NEXT_INSN (e->dest->end); + for (insn = BB_HEAD (e->dest); insn != NEXT_INSN (BB_END (e->dest)); insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == CALL_INSN + if (CALL_P (insn) /* Constant and pure calls are hardly used to signalize something exceptional. */ && ! CONST_OR_PURE_CALL_P (insn)) @@ -513,107 +862,516 @@ estimate_probability (loops_info) } } } + bb_estimate_probability_locally (bb); + } - cond = get_condition (last_insn, &earliest); - if (! cond) - continue; + /* Attach the combined probability to each conditional jump. */ + FOR_EACH_BB (bb) + combine_predictions_for_insn (BB_END (bb), bb); - /* Try "pointer heuristic." - A comparison ptr == 0 is predicted as false. - Similarly, a comparison ptr1 == ptr2 is predicted as false. */ - if (GET_RTX_CLASS (GET_CODE (cond)) == '<' - && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0))) - || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1))))) + remove_fake_edges (); + estimate_bb_frequencies (loops_info); + free_dominance_info (CDI_POST_DOMINATORS); + if (profile_status == PROFILE_ABSENT) + profile_status = PROFILE_GUESSED; +} + +/* Set edge->probability for each successor edge of BB. */ +void +guess_outgoing_edge_probabilities (basic_block bb) +{ + bb_estimate_probability_locally (bb); + combine_predictions_for_insn (BB_END (bb), bb); +} + +/* Return constant EXPR will likely have at execution time, NULL if unknown. + The function is used by builtin_expect branch predictor so the evidence + must come from this construct and additional possible constant folding. 
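The builtin_expect predictor described above relies on hints the programmer wrote into the source; a typical use is the familiar likely/unlikely wrappers, whose constant second argument is what expr_expected_value later recovers.

/* Conventional wrappers around the builtin; the hint ends up as a
   PRED_BUILTIN_EXPECT prediction on the corresponding branch.  */
#define likely(x)   __builtin_expect (!!(x), 1)
#define unlikely(x) __builtin_expect (!!(x), 0)

int
process (int err)
{
  if (unlikely (err))   /* error branch predicted not taken */
    return -1;
  return 0;
}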
+ + We may want to implement more involved value guess (such as value range + propagation based prediction), but such tricks shall go to new + implementation. */ + +static tree +expr_expected_value (tree expr, bitmap visited) +{ + if (TREE_CONSTANT (expr)) + return expr; + else if (TREE_CODE (expr) == SSA_NAME) + { + tree def = SSA_NAME_DEF_STMT (expr); + + /* If we were already here, break the infinite cycle. */ + if (bitmap_bit_p (visited, SSA_NAME_VERSION (expr))) + return NULL; + bitmap_set_bit (visited, SSA_NAME_VERSION (expr)); + + if (TREE_CODE (def) == PHI_NODE) + { + /* All the arguments of the PHI node must have the same constant + length. */ + int i; + tree val = NULL, new_val; + + for (i = 0; i < PHI_NUM_ARGS (def); i++) + { + tree arg = PHI_ARG_DEF (def, i); + + /* If this PHI has itself as an argument, we cannot + determine the string length of this argument. However, + if we can find a expected constant value for the other + PHI args then we can still be sure that this is + likely a constant. So be optimistic and just + continue with the next argument. */ + if (arg == PHI_RESULT (def)) + continue; + + new_val = expr_expected_value (arg, visited); + if (!new_val) + return NULL; + if (!val) + val = new_val; + else if (!operand_equal_p (val, new_val, false)) + return NULL; + } + return val; + } + if (TREE_CODE (def) != MODIFY_EXPR || TREE_OPERAND (def, 0) != expr) + return NULL; + return expr_expected_value (TREE_OPERAND (def, 1), visited); + } + else if (TREE_CODE (expr) == CALL_EXPR) + { + tree decl = get_callee_fndecl (expr); + if (!decl) + return NULL; + if (DECL_BUILT_IN (decl) && DECL_FUNCTION_CODE (decl) == BUILT_IN_EXPECT) { - if (GET_CODE (cond) == EQ) - predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN); - else if (GET_CODE (cond) == NE) - predict_insn_def (last_insn, PRED_POINTER, TAKEN); + tree arglist = TREE_OPERAND (expr, 1); + tree val; + + if (arglist == NULL_TREE + || TREE_CHAIN (arglist) == NULL_TREE) + return NULL; + val = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (expr, 1))); + if (TREE_CONSTANT (val)) + return val; + return TREE_VALUE (TREE_CHAIN (TREE_OPERAND (expr, 1))); } + } + if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr)) + { + tree op0, op1, res; + op0 = expr_expected_value (TREE_OPERAND (expr, 0), visited); + if (!op0) + return NULL; + op1 = expr_expected_value (TREE_OPERAND (expr, 1), visited); + if (!op1) + return NULL; + res = fold (build (TREE_CODE (expr), TREE_TYPE (expr), op0, op1)); + if (TREE_CONSTANT (res)) + return res; + return NULL; + } + if (UNARY_CLASS_P (expr)) + { + tree op0, res; + op0 = expr_expected_value (TREE_OPERAND (expr, 0), visited); + if (!op0) + return NULL; + res = fold (build1 (TREE_CODE (expr), TREE_TYPE (expr), op0)); + if (TREE_CONSTANT (res)) + return res; + return NULL; + } + return NULL; +} + +/* Get rid of all builtin_expect calls we no longer need. 
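How the SSA walk in expr_expected_value pays off: the hint survives an intermediate assignment, so the comparison below still folds to an expected constant. This assumes the temporary becomes an SSA name under the usual optimization levels.

int
guarded (int x)
{
  /* expr_expected_value follows the SSA definition of t back to the
     __builtin_expect call and returns the constant 0, so
     tree_predict_by_opcode predicts the branch as not taken.  */
  long t = __builtin_expect (x > 0, 0);

  if (t)
    return 1;
  return 0;
}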
*/ +static void +strip_builtin_expect (void) +{ + basic_block bb; + FOR_EACH_BB (bb) + { + block_stmt_iterator bi; + for (bi = bsi_start (bb); !bsi_end_p (bi); bsi_next (&bi)) + { + tree stmt = bsi_stmt (bi); + tree fndecl; + tree arglist; + + if (TREE_CODE (stmt) == MODIFY_EXPR + && TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR + && (fndecl = get_callee_fndecl (TREE_OPERAND (stmt, 1))) + && DECL_BUILT_IN (fndecl) + && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT + && (arglist = TREE_OPERAND (TREE_OPERAND (stmt, 1), 1)) + && TREE_CHAIN (arglist)) + { + TREE_OPERAND (stmt, 1) = TREE_VALUE (arglist); + modify_stmt (stmt); + } + } + } +} + +/* Predict using opcode of the last statement in basic block. */ +static void +tree_predict_by_opcode (basic_block bb) +{ + tree stmt = last_stmt (bb); + edge then_edge; + tree cond; + tree op0; + tree type; + tree val; + bitmap visited; + edge_iterator ei; + + if (!stmt || TREE_CODE (stmt) != COND_EXPR) + return; + FOR_EACH_EDGE (then_edge, ei, bb->succs) + if (then_edge->flags & EDGE_TRUE_VALUE) + break; + cond = TREE_OPERAND (stmt, 0); + if (!COMPARISON_CLASS_P (cond)) + return; + op0 = TREE_OPERAND (cond, 0); + type = TREE_TYPE (op0); + visited = BITMAP_XMALLOC (); + val = expr_expected_value (cond, visited); + BITMAP_XFREE (visited); + if (val) + { + if (integer_zerop (val)) + predict_edge_def (then_edge, PRED_BUILTIN_EXPECT, NOT_TAKEN); else + predict_edge_def (then_edge, PRED_BUILTIN_EXPECT, TAKEN); + return; + } + /* Try "pointer heuristic." + A comparison ptr == 0 is predicted as false. + Similarly, a comparison ptr1 == ptr2 is predicted as false. */ + if (POINTER_TYPE_P (type)) + { + if (TREE_CODE (cond) == EQ_EXPR) + predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN); + else if (TREE_CODE (cond) == NE_EXPR) + predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN); + } + else - /* Try "opcode heuristic." - EQ tests are usually false and NE tests are usually true. Also, - most quantities are positive, so we can make the appropriate guesses - about signed comparisons against zero. */ - switch (GET_CODE (cond)) - { - case CONST_INT: - /* Unconditional branch. */ - predict_insn_def (last_insn, PRED_UNCONDITIONAL, - cond == const0_rtx ? NOT_TAKEN : TAKEN); - break; + /* Try "opcode heuristic." + EQ tests are usually false and NE tests are usually true. Also, + most quantities are positive, so we can make the appropriate guesses + about signed comparisons against zero. */ + switch (TREE_CODE (cond)) + { + case EQ_EXPR: + case UNEQ_EXPR: + /* Floating point comparisons appears to behave in a very + unpredictable way because of special role of = tests in + FP code. */ + if (FLOAT_TYPE_P (type)) + ; + /* Comparisons with 0 are often used for booleans and there is + nothing useful to predict about them. */ + else if (integer_zerop (op0) + || integer_zerop (TREE_OPERAND (cond, 1))) + ; + else + predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN); + break; + + case NE_EXPR: + case LTGT_EXPR: + /* Floating point comparisons appears to behave in a very + unpredictable way because of special role of = tests in + FP code. */ + if (FLOAT_TYPE_P (type)) + ; + /* Comparisons with 0 are often used for booleans and there is + nothing useful to predict about them. 
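strip_builtin_expect above rewrites the call away once the hint has been consumed; in source terms the MODIFY_EXPR rewrite is roughly the following, shown only as an illustration.

/* Before the pass: a MODIFY_EXPR whose right-hand side is the
   CALL_EXPR of __builtin_expect.  */
long
before (int x)
{
  long t = __builtin_expect (x > 0, 1);
  return t;
}

/* After the pass: the call is replaced by its first argument; the
   computed value is identical, only the hint is gone.  */
long
after (int x)
{
  long t = (x > 0);
  return t;
}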
*/ + else if (integer_zerop (op0) + || integer_zerop (TREE_OPERAND (cond, 1))) + ; + else + predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN); + break; + + case ORDERED_EXPR: + predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN); + break; + + case UNORDERED_EXPR: + predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN); + break; + + case LE_EXPR: + case LT_EXPR: + if (integer_zerop (TREE_OPERAND (cond, 1)) + || integer_onep (TREE_OPERAND (cond, 1)) + || integer_all_onesp (TREE_OPERAND (cond, 1)) + || real_zerop (TREE_OPERAND (cond, 1)) + || real_onep (TREE_OPERAND (cond, 1)) + || real_minus_onep (TREE_OPERAND (cond, 1))) + predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN); + break; + + case GE_EXPR: + case GT_EXPR: + if (integer_zerop (TREE_OPERAND (cond, 1)) + || integer_onep (TREE_OPERAND (cond, 1)) + || integer_all_onesp (TREE_OPERAND (cond, 1)) + || real_zerop (TREE_OPERAND (cond, 1)) + || real_onep (TREE_OPERAND (cond, 1)) + || real_minus_onep (TREE_OPERAND (cond, 1))) + predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN); + break; + + default: + break; + } +} - case EQ: - case UNEQ: - /* Floating point comparisons appears to behave in a very - inpredictable way because of special role of = tests in - FP code. */ - if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0)))) - ; - /* Comparisons with 0 are often used for booleans and there is - nothing usefull to predict about them. */ - else if (XEXP (cond, 1) == const0_rtx - || XEXP (cond, 0) == const0_rtx) - ; - else - predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN); - break; +/* Try to guess whether the value of return means error code. */ +static enum br_predictor +return_prediction (tree val, enum prediction *prediction) +{ + /* VOID. */ + if (!val) + return PRED_NO_PREDICTION; + /* Different heuristics for pointers and scalars. */ + if (POINTER_TYPE_P (TREE_TYPE (val))) + { + /* NULL is usually not returned. */ + if (integer_zerop (val)) + { + *prediction = NOT_TAKEN; + return PRED_NULL_RETURN; + } + } + else if (INTEGRAL_TYPE_P (TREE_TYPE (val))) + { + /* Negative return values are often used to indicate + errors. */ + if (TREE_CODE (val) == INTEGER_CST + && tree_int_cst_sgn (val) < 0) + { + *prediction = NOT_TAKEN; + return PRED_NEGATIVE_RETURN; + } + /* Constant return values seems to be commonly taken. + Zero/one often represent booleans so exclude them from the + heuristics. */ + if (TREE_CONSTANT (val) + && (!integer_zerop (val) && !integer_onep (val))) + { + *prediction = TAKEN; + return PRED_NEGATIVE_RETURN; + } + } + return PRED_NO_PREDICTION; +} - case NE: - case LTGT: - /* Floating point comparisons appears to behave in a very - inpredictable way because of special role of = tests in - FP code. */ - if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0)))) - ; - /* Comparisons with 0 are often used for booleans and there is - nothing usefull to predict about them. */ - else if (XEXP (cond, 1) == const0_rtx - || XEXP (cond, 0) == const0_rtx) - ; - else - predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN); - break; +/* Find the basic block with return expression and look up for possible + return value trying to apply RETURN_PREDICTION heuristics. 
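Source-level shapes that the return_prediction heuristics above recognize; the comments name the predictor and direction a matching return value receives once apply_return_prediction walks the PHI arguments.

#include <stddef.h>
#include <stdlib.h>

/* Returning NULL on the failure path: PRED_NULL_RETURN, NOT_TAKEN.  */
void *
get_buffer (size_t n)
{
  if (n == 0)
    return NULL;
  return malloc (n);
}

/* Returning a negative error code: PRED_NEGATIVE_RETURN, NOT_TAKEN.
   A non-zero, non-boolean constant return is instead predicted TAKEN.  */
int
do_work (int fd)
{
  if (fd < 0)
    return -1;
  return 42;
}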
*/ +static void +apply_return_prediction (int *heads) +{ + tree return_stmt; + tree return_val; + edge e; + tree phi; + int phi_num_args, i; + enum br_predictor pred; + enum prediction direction; + edge_iterator ei; - case ORDERED: - predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN); - break; + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) + { + return_stmt = last_stmt (e->src); + if (TREE_CODE (return_stmt) == RETURN_EXPR) + break; + } + if (!e) + return; + return_val = TREE_OPERAND (return_stmt, 0); + if (!return_val) + return; + if (TREE_CODE (return_val) == MODIFY_EXPR) + return_val = TREE_OPERAND (return_val, 1); + if (TREE_CODE (return_val) != SSA_NAME + || !SSA_NAME_DEF_STMT (return_val) + || TREE_CODE (SSA_NAME_DEF_STMT (return_val)) != PHI_NODE) + return; + phi = SSA_NAME_DEF_STMT (return_val); + while (phi) + { + tree next = PHI_CHAIN (phi); + if (PHI_RESULT (phi) == return_val) + break; + phi = next; + } + if (!phi) + return; + phi_num_args = PHI_NUM_ARGS (phi); + pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction); + + /* Avoid the degenerate case where all return values form the function + belongs to same category (ie they are all positive constants) + so we can hardly say something about them. */ + for (i = 1; i < phi_num_args; i++) + if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction)) + break; + if (i != phi_num_args) + for (i = 0; i < phi_num_args; i++) + { + pred = return_prediction (PHI_ARG_DEF (phi, i), &direction); + if (pred != PRED_NO_PREDICTION) + predict_paths_leading_to (PHI_ARG_EDGE (phi, i)->src, heads, pred, + direction); + } +} - case UNORDERED: - predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN); - break; +/* Look for basic block that contains unlikely to happen events + (such as noreturn calls) and mark all paths leading to execution + of this basic blocks as unlikely. */ - case LE: - case LT: - if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx - || XEXP (cond, 1) == constm1_rtx) - predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN); - break; +static void +tree_bb_level_predictions (void) +{ + basic_block bb; + int *heads; - case GE: - case GT: - if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx - || XEXP (cond, 1) == constm1_rtx) - predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN); - break; + heads = xmalloc (sizeof (int) * last_basic_block); + memset (heads, -1, sizeof (int) * last_basic_block); + heads[ENTRY_BLOCK_PTR->next_bb->index] = last_basic_block; - default: - break; - } + apply_return_prediction (heads); + + FOR_EACH_BB (bb) + { + block_stmt_iterator bsi = bsi_last (bb); + + for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) + { + tree stmt = bsi_stmt (bsi); + switch (TREE_CODE (stmt)) + { + case MODIFY_EXPR: + if (TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR) + { + stmt = TREE_OPERAND (stmt, 1); + goto call_expr; + } + break; + case CALL_EXPR: +call_expr:; + if (call_expr_flags (stmt) & ECF_NORETURN) + predict_paths_leading_to (bb, heads, PRED_NORETURN, + NOT_TAKEN); + break; + default: + break; + } + } } - /* Attach the combined probability to each conditional jump. */ - for (i = 0; i < n_basic_blocks; i++) - if (GET_CODE (BLOCK_END (i)) == JUMP_INSN - && any_condjump_p (BLOCK_END (i)) - && BASIC_BLOCK (i)->succ->succ_next != NULL) - combine_predictions_for_insn (BLOCK_END (i), BASIC_BLOCK (i)); + free (heads); +} + +/* Predict branch probabilities and estimate profile of the tree CFG. 
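A minimal example of what tree_bb_level_predictions above catches: a call flagged ECF_NORETURN makes every path that must reach it unlikely, which in turn biases the guarding branch.

#include <stdlib.h>

int
checked_div (int a, int b)
{
  /* The block containing the abort call is reached only on error, so
     PRED_NORETURN marks the paths leading to it as not taken.  */
  if (b == 0)
    abort ();
  return a / b;
}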
*/ +static void +tree_estimate_probability (void) +{ + basic_block bb; + struct loops loops_info; - sbitmap_vector_free (post_dominators); - sbitmap_vector_free (dominators); + flow_loops_find (&loops_info, LOOP_TREE); + if (dump_file && (dump_flags & TDF_DETAILS)) + flow_loops_dump (&loops_info, dump_file, NULL, 0); - estimate_bb_frequencies (loops_info); + add_noreturn_fake_exit_edges (); + connect_infinite_loops_to_exit (); + calculate_dominance_info (CDI_DOMINATORS); + calculate_dominance_info (CDI_POST_DOMINATORS); + + tree_bb_level_predictions (); + + mark_irreducible_loops (&loops_info); + predict_loops (&loops_info, false); + + FOR_EACH_BB (bb) + { + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) + { + /* Predict early returns to be probable, as we've already taken + care for error returns and other cases are often used for + fast paths trought function. */ + if (e->dest == EXIT_BLOCK_PTR + && TREE_CODE (last_stmt (bb)) == RETURN_EXPR + && EDGE_COUNT (bb->preds) > 1) + { + edge e1; + edge_iterator ei1; + + FOR_EACH_EDGE (e1, ei1, bb->preds) + if (!predicted_by_p (e1->src, PRED_NULL_RETURN) + && !predicted_by_p (e1->src, PRED_CONST_RETURN) + && !predicted_by_p (e1->src, PRED_NEGATIVE_RETURN) + && !last_basic_block_p (e1->src)) + predict_edge_def (e1, PRED_TREE_EARLY_RETURN, NOT_TAKEN); + } + + /* Look for block we are guarding (ie we dominate it, + but it doesn't postdominate us). */ + if (e->dest != EXIT_BLOCK_PTR && e->dest != bb + && dominated_by_p (CDI_DOMINATORS, e->dest, e->src) + && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest)) + { + block_stmt_iterator bi; + + /* The call heuristic claims that a guarded function call + is improbable. This is because such calls are often used + to signal exceptional situations such as printing error + messages. */ + for (bi = bsi_start (e->dest); !bsi_end_p (bi); + bsi_next (&bi)) + { + tree stmt = bsi_stmt (bi); + if ((TREE_CODE (stmt) == CALL_EXPR + || (TREE_CODE (stmt) == MODIFY_EXPR + && TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR)) + /* Constant and pure calls are hardly used to signalize + something exceptional. */ + && TREE_SIDE_EFFECTS (stmt)) + { + predict_edge_def (e, PRED_CALL, NOT_TAKEN); + break; + } + } + } + } + tree_predict_by_opcode (bb); + } + FOR_EACH_BB (bb) + combine_predictions_for_bb (dump_file, bb); + + if (0) /* FIXME: Enable once we are pass down the profile to RTL level. */ + strip_builtin_expect (); + estimate_bb_frequencies (&loops_info); + free_dominance_info (CDI_POST_DOMINATORS); + remove_fake_exit_edges (); + flow_loops_free (&loops_info); + if (dump_file && (dump_flags & TDF_DETAILS)) + dump_tree_cfg (dump_file, dump_flags); + if (profile_status == PROFILE_ABSENT) + profile_status = PROFILE_GUESSED; } /* __builtin_expect dropped tokens into the insn stream describing expected @@ -621,7 +1379,7 @@ estimate_probability (loops_info) values. */ void -expected_value_to_br_prob () +expected_value_to_br_prob (void) { rtx insn, cond, ev = NULL_RTX, ev_reg = NULL_RTX; @@ -647,7 +1405,7 @@ expected_value_to_br_prob () case JUMP_INSN: /* Look for simple conditional branches. If we haven't got an expected value yet, no point going further. */ - if (GET_CODE (insn) != JUMP_INSN || ev == NULL_RTX + if (!JUMP_P (insn) || ev == NULL_RTX || ! any_condjump_p (insn)) continue; break; @@ -669,7 +1427,8 @@ expected_value_to_br_prob () (lt r70, r71) Could use cselib to try and reduce this further. 
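The call heuristic applied in the block walk above, seen from the source side: a branch guarding a side-effecting call that it dominates but does not post-dominate is predicted not taken (PRED_CALL), on the theory that such calls report errors.

#include <stdio.h>

void
check (int err)
{
  /* The call has side effects and is only reached when the condition
     holds, so the edge into this block is predicted not taken.  */
  if (err)
    fprintf (stderr, "something went wrong\n");
}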
*/ cond = XEXP (SET_SRC (pc_set (insn)), 0); - cond = canonicalize_condition (insn, cond, 0, NULL, ev_reg); + cond = canonicalize_condition (insn, cond, 0, NULL, ev_reg, + false, false); if (! cond || XEXP (cond, 0) != ev_reg || GET_CODE (XEXP (cond, 1)) != CONST_INT) continue; @@ -689,183 +1448,76 @@ expected_value_to_br_prob () } } -/* Check whether this is the last basic block of function. Commonly tehre - is one extra common cleanup block. */ +/* Check whether this is the last basic block of function. Commonly + there is one extra common cleanup block. */ static bool -last_basic_block_p (bb) - basic_block bb; +last_basic_block_p (basic_block bb) { if (bb == EXIT_BLOCK_PTR) return false; return (bb->next_bb == EXIT_BLOCK_PTR || (bb->next_bb->next_bb == EXIT_BLOCK_PTR - && bb->succ && !bb->succ->succ_next - && bb->succ->dest->next_bb == EXIT_BLOCK_PTR)); + && EDGE_COUNT (bb->succs) == 1 + && EDGE_SUCC (bb, 0)->dest->next_bb == EXIT_BLOCK_PTR)); } -/* Sets branch probabilities according to PREDiction and FLAGS. HEADS[bb->index] - should be index of basic block in that we need to alter branch predictions - (i.e. the first of our dominators such that we do not post-dominate it) - (but we fill this information on demand, so -1 may be there in case this - was not needed yet). */ +/* Sets branch probabilities according to PREDiction and + FLAGS. HEADS[bb->index] should be index of basic block in that we + need to alter branch predictions (i.e. the first of our dominators + such that we do not post-dominate it) (but we fill this information + on demand, so -1 may be there in case this was not needed yet). */ static void -process_note_prediction (bb, heads, dominators, post_dominators, pred, flags) - basic_block bb; - int *heads; - int *dominators; - sbitmap *post_dominators; - int pred; - int flags; +predict_paths_leading_to (basic_block bb, int *heads, enum br_predictor pred, + enum prediction taken) { edge e; + edge_iterator ei; int y; - bool taken; - - taken = flags & IS_TAKEN; if (heads[bb->index] < 0) { /* This is first time we need this field in heads array; so find first dominator that we do not post-dominate (we are using already known members of heads array). */ - int ai = bb->index; - int next_ai = dominators[bb->index]; + basic_block ai = bb; + basic_block next_ai = get_immediate_dominator (CDI_DOMINATORS, bb); int head; - while (heads[next_ai] < 0) + while (heads[next_ai->index] < 0) { - if (!TEST_BIT (post_dominators[next_ai], bb->index)) + if (!dominated_by_p (CDI_POST_DOMINATORS, next_ai, bb)) break; - heads[next_ai] = ai; + heads[next_ai->index] = ai->index; ai = next_ai; - next_ai = dominators[next_ai]; + next_ai = get_immediate_dominator (CDI_DOMINATORS, next_ai); } - if (!TEST_BIT (post_dominators[next_ai], bb->index)) - head = next_ai; + if (!dominated_by_p (CDI_POST_DOMINATORS, next_ai, bb)) + head = next_ai->index; else - head = heads[next_ai]; - while (next_ai != bb->index) + head = heads[next_ai->index]; + while (next_ai != bb) { next_ai = ai; - ai = heads[ai]; - heads[next_ai] = head; + if (heads[ai->index] == ENTRY_BLOCK) + ai = ENTRY_BLOCK_PTR; + else + ai = BASIC_BLOCK (heads[ai->index]); + heads[next_ai->index] = head; } } y = heads[bb->index]; /* Now find the edge that leads to our branch and aply the prediction. 
*/ - if (y == n_basic_blocks) + if (y == last_basic_block) return; - for (e = BASIC_BLOCK (y)->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (y)->succs) if (e->dest->index >= 0 - && TEST_BIT (post_dominators[e->dest->index], bb->index)) + && dominated_by_p (CDI_POST_DOMINATORS, e->dest, bb)) predict_edge_def (e, pred, taken); } - -/* Gathers NOTE_INSN_PREDICTIONs in given basic block and turns them - into branch probabilities. For description of heads array, see - process_note_prediction. */ - -static void -process_note_predictions (bb, heads, dominators, post_dominators) - basic_block bb; - int *heads; - int *dominators; - sbitmap *post_dominators; -{ - rtx insn; - edge e; - - /* Additionaly, we check here for blocks with no successors. */ - int contained_noreturn_call = 0; - int was_bb_head = 0; - int noreturn_block = 1; - - for (insn = bb->end; insn; - was_bb_head |= (insn == bb->head), insn = PREV_INSN (insn)) - { - if (GET_CODE (insn) != NOTE) - { - if (was_bb_head) - break; - else - { - /* Noreturn calls cause program to exit, therefore they are - always predicted as not taken. */ - if (GET_CODE (insn) == CALL_INSN - && find_reg_note (insn, REG_NORETURN, NULL)) - contained_noreturn_call = 1; - continue; - } - } - if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION) - { - int alg = (int) NOTE_PREDICTION_ALG (insn); - /* Process single prediction note. */ - process_note_prediction (bb, - heads, - dominators, - post_dominators, - alg, (int) NOTE_PREDICTION_FLAGS (insn)); - delete_insn (insn); - } - } - for (e = bb->succ; e; e = e->succ_next) - if (!(e->flags & EDGE_FAKE)) - noreturn_block = 0; - if (contained_noreturn_call) - { - /* This block ended from other reasons than because of return. - If it is because of noreturn call, this should certainly not - be taken. Otherwise it is probably some error recovery. */ - process_note_prediction (bb, - heads, - dominators, - post_dominators, PRED_NORETURN, NOT_TAKEN); - } -} - -/* Gathers NOTE_INSN_PREDICTIONs and turns them into - branch probabilities. */ - -void -note_prediction_to_br_prob () -{ - int i; - sbitmap *post_dominators; - int *dominators, *heads; - - /* To enable handling of noreturn blocks. */ - add_noreturn_fake_exit_edges (); - connect_infinite_loops_to_exit (); - - dominators = xmalloc (sizeof (int) * n_basic_blocks); - memset (dominators, -1, sizeof (int) * n_basic_blocks); - post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks); - calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS); - calculate_dominance_info (dominators, NULL, CDI_DOMINATORS); - - heads = xmalloc (sizeof (int) * n_basic_blocks); - memset (heads, -1, sizeof (int) * n_basic_blocks); - heads[ENTRY_BLOCK_PTR->next_bb->index] = n_basic_blocks; - - /* Process all prediction notes. */ - - for (i = 0; i < n_basic_blocks; ++i) - { - basic_block bb = BASIC_BLOCK (i); - process_note_predictions (bb, heads, dominators, post_dominators); - } - - sbitmap_vector_free (post_dominators); - free (dominators); - free (heads); - - remove_fake_edges (); -} /* This is used to carry information about basic blocks. It is attached to the AUX field of the standard CFG block. */ @@ -873,13 +1525,13 @@ note_prediction_to_br_prob () typedef struct block_info_def { /* Estimated frequency of execution of basic_block. */ - REAL_VALUE_TYPE frequency; + sreal frequency; /* To keep queue of basic blocks to process. */ basic_block next; - /* True if block needs to be visited in prop_freqency. 
*/ - int tovisit:1; + /* True if block needs to be visited in propagate_freq. */ + unsigned int tovisit:1; /* Number of predecessors we need to visit first. */ int npredecessors; @@ -891,42 +1543,41 @@ typedef struct edge_info_def /* In case edge is an loopback edge, the probability edge will be reached in case header is. Estimated number of iterations of the loop can be then computed as 1 / (1 - back_edge_prob). */ - REAL_VALUE_TYPE back_edge_prob; + sreal back_edge_prob; /* True if the edge is an loopback edge in the natural loop. */ - int back_edge:1; + unsigned int back_edge:1; } *edge_info; #define BLOCK_INFO(B) ((block_info) (B)->aux) #define EDGE_INFO(E) ((edge_info) (E)->aux) /* Helper function for estimate_bb_frequencies. - Propagate the frequencies for loops headed by HEAD. */ + Propagate the frequencies for LOOP. */ static void -propagate_freq (head) - basic_block head; +propagate_freq (struct loop *loop) { - basic_block bb = head; - basic_block last = bb; + basic_block head = loop->header; + basic_block bb; + basic_block last; edge e; basic_block nextbb; - int n; /* For each basic block we need to visit count number of his predecessors we need to visit first. */ - for (n = 0; n < n_basic_blocks; n++) + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { - basic_block bb = BASIC_BLOCK (n); if (BLOCK_INFO (bb)->tovisit) { + edge_iterator ei; int count = 0; - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) count++; else if (BLOCK_INFO (e->src)->tovisit - && rtl_dump_file && !EDGE_INFO (e)->back_edge) - fprintf (rtl_dump_file, + && dump_file && !EDGE_INFO (e)->back_edge) + fprintf (dump_file, "Irreducible region hit, ignoring edge to %i->%i\n", e->src->index, bb->index); BLOCK_INFO (bb)->npredecessors = count; @@ -934,9 +1585,11 @@ propagate_freq (head) } memcpy (&BLOCK_INFO (head)->frequency, &real_one, sizeof (real_one)); - for (; bb; bb = nextbb) + last = head; + for (bb = head; bb; bb = nextbb) { - REAL_VALUE_TYPE cyclic_probability, frequency; + edge_iterator ei; + sreal cyclic_probability, frequency; memcpy (&cyclic_probability, &real_zero, sizeof (real_zero)); memcpy (&frequency, &real_zero, sizeof (real_zero)); @@ -948,68 +1601,73 @@ propagate_freq (head) if (bb != head) { #ifdef ENABLE_CHECKING - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) abort (); #endif - for (e = bb->pred; e; e = e->pred_next) + FOR_EACH_EDGE (e, ei, bb->preds) if (EDGE_INFO (e)->back_edge) { - REAL_ARITHMETIC (cyclic_probability, PLUS_EXPR, - cyclic_probability, - EDGE_INFO (e)->back_edge_prob); + sreal_add (&cyclic_probability, &cyclic_probability, + &EDGE_INFO (e)->back_edge_prob); } else if (!(e->flags & EDGE_DFS_BACK)) { - REAL_VALUE_TYPE tmp; + sreal tmp; /* frequency += (e->probability * BLOCK_INFO (e->src)->frequency / REG_BR_PROB_BASE); */ - REAL_VALUE_FROM_INT (tmp, e->probability, 0, - TYPE_MODE (double_type_node)); - REAL_ARITHMETIC (tmp, MULT_EXPR, tmp, - BLOCK_INFO (e->src)->frequency); - REAL_ARITHMETIC (tmp, RDIV_EXPR, tmp, real_br_prob_base); - REAL_ARITHMETIC (frequency, PLUS_EXPR, frequency, tmp); + sreal_init (&tmp, e->probability, 0); + sreal_mul (&tmp, &tmp, &BLOCK_INFO (e->src)->frequency); + sreal_mul (&tmp, &tmp, &real_inv_br_prob_base); + sreal_add (&frequency, &frequency, &tmp); } - if (REAL_VALUES_LESS (real_almost_one, cyclic_probability)) - memcpy (&cyclic_probability, &real_almost_one, 
sizeof (real_zero)); + if (sreal_compare (&cyclic_probability, &real_zero) == 0) + { + memcpy (&BLOCK_INFO (bb)->frequency, &frequency, + sizeof (frequency)); + } + else + { + if (sreal_compare (&cyclic_probability, &real_almost_one) > 0) + { + memcpy (&cyclic_probability, &real_almost_one, + sizeof (real_almost_one)); + } - /* BLOCK_INFO (bb)->frequency = frequency / (1 - cyclic_probability) - */ + /* BLOCK_INFO (bb)->frequency = frequency + / (1 - cyclic_probability) */ - REAL_ARITHMETIC (cyclic_probability, MINUS_EXPR, real_one, - cyclic_probability); - REAL_ARITHMETIC (BLOCK_INFO (bb)->frequency, - RDIV_EXPR, frequency, cyclic_probability); + sreal_sub (&cyclic_probability, &real_one, &cyclic_probability); + sreal_div (&BLOCK_INFO (bb)->frequency, + &frequency, &cyclic_probability); + } } BLOCK_INFO (bb)->tovisit = 0; /* Compute back edge frequencies. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest == head) { - REAL_VALUE_TYPE tmp; - + sreal tmp; + /* EDGE_INFO (e)->back_edge_prob - = ((e->probability * BLOCK_INFO (bb)->frequency) - / REG_BR_PROB_BASE); */ - REAL_VALUE_FROM_INT (tmp, e->probability, 0, - TYPE_MODE (double_type_node)); - REAL_ARITHMETIC (tmp, MULT_EXPR, tmp, - BLOCK_INFO (bb)->frequency); - REAL_ARITHMETIC (EDGE_INFO (e)->back_edge_prob, - RDIV_EXPR, tmp, real_br_prob_base); - + = ((e->probability * BLOCK_INFO (bb)->frequency) + / REG_BR_PROB_BASE); */ + + sreal_init (&tmp, e->probability, 0); + sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency); + sreal_mul (&EDGE_INFO (e)->back_edge_prob, + &tmp, &real_inv_br_prob_base); } /* Propagate to successor blocks. */ - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) if (!(e->flags & EDGE_DFS_BACK) && BLOCK_INFO (e->dest)->npredecessors) { @@ -1020,94 +1678,72 @@ propagate_freq (head) nextbb = e->dest; else BLOCK_INFO (last)->next = e->dest; - + last = e->dest; } - } + } } } /* Estimate probabilities of loopback edges in loops at same nest level. */ static void -estimate_loops_at_level (first_loop) - struct loop *first_loop; +estimate_loops_at_level (struct loop *first_loop) { - struct loop *l, *loop = first_loop; + struct loop *loop; for (loop = first_loop; loop; loop = loop->next) { - int n; edge e; + basic_block *bbs; + unsigned i; estimate_loops_at_level (loop->inner); - /* Find current loop back edge and mark it. */ - for (e = loop->latch->succ; e->dest != loop->header; e = e->succ_next) - ; - - EDGE_INFO (e)->back_edge = 1; - - /* In case the loop header is shared, ensure that it is the last - one sharing the same header, so we avoid redundant work. */ - if (loop->shared) + /* Do not do this for dummy function loop. */ + if (EDGE_COUNT (loop->latch->succs) > 0) { - for (l = loop->next; l; l = l->next) - if (l->header == loop->header) - break; - - if (l) - continue; - } - - /* Now merge all nodes of all loops with given header as not visited. */ - for (l = loop->shared ? first_loop : loop; l != loop->next; l = l->next) - if (loop->header == l->header) - EXECUTE_IF_SET_IN_SBITMAP (l->nodes, 0, n, - BLOCK_INFO (BASIC_BLOCK (n))->tovisit = 1 - ); - - propagate_freq (loop->header); + /* Find current loop back edge and mark it. */ + e = loop_latch_edge (loop); + EDGE_INFO (e)->back_edge = 1; + } + + bbs = get_loop_body (loop); + for (i = 0; i < loop->num_nodes; i++) + BLOCK_INFO (bbs[i])->tovisit = 1; + free (bbs); + propagate_freq (loop); } } -/* Convert counts measured by profile driven feedback to frequencies. 
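The loop arithmetic in propagate_freq above, redone with plain doubles for the simplest case: with a single back edge of probability p, the header frequency becomes the incoming frequency divided by (1 - p), matching the comment that the iteration count is 1 / (1 - back_edge_prob). A sketch only; the real code uses the sreal type to avoid overflow and rounding trouble.

#include <stdio.h>

#define REG_BR_PROB_BASE 10000

int
main (void)
{
  double entry_freq = 1.0;     /* frequency arriving from outside the loop */
  int back_edge_prob = 9000;   /* 90% chance of another iteration          */

  double cyclic = (double) back_edge_prob / REG_BR_PROB_BASE;
  double header_freq = entry_freq / (1.0 - cyclic);

  printf ("header frequency: %.1f\n", header_freq);   /* 10.0 */
  return 0;
}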
*/ +/* Convert counts measured by profile driven feedback to frequencies. + Return nonzero iff there was any nonzero execution count. */ -static void -counts_to_freqs () +int +counts_to_freqs (void) { - HOST_WIDEST_INT count_max = 1; - int i; + gcov_type count_max, true_count_max = 0; + basic_block bb; - for (i = 0; i < n_basic_blocks; i++) - count_max = MAX (BASIC_BLOCK (i)->count, count_max); + FOR_EACH_BB (bb) + true_count_max = MAX (bb->count, true_count_max); - for (i = -2; i < n_basic_blocks; i++) - { - basic_block bb; - - if (i == -2) - bb = ENTRY_BLOCK_PTR; - else if (i == -1) - bb = EXIT_BLOCK_PTR; - else - bb = BASIC_BLOCK (i); - - bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max; - } + count_max = MAX (true_count_max, 1); + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) + bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max; + return true_count_max; } /* Return true if function is likely to be expensive, so there is no point to optimize performance of prologue, epilogue or do inlining at the expense - of code size growth. THRESHOLD is the limit of number of isntructions + of code size growth. THRESHOLD is the limit of number of instructions function can execute at average to be still considered not expensive. */ bool -expensive_function_p (threshold) - int threshold; +expensive_function_p (int threshold) { unsigned int sum = 0; - int i; + basic_block bb; unsigned int limit; /* We can not compute accurately for large thresholds due to scaled @@ -1123,12 +1759,11 @@ expensive_function_p (threshold) /* Maximally BB_FREQ_MAX^2 so overflow won't happen. */ limit = ENTRY_BLOCK_PTR->frequency * threshold; - for (i = 0; i < n_basic_blocks; i++) + FOR_EACH_BB (bb) { - basic_block bb = BASIC_BLOCK (i); rtx insn; - for (insn = bb->head; insn != NEXT_INSN (bb->end); + for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) if (active_insn_p (insn)) { @@ -1144,83 +1779,46 @@ expensive_function_p (threshold) /* Estimate basic blocks frequency by given branch probabilities. 
*/ static void -estimate_bb_frequencies (loops) - struct loops *loops; +estimate_bb_frequencies (struct loops *loops) { - int i; - REAL_VALUE_TYPE freq_max; - enum machine_mode double_mode = TYPE_MODE (double_type_node); + basic_block bb; + sreal freq_max; - if (flag_branch_probabilities) - counts_to_freqs (); - else + if (!flag_branch_probabilities || !counts_to_freqs ()) { - REAL_VALUE_FROM_INT (real_zero, 0, 0, double_mode); - REAL_VALUE_FROM_INT (real_one, 1, 0, double_mode); - REAL_VALUE_FROM_INT (real_br_prob_base, REG_BR_PROB_BASE, 0, double_mode); - REAL_VALUE_FROM_INT (real_bb_freq_max, BB_FREQ_MAX, 0, double_mode); - REAL_VALUE_FROM_INT (real_one_half, 2, 0, double_mode); - - REAL_ARITHMETIC (real_one_half, RDIV_EXPR, real_one, real_one_half); - - REAL_ARITHMETIC (real_almost_one, RDIV_EXPR, real_one, real_br_prob_base); - REAL_ARITHMETIC (real_almost_one, MINUS_EXPR, real_one, real_almost_one); + static int real_values_initialized = 0; + + if (!real_values_initialized) + { + real_values_initialized = 1; + sreal_init (&real_zero, 0, 0); + sreal_init (&real_one, 1, 0); + sreal_init (&real_br_prob_base, REG_BR_PROB_BASE, 0); + sreal_init (&real_bb_freq_max, BB_FREQ_MAX, 0); + sreal_init (&real_one_half, 1, -1); + sreal_div (&real_inv_br_prob_base, &real_one, &real_br_prob_base); + sreal_sub (&real_almost_one, &real_one, &real_inv_br_prob_base); + } mark_dfs_back_edges (); - /* Fill in the probability values in flowgraph based on the REG_BR_PROB - notes. */ - for (i = 0; i < n_basic_blocks; i++) - { - rtx last_insn = BLOCK_END (i); - - if (GET_CODE (last_insn) != JUMP_INSN || !any_condjump_p (last_insn) - /* Avoid handling of conditional jumps jumping to fallthru edge. */ - || BASIC_BLOCK (i)->succ->succ_next == NULL) - { - /* We can predict only conditional jumps at the moment. - Expect each edge to be equally probable. - ?? In the future we want to make abnormal edges improbable. */ - int nedges = 0; - edge e; - - for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next) - { - nedges++; - if (e->probability != 0) - break; - } - if (!e) - for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next) - e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; - } - } - ENTRY_BLOCK_PTR->succ->probability = REG_BR_PROB_BASE; + EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->probability = REG_BR_PROB_BASE; /* Set up block info for each basic block. */ alloc_aux_for_blocks (sizeof (struct block_info_def)); alloc_aux_for_edges (sizeof (struct edge_info_def)); - for (i = -2; i < n_basic_blocks; i++) + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - basic_block bb; - - if (i == -2) - bb = ENTRY_BLOCK_PTR; - else if (i == -1) - bb = EXIT_BLOCK_PTR; - else - bb = BASIC_BLOCK (i); + edge_iterator ei; BLOCK_INFO (bb)->tovisit = 0; - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { - - REAL_VALUE_FROM_INT (EDGE_INFO (e)->back_edge_prob, - e->probability, 0, double_mode); - REAL_ARITHMETIC (EDGE_INFO (e)->back_edge_prob, - RDIV_EXPR, EDGE_INFO (e)->back_edge_prob, - real_br_prob_base); + sreal_init (&EDGE_INFO (e)->back_edge_prob, e->probability, 0); + sreal_mul (&EDGE_INFO (e)->back_edge_prob, + &EDGE_INFO (e)->back_edge_prob, + &real_inv_br_prob_base); } } @@ -1228,38 +1826,19 @@ estimate_bb_frequencies (loops) to outermost to examine probabilities for back edges. */ estimate_loops_at_level (loops->tree_root); - /* Now fake loop around whole function to finalize probabilities. 
*/ - for (i = 0; i < n_basic_blocks; i++) - BLOCK_INFO (BASIC_BLOCK (i))->tovisit = 1; - - BLOCK_INFO (ENTRY_BLOCK_PTR)->tovisit = 1; - BLOCK_INFO (EXIT_BLOCK_PTR)->tovisit = 1; - propagate_freq (ENTRY_BLOCK_PTR); - memcpy (&freq_max, &real_zero, sizeof (real_zero)); - for (i = 0; i < n_basic_blocks; i++) - if (REAL_VALUES_LESS - (freq_max, BLOCK_INFO (BASIC_BLOCK (i))->frequency)) - memcpy (&freq_max, &BLOCK_INFO (BASIC_BLOCK (i))->frequency, - sizeof (freq_max)); + FOR_EACH_BB (bb) + if (sreal_compare (&freq_max, &BLOCK_INFO (bb)->frequency) < 0) + memcpy (&freq_max, &BLOCK_INFO (bb)->frequency, sizeof (freq_max)); - for (i = -2; i < n_basic_blocks; i++) + sreal_div (&freq_max, &real_bb_freq_max, &freq_max); + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { - basic_block bb; - REAL_VALUE_TYPE tmp; + sreal tmp; - if (i == -2) - bb = ENTRY_BLOCK_PTR; - else if (i == -1) - bb = EXIT_BLOCK_PTR; - else - bb = BASIC_BLOCK (i); - - REAL_ARITHMETIC (tmp, MULT_EXPR, BLOCK_INFO (bb)->frequency, - real_bb_freq_max); - REAL_ARITHMETIC (tmp, RDIV_EXPR, tmp, freq_max); - REAL_ARITHMETIC (tmp, PLUS_EXPR, tmp, real_one_half); - bb->frequency = REAL_VALUE_UNSIGNED_FIX (tmp); + sreal_mul (&tmp, &BLOCK_INFO (bb)->frequency, &freq_max); + sreal_add (&tmp, &tmp, &real_one_half); + bb->frequency = sreal_to_int (&tmp); } free_aux_for_blocks (); @@ -1272,16 +1851,15 @@ estimate_bb_frequencies (loops) /* Decide whether function is hot, cold or unlikely executed. */ static void -compute_function_frequency () +compute_function_frequency (void) { - int i; - if (!profile_info.count_profiles_merged - || !flag_branch_probabilities) + basic_block bb; + + if (!profile_info || !flag_branch_probabilities) return; cfun->function_frequency = FUNCTION_FREQUENCY_UNLIKELY_EXECUTED; - for (i = 0; i < n_basic_blocks; i++) + FOR_EACH_BB (bb) { - basic_block bb = BASIC_BLOCK (i); if (maybe_hot_bb_p (bb)) { cfun->function_frequency = FUNCTION_FREQUENCY_HOT; @@ -1294,11 +1872,23 @@ compute_function_frequency () /* Choose appropriate section for the function. */ static void -choose_function_section () +choose_function_section (void) { if (DECL_SECTION_NAME (current_function_decl) - || !targetm.have_named_sections) + || !targetm.have_named_sections + /* Theoretically we can split the gnu.linkonce text section too, + but this requires more work as the frequency needs to match + for all generated objects so we need to merge the frequency + of all instances. For now just never set frequency for these. */ + || DECL_ONE_ONLY (current_function_decl)) + return; + + /* If we are doing the partitioning optimization, let the optimization + choose the correct section into which to put things. */ + + if (flag_reorder_blocks_and_partition) return; + if (cfun->function_frequency == FUNCTION_FREQUENCY_HOT) DECL_SECTION_NAME (current_function_decl) = build_string (strlen (HOT_TEXT_SECTION_NAME), HOT_TEXT_SECTION_NAME); @@ -1307,3 +1897,21 @@ choose_function_section () build_string (strlen (UNLIKELY_EXECUTED_TEXT_SECTION_NAME), UNLIKELY_EXECUTED_TEXT_SECTION_NAME); } + + +struct tree_opt_pass pass_profile = +{ + "profile", /* name */ + NULL, /* gate */ + tree_estimate_probability, /* execute */ + NULL, /* sub */ + NULL, /* next */ + 0, /* static_pass_number */ + TV_BRANCH_PROB, /* tv_id */ + PROP_cfg, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + TODO_ggc_collect | TODO_verify_ssa, /* todo_flags_finish */ + 0 /* letter */ +};
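
Editorial note, not part of the patch above: the arithmetic that propagate_freq performs with sreal values can be easier to follow in ordinary floating point. The sketch below is a minimal standalone illustration in plain C doubles; the CFG, the 0.9 and 0.5 probabilities, and all numbers are made up, and it deliberately does not use GCC's sreal API.

/* Standalone illustration of the two formulas used in propagate_freq
   above, written with plain doubles instead of sreal.  The numbers are
   hypothetical; this is not GCC code.  */
#include <stdio.h>

#define REG_BR_PROB_BASE 10000	/* edge probabilities are scaled by this base */

int
main (void)
{
  /* 1) Loop header.  For a simple single-block loop whose back edge is
     predicted taken with probability 0.9, the header frequency is the
     incoming frequency divided by (1 - cyclic_probability), and the
     expected iteration count is 1 / (1 - back_edge_prob), exactly as
     the comment in the patch states.  */
  double incoming_freq = 1.0;
  double cyclic_probability = 9000.0 / REG_BR_PROB_BASE;	/* 0.9 */
  double header_freq = incoming_freq / (1.0 - cyclic_probability);

  /* 2) Ordinary (non back) edge.  A successor reached from a block of
     frequency header_freq with probability 0.5 accumulates
     frequency += e->probability * src->frequency / REG_BR_PROB_BASE.  */
  double succ_freq = 5000.0 * header_freq / REG_BR_PROB_BASE;

  printf ("header frequency:    %.1f\n", header_freq);	/* 10.0 */
  printf ("expected iterations: %.1f\n", 1.0 / (1.0 - cyclic_probability));
  printf ("successor frequency: %.1f\n", succ_freq);	/* 5.0 */
  return 0;
}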
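
A similarly hedged sketch of the scaling in counts_to_freqs: each block's profile count is mapped onto the 0..BB_FREQ_MAX range relative to the hottest block, and the "+ count_max / 2" term turns the integer division into round-to-nearest. The counts below are invented, and gcov_type is approximated here with long long.

/* Illustration of the rounded scaling performed by counts_to_freqs
   above; not GCC code.  */
#include <stdio.h>

#define BB_FREQ_MAX 10000
typedef long long gcov_type;	/* stand-in for GCC's gcov_type */

int
main (void)
{
  gcov_type counts[3] = { 2, 100, 300 };	/* hypothetical bb->count values */
  gcov_type count_max = 300;			/* largest count, forced to be >= 1 */
  int i;

  for (i = 0; i < 3; i++)
    {
      int frequency = (counts[i] * BB_FREQ_MAX + count_max / 2) / count_max;
      /* count 2 maps to 67 (66.67 rounded), 100 to 3333, 300 to 10000.  */
      printf ("count %lld -> frequency %d\n", counts[i], frequency);
    }
  return 0;
}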
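
The same kind of back-of-the-envelope check applies to expensive_function_p: every active insn contributes its block's frequency to a running sum, and the function counts as expensive once that sum exceeds the entry block frequency times THRESHOLD, i.e. once it is expected to execute more than THRESHOLD instructions per call on average. A toy calculation with made-up numbers:

/* Toy version of the expensive_function_p test; not GCC code.  */
#include <stdbool.h>
#include <stdio.h>

int
main (void)
{
  unsigned int entry_frequency = 1000;	/* stands in for ENTRY_BLOCK_PTR->frequency */
  int threshold = 50;			/* the THRESHOLD parameter */
  unsigned int limit = entry_frequency * threshold;

  /* Imagine a single block of frequency 2000 containing 30 active
     insns: each insn adds the block frequency to the sum.  */
  unsigned int sum = 2000u * 30;

  bool expensive = sum > limit;		/* 60000 > 50000, so "expensive" */
  printf ("expensive: %s\n", expensive ? "yes" : "no");
  return 0;
}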
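
Finally, the sreal_init calls in estimate_bb_frequencies suggest the constants are encoded as significand * 2^exponent, which is how (1, -1) yields the 0.5 used for rounding before sreal_to_int; this reading is inferred from the variable names in the patch, not from the sreal implementation itself. A toy pair makes the inference concrete:

/* Toy significand/exponent pair, only to illustrate the apparent
   meaning of the sreal_init arguments above (value = sig * 2^exp).
   This is NOT GCC's sreal type.  */
#include <math.h>
#include <stdio.h>

struct toy_sreal
{
  long long sig;
  int exp;
};

static double
toy_value (struct toy_sreal x)
{
  return ldexp ((double) x.sig, x.exp);	/* sig * 2^exp */
}

int
main (void)
{
  struct toy_sreal one_half = { 1, -1 };	/* mirrors sreal_init (&real_one_half, 1, -1) */
  struct toy_sreal prob_base = { 10000, 0 };	/* taking REG_BR_PROB_BASE as 10000 */

  printf ("one_half  = %.2f\n", toy_value (one_half));	/* 0.50 */
  printf ("prob_base = %.2f\n", toy_value (prob_base));	/* 10000.00 */
  return 0;
}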