X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fcfgrtl.c;h=7eb4362341b18436266d7278eb806dd1b9d6aed3;hb=a27e10150254628bfb2259797135345eebb3f82a;hp=8ef48d092a1143a717056d5a9ba55ab055b9a229;hpb=a0fee14aa7831636bf6b6fc6d5fb852979c80ab4;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c index 8ef48d092a1..7eb4362341b 100644 --- a/gcc/cfgrtl.c +++ b/gcc/cfgrtl.c @@ -1,12 +1,13 @@ /* Control flow graph manipulation code for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. + 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 + Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -15,9 +16,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 59 Temple Place - Suite 330, Boston, MA -02111-1307, USA. */ +along with GCC; see the file COPYING3. If not see +. */ /* This file contains low level functions to manipulate the CFG and analyze it that are aware of the RTL intermediate language. @@ -30,6 +30,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA insert_insn_on_edge, commit_edge_insertions - CFG updating after insn simplification purge_dead_edges, purge_all_dead_edges + - CFG fixing after coarse manipulation + fixup_abnormal_edges Functions not supposed for generic use: - Infrastructure to determine quickly basic block for insn @@ -42,7 +44,6 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "coretypes.h" #include "tm.h" #include "tree.h" -#include "rtl.h" #include "hard-reg-set.h" #include "basic-block.h" #include "regs.h" @@ -50,25 +51,22 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "output.h" #include "function.h" #include "except.h" -#include "toplev.h" +#include "rtl-error.h" #include "tm_p.h" #include "obstack.h" +#include "insn-attr.h" #include "insn-config.h" #include "cfglayout.h" #include "expr.h" - - -/* The labels mentioned in non-jump rtl. Valid during find_basic_blocks. */ -/* ??? Should probably be using LABEL_NUSES instead. It would take a - bit of surgery to be able to use or co-opt the routines in jump. 
*/ -rtx label_value_list; - -static int can_delete_note_p (rtx); -static int can_delete_label_p (rtx); -static void commit_one_edge_insertion (edge, int); -static rtx last_loop_beg_note (rtx); -static bool back_edge_of_syntactic_loop_p (basic_block, basic_block); -basic_block force_nonfallthru_and_redirect (edge, basic_block); +#include "target.h" +#include "common/common-target.h" +#include "cfgloop.h" +#include "ggc.h" +#include "tree-pass.h" +#include "df.h" + +static int can_delete_note_p (const_rtx); +static int can_delete_label_p (const_rtx); static basic_block rtl_split_edge (edge); static bool rtl_move_block_after (basic_block, basic_block); static int rtl_verify_flow_info (void); @@ -80,33 +78,37 @@ static void rtl_delete_block (basic_block); static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block); static edge rtl_redirect_edge_and_branch (edge, basic_block); static basic_block rtl_split_block (basic_block, void *); -static void rtl_dump_bb (basic_block, FILE *, int); +static void rtl_dump_bb (basic_block, FILE *, int, int); static int rtl_verify_flow_info_1 (void); -static void mark_killed_regs (rtx, rtx, void *); static void rtl_make_forwarder_block (edge); /* Return true if NOTE is not one of the ones that must be kept paired, so that we may simply delete it. */ static int -can_delete_note_p (rtx note) +can_delete_note_p (const_rtx note) { - return (NOTE_LINE_NUMBER (note) == NOTE_INSN_DELETED - || NOTE_LINE_NUMBER (note) == NOTE_INSN_BASIC_BLOCK - || NOTE_LINE_NUMBER (note) == NOTE_INSN_UNLIKELY_EXECUTED_CODE - || NOTE_LINE_NUMBER (note) == NOTE_INSN_PREDICTION); + switch (NOTE_KIND (note)) + { + case NOTE_INSN_DELETED: + case NOTE_INSN_BASIC_BLOCK: + case NOTE_INSN_EPILOGUE_BEG: + return true; + + default: + return false; + } } /* True if a given label can be deleted. */ static int -can_delete_label_p (rtx label) +can_delete_label_p (const_rtx label) { return (!LABEL_PRESERVE_P (label) /* User declared labels must be preserved. */ && LABEL_NAME (label) == 0 - && !in_expr_list_p (forced_labels, label) - && !in_expr_list_p (label_value_list, label)); + && !in_expr_list_p (forced_labels, label)); } /* Delete INSN by patching it out. Return the next insn. */ @@ -118,19 +120,19 @@ delete_insn (rtx insn) rtx note; bool really_delete = true; - if (GET_CODE (insn) == CODE_LABEL) + if (LABEL_P (insn)) { /* Some labels can't be directly removed from the INSN chain, as they - might be references via variables, constant pool etc. - Convert them to the special NOTE_INSN_DELETED_LABEL note. */ + might be references via variables, constant pool etc. + Convert them to the special NOTE_INSN_DELETED_LABEL note. */ if (! can_delete_label_p (insn)) { const char *name = LABEL_NAME (insn); really_delete = false; PUT_CODE (insn, NOTE); - NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL; - NOTE_SOURCE_FILE (insn) = name; + NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL; + NOTE_DELETED_LABEL_NAME (insn) = name; } remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels); @@ -139,33 +141,38 @@ delete_insn (rtx insn) if (really_delete) { /* If this insn has already been deleted, something is very wrong. */ - if (INSN_DELETED_P (insn)) - abort (); + gcc_assert (!INSN_DELETED_P (insn)); remove_insn (insn); INSN_DELETED_P (insn) = 1; } /* If deleting a jump, decrement the use count of the label. Deleting the label itself should happen in the normal course of block merging. 
*/ - if (GET_CODE (insn) == JUMP_INSN - && JUMP_LABEL (insn) - && GET_CODE (JUMP_LABEL (insn)) == CODE_LABEL) - LABEL_NUSES (JUMP_LABEL (insn))--; - - /* Also if deleting an insn that references a label. */ - else + if (JUMP_P (insn)) { - while ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != NULL_RTX - && GET_CODE (XEXP (note, 0)) == CODE_LABEL) + if (JUMP_LABEL (insn) + && LABEL_P (JUMP_LABEL (insn))) + LABEL_NUSES (JUMP_LABEL (insn))--; + + /* If there are more targets, remove them too. */ + while ((note + = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX + && LABEL_P (XEXP (note, 0))) { LABEL_NUSES (XEXP (note, 0))--; remove_note (insn, note); } } - if (GET_CODE (insn) == JUMP_INSN - && (GET_CODE (PATTERN (insn)) == ADDR_VEC - || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) + /* Also if deleting any insn that references a label as an operand. */ + while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX + && LABEL_P (XEXP (note, 0))) + { + LABEL_NUSES (XEXP (note, 0))--; + remove_note (insn, note); + } + + if (JUMP_TABLE_DATA_P (insn)) { rtx pat = PATTERN (insn); int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC; @@ -179,7 +186,7 @@ delete_insn (rtx insn) /* When deleting code in bulk (e.g. removing many unreachable blocks) we can delete a label that's a target of the vector before deleting the vector itself. */ - if (GET_CODE (label) != NOTE) + if (!NOTE_P (label)) LABEL_NUSES (label)--; } } @@ -188,6 +195,7 @@ delete_insn (rtx insn) } /* Like delete_insn but also purge dead edges from BB. */ + rtx delete_insn_and_edges (rtx insn) { @@ -205,10 +213,11 @@ delete_insn_and_edges (rtx insn) } /* Unlink a chain of insns between START and FINISH, leaving notes - that must be paired. */ + that must be paired. If CLEAR_BB is true, we set bb field for + insns that cannot be removed to NULL. */ void -delete_insn_chain (rtx start, rtx finish) +delete_insn_chain (rtx start, rtx finish, bool clear_bb) { rtx next; @@ -218,31 +227,19 @@ delete_insn_chain (rtx start, rtx finish) while (1) { next = NEXT_INSN (start); - if (GET_CODE (start) == NOTE && !can_delete_note_p (start)) + if (NOTE_P (start) && !can_delete_note_p (start)) ; else next = delete_insn (start); + if (clear_bb && !INSN_DELETED_P (start)) + set_block_for_insn (start, NULL); + if (start == finish) break; start = next; } } - -/* Like delete_insn but also purge dead edges from BB. */ -void -delete_insn_chain_and_edges (rtx first, rtx last) -{ - bool purge = false; - - if (INSN_P (last) - && BLOCK_FOR_INSN (last) - && BB_END (BLOCK_FOR_INSN (last)) == last) - purge = true; - delete_insn_chain (first, last); - if (purge) - purge_dead_edges (BLOCK_FOR_INSN (last)); -} /* Create a new basic block consisting of the instructions between HEAD and END inclusive. 
This function is designed to allow fast BB construction - reuses @@ -265,7 +262,7 @@ create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after) rtx after; - if (GET_CODE (head) == CODE_LABEL) + if (LABEL_P (head)) after = head; else { @@ -282,10 +279,11 @@ create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after) bb = alloc_block (); + init_rtl_bb_info (bb); if (!head && !end) head = end = bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ()); - else if (GET_CODE (head) == CODE_LABEL && end) + else if (LABEL_P (head) && end) { bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head); if (head == end) @@ -309,11 +307,12 @@ create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after) BB_HEAD (bb) = head; BB_END (bb) = end; bb->index = last_basic_block++; - bb->flags = BB_NEW; + bb->flags = BB_NEW | BB_RTL; link_block (bb, after); - BASIC_BLOCK (bb->index) = bb; + SET_BASIC_BLOCK (bb->index, bb); + df_bb_refs_record (bb->index, false); update_bb_for_insn (bb); - bb->partition = UNPARTITIONED; + BB_SET_PARTITION (bb, BB_UNPARTITIONED); /* Tag the block so that we know it has been used when considering other basic block notes. */ @@ -330,14 +329,14 @@ create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after) static basic_block rtl_create_basic_block (void *headp, void *endp, basic_block after) { - rtx head = headp, end = endp; + rtx head = (rtx) headp, end = (rtx) endp; basic_block bb; /* Grow the basic block array if needed. */ - if ((size_t) last_basic_block >= VARRAY_SIZE (basic_block_info)) + if ((size_t) last_basic_block >= VEC_length (basic_block, basic_block_info)) { size_t new_size = last_basic_block + (last_basic_block + 3) / 4; - VARRAY_GROW (basic_block_info, new_size); + VEC_safe_grow_cleared (basic_block, gc, basic_block_info, new_size); } n_basic_blocks++; @@ -352,7 +351,6 @@ cfg_layout_create_basic_block (void *head, void *end, basic_block after) { basic_block newbb = rtl_create_basic_block (head, end, after); - initialize_bb_rbi (newbb); return newbb; } @@ -367,45 +365,23 @@ cfg_layout_create_basic_block (void *head, void *end, basic_block after) static void rtl_delete_block (basic_block b) { - rtx insn, end, tmp; + rtx insn, end; /* If the head of this block is a CODE_LABEL, then it might be the - label for an exception handler which can't be reached. - - We need to remove the label from the exception_handler_label list - and remove the associated NOTE_INSN_EH_REGION_BEG and - NOTE_INSN_EH_REGION_END notes. */ - - /* Get rid of all NOTE_INSN_PREDICTIONs and NOTE_INSN_LOOP_CONTs - hanging before the block. */ - - for (insn = PREV_INSN (BB_HEAD (b)); insn; insn = PREV_INSN (insn)) - { - if (GET_CODE (insn) != NOTE) - break; - if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION - || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT) - NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED; - } - + label for an exception handler which can't be reached. We need + to remove the label from the exception_handler_label list. */ insn = BB_HEAD (b); - if (GET_CODE (insn) == CODE_LABEL) - maybe_remove_eh_handler (insn); - - /* Include any jump table following the basic block. */ - end = BB_END (b); - if (tablejump_p (end, NULL, &tmp)) - end = tmp; - - /* Include any barrier that may follow the basic block. */ - tmp = next_nonnote_insn (end); - if (tmp && GET_CODE (tmp) == BARRIER) - end = tmp; + end = get_last_bb_insn (b); /* Selectively delete the entire chain. 
*/ BB_HEAD (b) = NULL; - delete_insn_chain (insn, end); + delete_insn_chain (insn, end, true); + + + if (dump_file) + fprintf (dump_file, "deleting block %d\n", b->index); + df_bb_delete (b->index); } /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */ @@ -431,38 +407,119 @@ compute_bb_for_insn (void) /* Release the basic_block_for_insn array. */ -void +unsigned int free_bb_for_insn (void) { rtx insn; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) - if (GET_CODE (insn) != BARRIER) + if (!BARRIER_P (insn)) BLOCK_FOR_INSN (insn) = NULL; + return 0; } +static unsigned int +rest_of_pass_free_cfg (void) +{ +#ifdef DELAY_SLOTS + /* The resource.c machinery uses DF but the CFG isn't guaranteed to be + valid at that point so it would be too late to call df_analyze. */ + if (optimize > 0 && flag_delayed_branch) + { + df_note_add_problem (); + df_analyze (); + } +#endif + + free_bb_for_insn (); + return 0; +} + +struct rtl_opt_pass pass_free_cfg = +{ + { + RTL_PASS, + "*free_cfg", /* name */ + NULL, /* gate */ + rest_of_pass_free_cfg, /* execute */ + NULL, /* sub */ + NULL, /* next */ + 0, /* static_pass_number */ + TV_NONE, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + PROP_cfg, /* properties_destroyed */ + 0, /* todo_flags_start */ + 0, /* todo_flags_finish */ + } +}; + /* Return RTX to emit after when we want to emit code on the entry of function. */ rtx entry_of_function (void) { - return (n_basic_blocks ? BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ()); + return (n_basic_blocks > NUM_FIXED_BLOCKS ? + BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ()); +} + +/* Emit INSN at the entry point of the function, ensuring that it is only + executed once per function. */ +void +emit_insn_at_entry (rtx insn) +{ + edge_iterator ei = ei_start (ENTRY_BLOCK_PTR->succs); + edge e = ei_safe_edge (ei); + gcc_assert (e->flags & EDGE_FALLTHRU); + + insert_insn_on_edge (insn, e); + commit_edge_insertions (); +} + +/* Update BLOCK_FOR_INSN of insns between BEGIN and END + (or BARRIER if found) and notify df of the bb change. + The insn chain range is inclusive + (i.e. both BEGIN and END will be updated. */ + +static void +update_bb_for_insn_chain (rtx begin, rtx end, basic_block bb) +{ + rtx insn; + + end = NEXT_INSN (end); + for (insn = begin; insn != end; insn = NEXT_INSN (insn)) + if (!BARRIER_P (insn)) + df_insn_change_bb (insn, bb); } -/* Update insns block within BB. */ +/* Update BLOCK_FOR_INSN of insns in BB to BB, + and notify df of the change. */ void update_bb_for_insn (basic_block bb) { + update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb); +} + + +/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK + note associated with the BLOCK. */ + +static rtx +first_insn_after_basic_block_note (basic_block block) +{ rtx insn; - for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) - { - if (GET_CODE (insn) != BARRIER) - set_block_for_insn (insn, bb); - if (insn == BB_END (bb)) - break; - } + /* Get the first instruction in the block. */ + insn = BB_HEAD (block); + + if (insn == NULL_RTX) + return NULL_RTX; + if (LABEL_P (insn)) + insn = NEXT_INSN (insn); + gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn)); + + return NEXT_INSN (insn); } - + /* Creates a new basic block just after basic block B by splitting everything after specified instruction I. 
*/ @@ -470,15 +527,35 @@ static basic_block rtl_split_block (basic_block bb, void *insnp) { basic_block new_bb; - rtx insn = insnp; + rtx insn = (rtx) insnp; edge e; + edge_iterator ei; if (!insn) { insn = first_insn_after_basic_block_note (bb); if (insn) - insn = PREV_INSN (insn); + { + rtx next = insn; + + insn = PREV_INSN (insn); + + /* If the block contains only debug insns, insn would have + been NULL in a non-debug compilation, and then we'd end + up emitting a DELETED note. For -fcompare-debug + stability, emit the note too. */ + if (insn != BB_END (bb) + && DEBUG_INSN_P (next) + && DEBUG_INSN_P (BB_END (bb))) + { + while (next != BB_END (bb) && DEBUG_INSN_P (next)) + next = NEXT_INSN (next); + + if (next == BB_END (bb)) + emit_note_after (NOTE_INSN_DELETED, next); + } + } else insn = get_last_insn (); } @@ -491,40 +568,17 @@ rtl_split_block (basic_block bb, void *insnp) /* Create the new basic block. */ new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb); + BB_COPY_PARTITION (new_bb, bb); BB_END (bb) = insn; /* Redirect the outgoing edges. */ - new_bb->succ = bb->succ; - bb->succ = NULL; - for (e = new_bb->succ; e; e = e->succ_next) + new_bb->succs = bb->succs; + bb->succs = NULL; + FOR_EACH_EDGE (e, ei, new_bb->succs) e->src = new_bb; - if (bb->global_live_at_start) - { - new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); - new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); - COPY_REG_SET (new_bb->global_live_at_end, bb->global_live_at_end); - - /* We now have to calculate which registers are live at the end - of the split basic block and at the start of the new basic - block. Start with those registers that are known to be live - at the end of the original basic block and get - propagate_block to determine which registers are live. */ - COPY_REG_SET (new_bb->global_live_at_start, bb->global_live_at_end); - propagate_block (new_bb, new_bb->global_live_at_start, NULL, NULL, 0); - COPY_REG_SET (bb->global_live_at_end, - new_bb->global_live_at_start); -#ifdef HAVE_conditional_execution - /* In the presence of conditional execution we are not able to update - liveness precisely. */ - if (reload_completed) - { - bb->flags |= BB_DIRTY; - new_bb->flags |= BB_DIRTY; - } -#endif - } - + /* The new block starts off being dirty. */ + df_set_bb_dirty (bb); return new_bb; } @@ -536,10 +590,19 @@ rtl_merge_blocks (basic_block a, basic_block b) { rtx b_head = BB_HEAD (b), b_end = BB_END (b), a_end = BB_END (a); rtx del_first = NULL_RTX, del_last = NULL_RTX; + rtx b_debug_start = b_end, b_debug_end = b_end; + bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0; int b_empty = 0; + if (dump_file) + fprintf (dump_file, "Merging block %d into block %d...\n", b->index, + a->index); + + while (DEBUG_INSN_P (b_end)) + b_end = PREV_INSN (b_debug_start = b_end); + /* If there was a CODE_LABEL beginning B, delete it. */ - if (GET_CODE (b_head) == CODE_LABEL) + if (LABEL_P (b_head)) { /* Detect basic blocks with nothing but a label. This can happen in particular at the end of a function. */ @@ -564,13 +627,13 @@ rtl_merge_blocks (basic_block a, basic_block b) } /* If there was a jump out of A, delete it. 
*/ - if (GET_CODE (a_end) == JUMP_INSN) + if (JUMP_P (a_end)) { rtx prev; for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev)) - if (GET_CODE (prev) != NOTE - || NOTE_LINE_NUMBER (prev) == NOTE_INSN_BASIC_BLOCK + if (!NOTE_P (prev) + || NOTE_INSN_BASIC_BLOCK_P (prev) || prev == BB_HEAD (a)) break; @@ -592,57 +655,76 @@ rtl_merge_blocks (basic_block a, basic_block b) a_end = PREV_INSN (del_first); } - else if (GET_CODE (NEXT_INSN (a_end)) == BARRIER) + else if (BARRIER_P (NEXT_INSN (a_end))) del_first = NEXT_INSN (a_end); /* Delete everything marked above as well as crap that might be hanging out between the two blocks. */ BB_HEAD (b) = NULL; - delete_insn_chain (del_first, del_last); + delete_insn_chain (del_first, del_last, true); /* Reassociate the insns of B with A. */ if (!b_empty) { - rtx x; - - for (x = a_end; x != b_end; x = NEXT_INSN (x)) - set_block_for_insn (x, a); + update_bb_for_insn_chain (a_end, b_debug_end, a); - set_block_for_insn (b_end, a); - - a_end = b_end; + a_end = b_debug_end; + } + else if (b_end != b_debug_end) + { + /* Move any deleted labels and other notes between the end of A + and the debug insns that make up B after the debug insns, + bringing the debug insns into A while keeping the notes after + the end of A. */ + if (NEXT_INSN (a_end) != b_debug_start) + reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start), + b_debug_end); + update_bb_for_insn_chain (b_debug_start, b_debug_end, a); + a_end = b_debug_end; } + df_bb_delete (b->index); BB_END (a) = a_end; + + /* If B was a forwarder block, propagate the locus on the edge. */ + if (forwarder_p && !EDGE_SUCC (b, 0)->goto_locus) + EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus; + + if (dump_file) + fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index); } + /* Return true when block A and B can be merged. */ + static bool -rtl_can_merge_blocks (basic_block a,basic_block b) +rtl_can_merge_blocks (basic_block a, basic_block b) { - bool partitions_ok = true; - /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot - and cold sections. */ - - if (flag_reorder_blocks_and_partition - && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX) - || find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX) - || a->partition != b->partition)) - partitions_ok = false; + and cold sections. + + Basic block partitioning may result in some jumps that appear to + be optimizable (or blocks that appear to be mergeable), but which really + must be left untouched (they are required to make it safely across + partition boundaries). See the comments at the top of + bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ + + if (BB_PARTITION (a) != BB_PARTITION (b)) + return false; /* There must be exactly one edge in between the blocks. */ - return (a->succ && !a->succ->succ_next && a->succ->dest == b - && !b->pred->pred_next && a != b + return (single_succ_p (a) + && single_succ (a) == b + && single_pred_p (b) + && a != b /* Must be simple edge. */ - && !(a->succ->flags & EDGE_COMPLEX) - && partitions_ok + && !(single_succ_edge (a)->flags & EDGE_COMPLEX) && a->next_bb == b && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR /* If the jump insn has side effects, we can't kill the edge. */ - && (GET_CODE (BB_END (a)) != JUMP_INSN + && (!JUMP_P (BB_END (a)) || (reload_completed ? 
simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a))))); } @@ -656,7 +738,7 @@ block_label (basic_block block) if (block == EXIT_BLOCK_PTR) return NULL_RTX; - if (GET_CODE (BB_HEAD (block)) != CODE_LABEL) + if (!LABEL_P (BB_HEAD (block))) { BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block)); } @@ -674,25 +756,34 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) { basic_block src = e->src; rtx insn = BB_END (src), kill_from; - edge tmp; rtx set; int fallthru = 0; - /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot - and cold sections. */ - - if (flag_reorder_blocks_and_partition - && find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX)) + and cold sections. + + Basic block partitioning may result in some jumps that appear to + be optimizable (or blocks that appear to be mergeable), but which really + must be left untouched (they are required to make it safely across + partition boundaries). See the comments at the top of + bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ + + if (find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX) + || BB_PARTITION (src) != BB_PARTITION (target)) return NULL; - /* Verify that all targets will be TARGET. */ - for (tmp = src->succ; tmp; tmp = tmp->succ_next) - if (tmp->dest != target && tmp != e) - break; + /* We can replace or remove a complex jump only when we have exactly + two edges. Also, if we have exactly one outgoing edge, we can + redirect that. */ + if (EDGE_COUNT (src->succs) >= 3 + /* Verify that all targets will be TARGET. Specifically, the + edge that is not E must also go to TARGET. */ + || (EDGE_COUNT (src->succs) == 2 + && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target)) + return NULL; - if (tmp || !onlyjump_p (insn)) + if (!onlyjump_p (insn)) return NULL; if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL)) return NULL; @@ -706,7 +797,8 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) the cc0 setter too. */ kill_from = insn; #ifdef HAVE_cc0 - if (reg_mentioned_p (cc0_rtx, PATTERN (insn))) + if (reg_mentioned_p (cc0_rtx, PATTERN (insn)) + && only_sets_cc0_p (PREV_INSN (insn))) kill_from = PREV_INSN (insn); #endif @@ -720,29 +812,30 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) /* Selectively unlink whole insn chain. */ if (in_cfglayout) { - rtx insn = src->rbi->footer; + rtx insn = src->il.rtl->footer; - delete_insn_chain (kill_from, BB_END (src)); + delete_insn_chain (kill_from, BB_END (src), false); /* Remove barriers but keep jumptables. */ while (insn) { - if (GET_CODE (insn) == BARRIER) + if (BARRIER_P (insn)) { if (PREV_INSN (insn)) NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn); else - src->rbi->footer = NEXT_INSN (insn); + src->il.rtl->footer = NEXT_INSN (insn); if (NEXT_INSN (insn)) PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn); } - if (GET_CODE (insn) == CODE_LABEL) + if (LABEL_P (insn)) break; insn = NEXT_INSN (insn); } } else - delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target))); + delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)), + false); } /* If this already is simplejump, redirect it. 
*/ @@ -755,9 +848,8 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) INSN_UID (insn), e->dest->index, target->index); if (!redirect_jump (insn, block_label (target), 0)) { - if (target == EXIT_BLOCK_PTR) - return NULL; - abort (); + gcc_assert (target == EXIT_BLOCK_PTR); + return NULL; } } @@ -771,7 +863,7 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) rtx target_label = block_label (target); rtx barrier, label, table; - emit_jump_insn_after (gen_jump (target_label), insn); + emit_jump_insn_after_noloc (gen_jump (target_label), insn); JUMP_LABEL (BB_END (src)) = target_label; LABEL_NUSES (target_label)++; if (dump_file) @@ -779,16 +871,16 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) INSN_UID (insn), INSN_UID (BB_END (src))); - delete_insn_chain (kill_from, insn); + delete_insn_chain (kill_from, insn, false); /* Recognize a tablejump that we are converting to a simple jump and remove its associated CODE_LABEL and ADDR_VEC or ADDR_DIFF_VEC. */ if (tablejump_p (insn, &label, &table)) - delete_insn_chain (label, table); + delete_insn_chain (label, table, false); barrier = next_nonnote_insn (BB_END (src)); - if (!barrier || GET_CODE (barrier) != BARRIER) + if (!barrier || !BARRIER_P (barrier)) emit_barrier_after (BB_END (src)); else { @@ -798,11 +890,9 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) which originally were or were created before jump table are inside the basic block. */ rtx new_insn = BB_END (src); - rtx tmp; - for (tmp = NEXT_INSN (BB_END (src)); tmp != barrier; - tmp = NEXT_INSN (tmp)) - set_block_for_insn (tmp, src); + update_bb_for_insn_chain (NEXT_INSN (BB_END (src)), + PREV_INSN (barrier), src); NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn); PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn); @@ -817,9 +907,11 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) } /* Keep only one edge out and set proper flags. */ - while (src->succ->succ_next) - remove_edge (src->succ); - e = src->succ; + if (!single_succ_p (src)) + remove_edge (e); + gcc_assert (single_succ_p (src)); + + e = single_succ_edge (src); if (fallthru) e->flags = EDGE_FALLTHRU; else @@ -828,65 +920,30 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) e->probability = REG_BR_PROB_BASE; e->count = src->count; - /* We don't want a block to end on a line-number note since that has - the potential of changing the code between -g and not -g. */ - while (GET_CODE (BB_END (e->src)) == NOTE - && NOTE_LINE_NUMBER (BB_END (e->src)) >= 0) - delete_insn (BB_END (e->src)); - if (e->dest != target) redirect_edge_succ (e, target); - return e; } -/* Return last loop_beg note appearing after INSN, before start of next - basic block. Return INSN if there are no such notes. - - When emitting jump to redirect a fallthru edge, it should always appear - after the LOOP_BEG notes, as loop optimizer expect loop to either start by - fallthru edge or jump following the LOOP_BEG note jumping to the loop exit - test. 
*/ - -static rtx -last_loop_beg_note (rtx insn) -{ - rtx last = insn; - - for (insn = NEXT_INSN (insn); insn && GET_CODE (insn) == NOTE - && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK; - insn = NEXT_INSN (insn)) - if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) - last = insn; - - return last; -} +/* Subroutine of redirect_branch_edge that tries to patch the jump + instruction INSN so that it reaches block NEW. Do this + only when it originally reached block OLD. Return true if this + worked or the original target wasn't OLD, return false if redirection + doesn't work. */ -/* Redirect edge representing branch of (un)conditional jump or tablejump, - NULL on failure */ -static edge -redirect_branch_edge (edge e, basic_block target) +static bool +patch_jump_insn (rtx insn, rtx old_label, basic_block new_bb) { rtx tmp; - rtx old_label = BB_HEAD (e->dest); - basic_block src = e->src; - rtx insn = BB_END (src); - - /* We can only redirect non-fallthru edges of jump insn. */ - if (e->flags & EDGE_FALLTHRU) - return NULL; - else if (GET_CODE (insn) != JUMP_INSN) - return NULL; - /* Recognize a tablejump and adjust all matching cases. */ if (tablejump_p (insn, NULL, &tmp)) { rtvec vec; int j; - rtx new_label = block_label (target); + rtx new_label = block_label (new_bb); - if (target == EXIT_BLOCK_PTR) - return NULL; + if (new_bb == EXIT_BLOCK_PTR) + return false; if (GET_CODE (PATTERN (tmp)) == ADDR_VEC) vec = XVEC (PATTERN (tmp), 0); else @@ -907,12 +964,54 @@ redirect_branch_edge (edge e, basic_block target) && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF && XEXP (XEXP (SET_SRC (tmp), 2), 0) == old_label) { - XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (VOIDmode, + XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode, new_label); --LABEL_NUSES (old_label); ++LABEL_NUSES (new_label); } } + else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL) + { + int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp); + rtx new_label, note; + + if (new_bb == EXIT_BLOCK_PTR) + return false; + new_label = block_label (new_bb); + + for (i = 0; i < n; ++i) + { + rtx old_ref = ASM_OPERANDS_LABEL (tmp, i); + gcc_assert (GET_CODE (old_ref) == LABEL_REF); + if (XEXP (old_ref, 0) == old_label) + { + ASM_OPERANDS_LABEL (tmp, i) + = gen_rtx_LABEL_REF (Pmode, new_label); + --LABEL_NUSES (old_label); + ++LABEL_NUSES (new_label); + } + } + + if (JUMP_LABEL (insn) == old_label) + { + JUMP_LABEL (insn) = new_label; + note = find_reg_note (insn, REG_LABEL_TARGET, new_label); + if (note) + remove_note (insn, note); + } + else + { + note = find_reg_note (insn, REG_LABEL_TARGET, old_label); + if (note) + remove_note (insn, note); + if (JUMP_LABEL (insn) != new_label + && !find_reg_note (insn, REG_LABEL_TARGET, new_label)) + add_reg_note (insn, REG_LABEL_TARGET, new_label); + } + while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label)) + != NULL_RTX) + XEXP (note, 0) = new_label; + } else { /* ?? We may play the games with moving the named labels from @@ -921,22 +1020,54 @@ redirect_branch_edge (edge e, basic_block target) if (computed_jump_p (insn) /* A return instruction can't be redirected. */ || returnjump_p (insn)) - return NULL; - - /* If the insn doesn't go where we think, we're confused. */ - if (JUMP_LABEL (insn) != old_label) - abort (); + return false; - /* If the substitution doesn't succeed, die. This can happen - if the back end emitted unrecognizable instructions or if - target is exit block on some arches. 
*/ - if (!redirect_jump (insn, block_label (target), 0)) + if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label) { - if (target == EXIT_BLOCK_PTR) - return NULL; - abort (); + /* If the insn doesn't go where we think, we're confused. */ + gcc_assert (JUMP_LABEL (insn) == old_label); + + /* If the substitution doesn't succeed, die. This can happen + if the back end emitted unrecognizable instructions or if + target is exit block on some arches. */ + if (!redirect_jump (insn, block_label (new_bb), 0)) + { + gcc_assert (new_bb == EXIT_BLOCK_PTR); + return false; + } } } + return true; +} + + +/* Redirect edge representing branch of (un)conditional jump or tablejump, + NULL on failure */ +static edge +redirect_branch_edge (edge e, basic_block target) +{ + rtx old_label = BB_HEAD (e->dest); + basic_block src = e->src; + rtx insn = BB_END (src); + + /* We can only redirect non-fallthru edges of jump insn. */ + if (e->flags & EDGE_FALLTHRU) + return NULL; + else if (!JUMP_P (insn) && !currently_expanding_to_rtl) + return NULL; + + if (!currently_expanding_to_rtl) + { + if (!patch_jump_insn (insn, old_label, target)) + return NULL; + } + else + /* When expanding this BB might actually contain multiple + jumps (i.e. not yet split by find_many_sub_basic_blocks). + Redirect all of those that match our label. */ + FOR_BB_INSNS (src, insn) + if (JUMP_P (insn) && !patch_jump_insn (insn, old_label, target)) + return NULL; if (dump_file) fprintf (dump_file, "Edge %i->%i redirected to %i\n", @@ -944,6 +1075,7 @@ redirect_branch_edge (edge e, basic_block target) if (e->dest != target) e = redirect_edge_succ_nodup (e, target); + return e; } @@ -972,7 +1104,7 @@ rtl_redirect_edge_and_branch (edge e, basic_block target) if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL) { - src->flags |= BB_DIRTY; + df_set_bb_dirty (src); return ret; } @@ -980,36 +1112,36 @@ rtl_redirect_edge_and_branch (edge e, basic_block target) if (!ret) return NULL; - src->flags |= BB_DIRTY; + df_set_bb_dirty (src); return ret; } /* Like force_nonfallthru below, but additionally performs redirection Used by redirect_edge_and_branch_force. */ -basic_block +static basic_block force_nonfallthru_and_redirect (edge e, basic_block target) { basic_block jump_block, new_bb = NULL, src = e->src; rtx note; edge new_edge; int abnormal_edge_flags = 0; + int loc; /* In the case the last instruction is conditional jump to the next instruction, first redirect the jump itself and then continue by creating a basic block afterwards to redirect fallthru edge. */ if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR && any_condjump_p (BB_END (e->src)) - /* When called from cfglayout, fallthru edges do not - necessarily go to the next block. */ - && e->src->next_bb == e->dest && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest)) { rtx note; edge b = unchecked_make_edge (e->src, target, 0); + bool redirected; + + redirected = redirect_jump (BB_END (e->src), block_label (target), 0); + gcc_assert (redirected); - if (!redirect_jump (BB_END (e->src), block_label (target), 0)) - abort (); note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX); if (note) { @@ -1033,35 +1165,47 @@ force_nonfallthru_and_redirect (edge e, basic_block target) We can't redirect abnormal edge, but we still can split the fallthru one and create separate abnormal edge to original destination. This allows bb-reorder to make such edge non-fallthru. 
*/ - if (e->dest != target) - abort (); + gcc_assert (e->dest == target); abnormal_edge_flags = e->flags & ~(EDGE_FALLTHRU | EDGE_CAN_FALLTHRU); e->flags &= EDGE_FALLTHRU | EDGE_CAN_FALLTHRU; } - else if (!(e->flags & EDGE_FALLTHRU)) - abort (); - else if (e->src == ENTRY_BLOCK_PTR) + else { - /* We can't redirect the entry block. Create an empty block at the - start of the function which we use to add the new jump. */ - edge *pe1; - basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR); + gcc_assert (e->flags & EDGE_FALLTHRU); + if (e->src == ENTRY_BLOCK_PTR) + { + /* We can't redirect the entry block. Create an empty block + at the start of the function which we use to add the new + jump. */ + edge tmp; + edge_iterator ei; + bool found = false; + + basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR); + + /* Change the existing edge's source to be the new block, and add + a new edge from the entry block to the new block. */ + e->src = bb; + for (ei = ei_start (ENTRY_BLOCK_PTR->succs); (tmp = ei_safe_edge (ei)); ) + { + if (tmp == e) + { + VEC_unordered_remove (edge, ENTRY_BLOCK_PTR->succs, ei.index); + found = true; + break; + } + else + ei_next (&ei); + } - /* Change the existing edge's source to be the new block, and add - a new edge from the entry block to the new block. */ - e->src = bb; - for (pe1 = &ENTRY_BLOCK_PTR->succ; *pe1; pe1 = &(*pe1)->succ_next) - if (*pe1 == e) - { - *pe1 = e->succ_next; - break; - } - e->succ_next = 0; - bb->succ = e; - make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU); + gcc_assert (found); + + VEC_safe_push (edge, gc, bb->succs, e); + make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU); + } } - if (e->src->succ->succ_next || abnormal_edge_flags) + if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags) { /* Create the new structures. */ @@ -1070,9 +1214,6 @@ force_nonfallthru_and_redirect (edge e, basic_block target) forward from the last instruction of the old block. */ if (!tablejump_p (BB_END (e->src), NULL, ¬e)) note = BB_END (e->src); - - /* Position the new block correctly relative to loop notes. */ - note = last_loop_beg_note (note); note = NEXT_INSN (note); jump_block = create_basic_block (note, NULL, e->src); @@ -1080,44 +1221,15 @@ force_nonfallthru_and_redirect (edge e, basic_block target) jump_block->frequency = EDGE_FREQUENCY (e); jump_block->loop_depth = target->loop_depth; - if (target->global_live_at_start) - { - jump_block->global_live_at_start - = OBSTACK_ALLOC_REG_SET (&flow_obstack); - jump_block->global_live_at_end - = OBSTACK_ALLOC_REG_SET (&flow_obstack); - COPY_REG_SET (jump_block->global_live_at_start, - target->global_live_at_start); - COPY_REG_SET (jump_block->global_live_at_end, - target->global_live_at_start); - } - /* Make sure new block ends up in correct hot/cold section. 
*/ - jump_block->partition = e->src->partition; - if (flag_reorder_blocks_and_partition) - { - if (e->src->partition == COLD_PARTITION) - { - rtx bb_note, new_note; - for (bb_note = BB_HEAD (jump_block); - bb_note && bb_note != NEXT_INSN (BB_END (jump_block)); - bb_note = NEXT_INSN (bb_note)) - if (GET_CODE (bb_note) == NOTE - && NOTE_LINE_NUMBER (bb_note) == NOTE_INSN_BASIC_BLOCK) - break; - new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE, - bb_note); - NOTE_BASIC_BLOCK (new_note) = jump_block; - jump_block->partition = COLD_PARTITION; - } - if (GET_CODE (BB_END (jump_block)) == JUMP_INSN - && !any_condjump_p (BB_END (jump_block)) - && jump_block->succ->crossing_edge ) - REG_NOTES (BB_END (jump_block)) = gen_rtx_EXPR_LIST - (REG_CROSSING_JUMP, NULL_RTX, - REG_NOTES (BB_END (jump_block))); - } + BB_COPY_PARTITION (jump_block, e->src); + if (flag_reorder_blocks_and_partition + && targetm_common.have_named_sections + && JUMP_P (BB_END (jump_block)) + && !any_condjump_p (BB_END (jump_block)) + && (EDGE_SUCC (jump_block, 0)->flags & EDGE_CROSSING)) + add_reg_note (BB_END (jump_block), REG_CROSSING_JUMP, NULL_RTX); /* Wire edge in. */ new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU); @@ -1133,19 +1245,23 @@ force_nonfallthru_and_redirect (edge e, basic_block target) else jump_block = e->src; + if (e->goto_locus && e->goto_block == NULL) + loc = e->goto_locus; + else + loc = 0; e->flags &= ~EDGE_FALLTHRU; if (target == EXIT_BLOCK_PTR) { #ifdef HAVE_return - emit_jump_insn_after (gen_return (), BB_END (jump_block)); + emit_jump_insn_after_setloc (gen_return (), BB_END (jump_block), loc); #else - abort (); + gcc_unreachable (); #endif } else { rtx label = block_label (target); - emit_jump_insn_after (gen_jump (label), BB_END (jump_block)); + emit_jump_insn_after_setloc (gen_jump (label), BB_END (jump_block), loc); JUMP_LABEL (BB_END (jump_block)) = label; LABEL_NUSES (label)++; } @@ -1156,6 +1272,7 @@ force_nonfallthru_and_redirect (edge e, basic_block target) if (abnormal_edge_flags) make_edge (src, target, abnormal_edge_flags); + df_mark_solutions_dirty (); return new_bb; } @@ -1163,15 +1280,15 @@ force_nonfallthru_and_redirect (edge e, basic_block target) (and possibly create new basic block) to make edge non-fallthru. Return newly created BB or NULL if none. */ -basic_block -force_nonfallthru (edge e) +static basic_block +rtl_force_nonfallthru (edge e) { return force_nonfallthru_and_redirect (e, e->dest); } /* Redirect edge even at the expense of creating new jump insn or basic block. Return new basic block if created, NULL otherwise. - Abort if conversion is impossible. */ + Conversion must be possible. */ static basic_block rtl_redirect_edge_and_branch_force (edge e, basic_block target) @@ -1182,6 +1299,7 @@ rtl_redirect_edge_and_branch_force (edge e, basic_block target) /* In case the edge redirection failed, try to force it to be non-fallthru and redirect newly created simplejump. */ + df_set_bb_dirty (e->src); return force_nonfallthru_and_redirect (e, target); } @@ -1196,7 +1314,7 @@ rtl_tidy_fallthru_edge (edge e) /* ??? In a late-running flow pass, other folks may have deleted basic blocks by nopping out blocks, leaving multiple BARRIERs between here - and the target label. They ought to be chastized and fixed. + and the target label. They ought to be chastised and fixed. We can also wind up with a sequence of undeletable labels between one block and the next. 
@@ -1212,10 +1330,10 @@ rtl_tidy_fallthru_edge (edge e) If block B consisted only of this single jump, turn it into a deleted note. */ q = BB_END (b); - if (GET_CODE (q) == JUMP_INSN + if (JUMP_P (q) && onlyjump_p (q) && (any_uncondjump_p (q) - || (b->succ == e && e->succ_next == NULL))) + || single_succ_p (b))) { #ifdef HAVE_cc0 /* If this was a conditional jump, we need to also delete @@ -1225,54 +1343,15 @@ rtl_tidy_fallthru_edge (edge e) #endif q = PREV_INSN (q); - - /* We don't want a block to end on a line-number note since that has - the potential of changing the code between -g and not -g. */ - while (GET_CODE (q) == NOTE && NOTE_LINE_NUMBER (q) >= 0) - q = PREV_INSN (q); } /* Selectively unlink the sequence. */ if (q != PREV_INSN (BB_HEAD (c))) - delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c))); + delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false); e->flags |= EDGE_FALLTHRU; } -/* Helper function for split_edge. Return true in case edge BB2 to BB1 - is back edge of syntactic loop. */ - -static bool -back_edge_of_syntactic_loop_p (basic_block bb1, basic_block bb2) -{ - rtx insn; - int count = 0; - basic_block bb; - - if (bb1 == bb2) - return true; - - /* ??? Could we guarantee that bb indices are monotone, so that we could - just compare them? */ - for (bb = bb1; bb && bb != bb2; bb = bb->next_bb) - continue; - - if (!bb) - return false; - - for (insn = BB_END (bb1); insn != BB_HEAD (bb2) && count >= 0; - insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == NOTE) - { - if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) - count++; - else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END) - count--; - } - - return count >= 0; -} - /* Should move basic block BB after basic block AFTER. NIY. */ static bool @@ -1283,7 +1362,7 @@ rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED, } /* Split a (typically critical) edge. Return the new block. - Abort on abnormal edges. + The edge must not be abnormal. ??? The code generally expects to be called on critical edges. The case of a block ending in an unconditional jump to a @@ -1296,49 +1375,20 @@ rtl_split_edge (edge edge_in) rtx before; /* Abnormal edges cannot be split. */ - if ((edge_in->flags & EDGE_ABNORMAL) != 0) - abort (); + gcc_assert (!(edge_in->flags & EDGE_ABNORMAL)); /* We are going to place the new block in front of edge destination. Avoid existence of fallthru predecessors. */ if ((edge_in->flags & EDGE_FALLTHRU) == 0) { - edge e; - - for (e = edge_in->dest->pred; e; e = e->pred_next) - if (e->flags & EDGE_FALLTHRU) - break; + edge e = find_fallthru_edge (edge_in->dest->preds); if (e) force_nonfallthru (e); } - /* Create the basic block note. - - Where we place the note can have a noticeable impact on the generated - code. Consider this cfg: - - E - | - 0 - / \ - +->1-->2--->E - | | - +--+ - - If we need to insert an insn on the edge from block 0 to block 1, - we want to ensure the instructions we insert are outside of any - loop notes that physically sit between block 0 and block 1. Otherwise - we confuse the loop optimizer into thinking the loop is a phony. */ - - if (edge_in->dest != EXIT_BLOCK_PTR - && PREV_INSN (BB_HEAD (edge_in->dest)) - && GET_CODE (PREV_INSN (BB_HEAD (edge_in->dest))) == NOTE - && (NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (edge_in->dest))) - == NOTE_INSN_LOOP_BEG) - && !back_edge_of_syntactic_loop_p (edge_in->dest, edge_in->src)) - before = PREV_INSN (BB_HEAD (edge_in->dest)); - else if (edge_in->dest != EXIT_BLOCK_PTR) + /* Create the basic block note. 
*/ + if (edge_in->dest != EXIT_BLOCK_PTR) before = BB_HEAD (edge_in->dest); else before = NULL_RTX; @@ -1348,24 +1398,14 @@ rtl_split_edge (edge edge_in) if (edge_in->flags & EDGE_FALLTHRU && edge_in->dest == EXIT_BLOCK_PTR) { before = NEXT_INSN (BB_END (edge_in->src)); - if (before - && GET_CODE (before) == NOTE - && NOTE_LINE_NUMBER (before) == NOTE_INSN_LOOP_END) - before = NEXT_INSN (before); bb = create_basic_block (before, NULL, edge_in->src); + BB_COPY_PARTITION (bb, edge_in->src); } else - bb = create_basic_block (before, NULL, edge_in->dest->prev_bb); - - /* ??? This info is likely going to be out of date very soon. */ - if (edge_in->dest->global_live_at_start) { - bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); - bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); - COPY_REG_SET (bb->global_live_at_start, - edge_in->dest->global_live_at_start); - COPY_REG_SET (bb->global_live_at_end, - edge_in->dest->global_live_at_start); + bb = create_basic_block (before, NULL, edge_in->dest->prev_bb); + /* ??? Why not edge_in->dest->prev_bb here? */ + BB_COPY_PARTITION (bb, edge_in->dest); } make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU); @@ -1374,11 +1414,26 @@ rtl_split_edge (edge edge_in) jump instruction to target our new block. */ if ((edge_in->flags & EDGE_FALLTHRU) == 0) { - if (!redirect_edge_and_branch (edge_in, bb)) - abort (); + edge redirected = redirect_edge_and_branch (edge_in, bb); + gcc_assert (redirected); } else - redirect_edge_succ (edge_in, bb); + { + if (edge_in->src != ENTRY_BLOCK_PTR) + { + /* For asm goto even splitting of fallthru edge might + need insn patching, as other labels might point to the + old label. */ + rtx last = BB_END (edge_in->src); + if (last + && JUMP_P (last) + && edge_in->dest != EXIT_BLOCK_PTR + && extract_asm_operands (PATTERN (last)) != NULL_RTX + && patch_jump_insn (last, before, bb)) + df_set_bb_dirty (edge_in->src); + } + redirect_edge_succ (edge_in, bb); + } return bb; } @@ -1392,8 +1447,7 @@ insert_insn_on_edge (rtx pattern, edge e) { /* We cannot insert instructions on an abnormal critical edge. It will be easier to find the culprit if we die now. */ - if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)) - abort (); + gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))); if (e->insns.r == NULL_RTX) start_sequence (); @@ -1406,248 +1460,100 @@ insert_insn_on_edge (rtx pattern, edge e) end_sequence (); } -/* Called from safe_insert_insn_on_edge through note_stores, marks live - registers that are killed by the store. */ -static void -mark_killed_regs (rtx reg, rtx set ATTRIBUTE_UNUSED, void *data) +/* Update the CFG for the instructions queued on edge E. */ + +void +commit_one_edge_insertion (edge e) { - regset killed = data; - int regno, i; + rtx before = NULL_RTX, after = NULL_RTX, insns, tmp, last; + basic_block bb; - if (GET_CODE (reg) == SUBREG) - reg = SUBREG_REG (reg); - if (!REG_P (reg)) - return; - regno = REGNO (reg); - if (regno >= FIRST_PSEUDO_REGISTER) - SET_REGNO_REG_SET (killed, regno); - else - { - for (i = 0; i < (int) hard_regno_nregs[regno][GET_MODE (reg)]; i++) - SET_REGNO_REG_SET (killed, regno + i); - } -} + /* Pull the insns off the edge now since the edge might go away. */ + insns = e->insns.r; + e->insns.r = NULL_RTX; -/* Similar to insert_insn_on_edge, tries to put INSN to edge E. Additionally - it checks whether this will not clobber the registers that are live on the - edge (i.e. 
it requires liveness information to be up-to-date) and if there - are some, then it tries to save and restore them. Returns true if - successful. */ -bool -safe_insert_insn_on_edge (rtx insn, edge e) -{ - rtx x; - regset_head killed_head; - regset killed = INITIALIZE_REG_SET (killed_head); - rtx save_regs = NULL_RTX; - int regno, noccmode; - enum machine_mode mode; - -#ifdef AVOID_CCMODE_COPIES - noccmode = true; -#else - noccmode = false; -#endif - - for (x = insn; x; x = NEXT_INSN (x)) - if (INSN_P (x)) - note_stores (PATTERN (x), mark_killed_regs, killed); - bitmap_operation (killed, killed, e->dest->global_live_at_start, - BITMAP_AND); - - EXECUTE_IF_SET_IN_REG_SET (killed, 0, regno, - { - mode = regno < FIRST_PSEUDO_REGISTER - ? reg_raw_mode[regno] - : GET_MODE (regno_reg_rtx[regno]); - if (mode == VOIDmode) - return false; - - if (noccmode && mode == CCmode) - return false; - - save_regs = alloc_EXPR_LIST (0, - alloc_EXPR_LIST (0, - gen_reg_rtx (mode), - gen_raw_REG (mode, regno)), - save_regs); - }); - - if (save_regs) + /* Figure out where to put these insns. If the destination has + one predecessor, insert there. Except for the exit block. */ + if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR) { - rtx from, to; + bb = e->dest; - start_sequence (); - for (x = save_regs; x; x = XEXP (x, 1)) - { - from = XEXP (XEXP (x, 0), 1); - to = XEXP (XEXP (x, 0), 0); - emit_move_insn (to, from); - } - emit_insn (insn); - for (x = save_regs; x; x = XEXP (x, 1)) - { - from = XEXP (XEXP (x, 0), 0); - to = XEXP (XEXP (x, 0), 1); - emit_move_insn (to, from); - } - insn = get_insns (); - end_sequence (); - free_EXPR_LIST_list (&save_regs); + /* Get the location correct wrt a code label, and "nice" wrt + a basic block note, and before everything else. */ + tmp = BB_HEAD (bb); + if (LABEL_P (tmp)) + tmp = NEXT_INSN (tmp); + if (NOTE_INSN_BASIC_BLOCK_P (tmp)) + tmp = NEXT_INSN (tmp); + if (tmp == BB_HEAD (bb)) + before = tmp; + else if (tmp) + after = PREV_INSN (tmp); + else + after = get_last_insn (); } - insert_insn_on_edge (insn, e); - - FREE_REG_SET (killed); - return true; -} -/* Update the CFG for the instructions queued on edge E. */ - -static void -commit_one_edge_insertion (edge e, int watch_calls) -{ - rtx before = NULL_RTX, after = NULL_RTX, insns, tmp, last; - basic_block bb = NULL; - - /* Pull the insns off the edge now since the edge might go away. */ - insns = e->insns.r; - e->insns.r = NULL_RTX; - - /* Special case -- avoid inserting code between call and storing - its return value. */ - if (watch_calls && (e->flags & EDGE_FALLTHRU) && !e->dest->pred->pred_next - && e->src != ENTRY_BLOCK_PTR - && GET_CODE (BB_END (e->src)) == CALL_INSN) + /* If the source has one successor and the edge is not abnormal, + insert there. Except for the entry block. */ + else if ((e->flags & EDGE_ABNORMAL) == 0 + && single_succ_p (e->src) + && e->src != ENTRY_BLOCK_PTR) { - rtx next = next_nonnote_insn (BB_END (e->src)); + bb = e->src; - after = BB_HEAD (e->dest); - /* The first insn after the call may be a stack pop, skip it. */ - while (next - && keep_with_call_p (next)) - { - after = next; - next = next_nonnote_insn (next); - } - bb = e->dest; - } - if (!before && !after) - { - /* Figure out where to put these things. If the destination has - one predecessor, insert there. Except for the exit block. 
*/ - if (e->dest->pred->pred_next == NULL && e->dest != EXIT_BLOCK_PTR) - { - bb = e->dest; - - /* Get the location correct wrt a code label, and "nice" wrt - a basic block note, and before everything else. */ - tmp = BB_HEAD (bb); - if (GET_CODE (tmp) == CODE_LABEL) - tmp = NEXT_INSN (tmp); - if (NOTE_INSN_BASIC_BLOCK_P (tmp)) - tmp = NEXT_INSN (tmp); - if (tmp - && GET_CODE (tmp) == NOTE - && NOTE_LINE_NUMBER (tmp) == NOTE_INSN_UNLIKELY_EXECUTED_CODE) - tmp = NEXT_INSN (tmp); - if (tmp == BB_HEAD (bb)) - before = tmp; - else if (tmp) - after = PREV_INSN (tmp); - else - after = get_last_insn (); - } - - /* If the source has one successor and the edge is not abnormal, - insert there. Except for the entry block. */ - else if ((e->flags & EDGE_ABNORMAL) == 0 - && e->src->succ->succ_next == NULL - && e->src != ENTRY_BLOCK_PTR) - { - bb = e->src; - - /* It is possible to have a non-simple jump here. Consider a target - where some forms of unconditional jumps clobber a register. This - happens on the fr30 for example. - - We know this block has a single successor, so we can just emit - the queued insns before the jump. */ - if (GET_CODE (BB_END (bb)) == JUMP_INSN) - for (before = BB_END (bb); - GET_CODE (PREV_INSN (before)) == NOTE - && NOTE_LINE_NUMBER (PREV_INSN (before)) == - NOTE_INSN_LOOP_BEG; before = PREV_INSN (before)) - ; - else - { - /* We'd better be fallthru, or we've lost track of what's what. */ - if ((e->flags & EDGE_FALLTHRU) == 0) - abort (); + /* It is possible to have a non-simple jump here. Consider a target + where some forms of unconditional jumps clobber a register. This + happens on the fr30 for example. - after = BB_END (bb); - } - } - /* Otherwise we must split the edge. */ + We know this block has a single successor, so we can just emit + the queued insns before the jump. */ + if (JUMP_P (BB_END (bb))) + before = BB_END (bb); else { - bb = split_edge (e); + /* We'd better be fallthru, or we've lost track of what's what. */ + gcc_assert (e->flags & EDGE_FALLTHRU); + after = BB_END (bb); + } + } - /* If we are partitioning hot/cold basic blocks, we must make sure - that the new basic block ends up in the correct section. */ + /* Otherwise we must split the edge. */ + else + { + bb = split_edge (e); + after = BB_END (bb); - bb->partition = e->src->partition; - if (flag_reorder_blocks_and_partition - && e->src != ENTRY_BLOCK_PTR - && e->src->partition == COLD_PARTITION) - { - rtx bb_note, new_note, cur_insn; - - bb_note = NULL_RTX; - for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb)); - cur_insn = NEXT_INSN (cur_insn)) - if (GET_CODE (cur_insn) == NOTE - && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_BASIC_BLOCK) - { - bb_note = cur_insn; - break; - } - - new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE, - bb_note); - NOTE_BASIC_BLOCK (new_note) = bb; - if (GET_CODE (BB_END (bb)) == JUMP_INSN - && !any_condjump_p (BB_END (bb)) - && bb->succ->crossing_edge ) - REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST - (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb))); - if (after == bb_note) - after = new_note; - } - } + if (flag_reorder_blocks_and_partition + && targetm_common.have_named_sections + && e->src != ENTRY_BLOCK_PTR + && BB_PARTITION (e->src) == BB_COLD_PARTITION + && !(e->flags & EDGE_CROSSING) + && JUMP_P (after) + && !any_condjump_p (after) + && (single_succ_edge (bb)->flags & EDGE_CROSSING)) + add_reg_note (after, REG_CROSSING_JUMP, NULL_RTX); } /* Now that we've found the spot, do the insertion. 
*/ - if (before) { - emit_insn_before (insns, before); + emit_insn_before_noloc (insns, before, bb); last = prev_nonnote_insn (before); } else - last = emit_insn_after (insns, after); + last = emit_insn_after_noloc (insns, after, bb); if (returnjump_p (last)) { /* ??? Remove all outgoing edges from BB and add one for EXIT. - This is not currently a problem because this only happens - for the (single) epilogue, which already has a fallthru edge - to EXIT. */ + This is not currently a problem because this only happens + for the (single) epilogue, which already has a fallthru edge + to EXIT. */ - e = bb->succ; - if (e->dest != EXIT_BLOCK_PTR - || e->succ_next != NULL || (e->flags & EDGE_FALLTHRU) == 0) - abort (); + e = single_succ_edge (bb); + gcc_assert (e->dest == EXIT_BLOCK_PTR + && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU)); e->flags &= ~EDGE_FALLTHRU; emit_barrier_after (last); @@ -1655,11 +1561,8 @@ commit_one_edge_insertion (edge e, int watch_calls) if (before) delete_insn (before); } - else if (GET_CODE (last) == JUMP_INSN) - abort (); - - /* Mark the basic block for find_sub_basic_blocks. */ - bb->aux = &bb->aux; + else + gcc_assert (!JUMP_P (last)); } /* Update the CFG for all queued instructions. */ @@ -1668,8 +1571,6 @@ void commit_edge_insertions (void) { basic_block bb; - sbitmap blocks; - bool changed = false; #ifdef ENABLE_CHECKING verify_flow_info (); @@ -1677,133 +1578,70 @@ commit_edge_insertions (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - edge e, next; + edge e; + edge_iterator ei; - for (e = bb->succ; e; e = next) - { - next = e->succ_next; - if (e->insns.r) - { - changed = true; - commit_one_edge_insertion (e, false); - } - } + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->insns.r) + commit_one_edge_insertion (e); } - - if (!changed) - return; - - blocks = sbitmap_alloc (last_basic_block); - sbitmap_zero (blocks); - FOR_EACH_BB (bb) - if (bb->aux) - { - SET_BIT (blocks, bb->index); - /* Check for forgotten bb->aux values before commit_edge_insertions - call. */ - if (bb->aux != &bb->aux) - abort (); - bb->aux = NULL; - } - find_many_sub_basic_blocks (blocks); - sbitmap_free (blocks); } -/* Update the CFG for all queued instructions, taking special care of inserting - code on edges between call and storing its return value. */ - -void -commit_edge_insertions_watch_calls (void) -{ - basic_block bb; - sbitmap blocks; - bool changed = false; - -#ifdef ENABLE_CHECKING - verify_flow_info (); -#endif - FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) - { - edge e, next; - - for (e = bb->succ; e; e = next) - { - next = e->succ_next; - if (e->insns.r) - { - changed = true; - commit_one_edge_insertion (e, true); - } - } - } - - if (!changed) - return; - - blocks = sbitmap_alloc (last_basic_block); - sbitmap_zero (blocks); - FOR_EACH_BB (bb) - if (bb->aux) - { - SET_BIT (blocks, bb->index); - /* Check for forgotten bb->aux values before commit_edge_insertions - call. */ - if (bb->aux != &bb->aux) - abort (); - bb->aux = NULL; - } - find_many_sub_basic_blocks (blocks); - sbitmap_free (blocks); -} - /* Print out RTL-specific basic block information (live information at start and end). 
*/ static void -rtl_dump_bb (basic_block bb, FILE *outf, int indent) +rtl_dump_bb (basic_block bb, FILE *outf, int indent, int flags ATTRIBUTE_UNUSED) { rtx insn; rtx last; char *s_indent; - s_indent = alloca ((size_t) indent + 1); + s_indent = (char *) alloca ((size_t) indent + 1); memset (s_indent, ' ', (size_t) indent); s_indent[indent] = '\0'; - fprintf (outf, ";;%s Registers live at start: ", s_indent); - dump_regset (bb->global_live_at_start, outf); - putc ('\n', outf); + if (df) + { + df_dump_top (bb, outf); + putc ('\n', outf); + } for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last; insn = NEXT_INSN (insn)) print_rtl_single (outf, insn); - fprintf (outf, ";;%s Registers live at end: ", s_indent); - dump_regset (bb->global_live_at_end, outf); - putc ('\n', outf); + if (df) + { + df_dump_bottom (bb, outf); + putc ('\n', outf); + } + } /* Like print_rtl, but also print out live information for the start of each basic block. */ void -print_rtl_with_bb (FILE *outf, rtx rtx_first) +print_rtl_with_bb (FILE *outf, const_rtx rtx_first) { - rtx tmp_rtx; - + const_rtx tmp_rtx; if (rtx_first == 0) fprintf (outf, "(nil)\n"); else { enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB }; int max_uid = get_max_uid (); - basic_block *start = xcalloc (max_uid, sizeof (basic_block)); - basic_block *end = xcalloc (max_uid, sizeof (basic_block)); - enum bb_state *in_bb_p = xcalloc (max_uid, sizeof (enum bb_state)); + basic_block *start = XCNEWVEC (basic_block, max_uid); + basic_block *end = XCNEWVEC (basic_block, max_uid); + enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid); basic_block bb; + if (df) + df_dump_start (outf); + FOR_EACH_BB_REVERSE (bb) { rtx x; @@ -1826,18 +1664,32 @@ print_rtl_with_bb (FILE *outf, rtx rtx_first) for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx)) { int did_output; - if ((bb = start[INSN_UID (tmp_rtx)]) != NULL) { - fprintf (outf, ";; Start of basic block %d, registers live:", - bb->index); - dump_regset (bb->global_live_at_start, outf); - putc ('\n', outf); + edge e; + edge_iterator ei; + + fprintf (outf, ";; Start of basic block ("); + FOR_EACH_EDGE (e, ei, bb->preds) + fprintf (outf, " %d", e->src->index); + fprintf (outf, ") -> %d\n", bb->index); + + if (df) + { + df_dump_top (bb, outf); + putc ('\n', outf); + } + FOR_EACH_EDGE (e, ei, bb->preds) + { + fputs (";; Pred edge ", outf); + dump_edge_info (outf, e, 0); + fputc ('\n', outf); + } } if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB - && GET_CODE (tmp_rtx) != NOTE - && GET_CODE (tmp_rtx) != BARRIER) + && !NOTE_P (tmp_rtx) + && !BARRIER_P (tmp_rtx)) fprintf (outf, ";; Insn is not within a basic block\n"); else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB) fprintf (outf, ";; Insn is in multiple basic blocks\n"); @@ -1846,12 +1698,27 @@ print_rtl_with_bb (FILE *outf, rtx rtx_first) if ((bb = end[INSN_UID (tmp_rtx)]) != NULL) { - fprintf (outf, ";; End of basic block %d, registers live:\n", - bb->index); - dump_regset (bb->global_live_at_end, outf); + edge e; + edge_iterator ei; + + fprintf (outf, ";; End of basic block %d -> (", bb->index); + FOR_EACH_EDGE (e, ei, bb->succs) + fprintf (outf, " %d", e->dest->index); + fprintf (outf, ")\n"); + + if (df) + { + df_dump_bottom (bb, outf); + putc ('\n', outf); + } putc ('\n', outf); + FOR_EACH_EDGE (e, ei, bb->succs) + { + fputs (";; Succ edge ", outf); + dump_edge_info (outf, e, 1); + fputc ('\n', outf); + } } - if (did_output) putc ('\n', outf); } @@ -1861,10 +1728,10 @@ print_rtl_with_bb (FILE *outf, rtx rtx_first) 
free (in_bb_p); } - if (current_function_epilogue_delay_list != 0) + if (crtl->epilogue_delay_list != 0) { fprintf (outf, "\n;; Insns in epilogue delay list:\n\n"); - for (tmp_rtx = current_function_epilogue_delay_list; tmp_rtx != 0; + for (tmp_rtx = crtl->epilogue_delay_list; tmp_rtx != 0; tmp_rtx = XEXP (tmp_rtx, 1)) print_rtl_single (outf, XEXP (tmp_rtx, 0)); } @@ -1874,26 +1741,50 @@ void update_br_prob_note (basic_block bb) { rtx note; - if (GET_CODE (BB_END (bb)) != JUMP_INSN) + if (!JUMP_P (BB_END (bb))) return; note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX); if (!note || INTVAL (XEXP (note, 0)) == BRANCH_EDGE (bb)->probability) return; XEXP (note, 0) = GEN_INT (BRANCH_EDGE (bb)->probability); } + +/* Get the last insn associated with block BB (that includes barriers and + tablejumps after BB). */ +rtx +get_last_bb_insn (basic_block bb) +{ + rtx tmp; + rtx end = BB_END (bb); + + /* Include any jump table following the basic block. */ + if (tablejump_p (end, NULL, &tmp)) + end = tmp; + + /* Include any barriers that may follow the basic block. */ + tmp = next_nonnote_insn_bb (end); + while (tmp && BARRIER_P (tmp)) + { + end = tmp; + tmp = next_nonnote_insn_bb (end); + } + + return end; +} /* Verify the CFG and RTL consistency common for both underlying RTL and cfglayout RTL. Currently it does following checks: - - test head/end pointers - overlapping of basic blocks + - insns with wrong BLOCK_FOR_INSN pointers - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note) - tails of basic blocks (ensure that boundary is necessary) - scans body of the basic block for JUMP_INSN, CODE_LABEL and NOTE_INSN_BASIC_BLOCK - verify that no fall_thru edge crosses hot/cold partition boundaries + - verify that there are no pending RTL branch predictions In future it can be extended check a lot of other stuff as well (reachability of basic blocks, life information, etc. etc.). */ @@ -1901,62 +1792,47 @@ update_br_prob_note (basic_block bb) static int rtl_verify_flow_info_1 (void) { - const int max_uid = get_max_uid (); - rtx last_head = get_last_insn (); - basic_block *bb_info; rtx x; int err = 0; - basic_block bb, last_bb_seen; - - bb_info = xcalloc (max_uid, sizeof (basic_block)); - - /* Check bb chain & numbers. */ - last_bb_seen = ENTRY_BLOCK_PTR; + basic_block bb; + /* Check the general integrity of the basic blocks. */ FOR_EACH_BB_REVERSE (bb) { - rtx head = BB_HEAD (bb); - rtx end = BB_END (bb); - - /* Verify the end of the basic block is in the INSN chain. */ - for (x = last_head; x != NULL_RTX; x = PREV_INSN (x)) - if (x == end) - break; + rtx insn; - if (!x) + if (!(bb->flags & BB_RTL)) { - error ("end insn %d for block %d not found in the insn stream", - INSN_UID (end), bb->index); + error ("BB_RTL flag not set for block %d", bb->index); err = 1; } - /* Work backwards from the end to the head of the basic block - to verify the head is in the RTL chain. */ - for (; x != NULL_RTX; x = PREV_INSN (x)) - { - /* While walking over the insn chain, verify insns appear - in only one basic block and initialize the BB_INFO array - used by other passes. 
*/ - if (bb_info[INSN_UID (x)] != NULL) - { - error ("insn %d is in multiple basic blocks (%d and %d)", - INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index); - err = 1; - } - - bb_info[INSN_UID (x)] = bb; - - if (x == head) - break; - } - if (!x) - { - error ("head insn %d for block %d not found in the insn stream", - INSN_UID (head), bb->index); - err = 1; - } + FOR_BB_INSNS (bb, insn) + if (BLOCK_FOR_INSN (insn) != bb) + { + error ("insn %d basic block pointer is %d, should be %d", + INSN_UID (insn), + BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0, + bb->index); + err = 1; + } - last_head = x; + for (insn = bb->il.rtl->header; insn; insn = NEXT_INSN (insn)) + if (!BARRIER_P (insn) + && BLOCK_FOR_INSN (insn) != NULL) + { + error ("insn %d in header of bb %d has non-NULL basic block", + INSN_UID (insn), bb->index); + err = 1; + } + for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn)) + if (!BARRIER_P (insn) + && BLOCK_FOR_INSN (insn) != NULL) + { + error ("insn %d in footer of bb %d has non-NULL basic block", + INSN_UID (insn), bb->index); + err = 1; + } } /* Now check the basic blocks (boundaries etc.) */ @@ -1965,27 +1841,32 @@ rtl_verify_flow_info_1 (void) int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0; edge e, fallthru = NULL; rtx note; + edge_iterator ei; - if (INSN_P (BB_END (bb)) + if (JUMP_P (BB_END (bb)) && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX)) - && bb->succ && bb->succ->succ_next + && EDGE_COUNT (bb->succs) >= 2 && any_condjump_p (BB_END (bb))) { - if (INTVAL (XEXP (note, 0)) != BRANCH_EDGE (bb)->probability) + if (INTVAL (XEXP (note, 0)) != BRANCH_EDGE (bb)->probability + && profile_status != PROFILE_ABSENT) { error ("verify_flow_info: REG_BR_PROB does not match cfg %wi %i", INTVAL (XEXP (note, 0)), BRANCH_EDGE (bb)->probability); err = 1; } } - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) { n_fallthru++, fallthru = e; - if (e->crossing_edge) - { - error ("Fallthru edge crosses section boundary (bb %i)", + if ((e->flags & EDGE_CROSSING) + || (BB_PARTITION (e->src) != BB_PARTITION (e->dest) + && e->src != ENTRY_BLOCK_PTR + && e->dest != EXIT_BLOCK_PTR)) + { + error ("fallthru edge crosses section boundary (bb %i)", e->src->index); err = 1; } @@ -1994,7 +1875,8 @@ rtl_verify_flow_info_1 (void) if ((e->flags & ~(EDGE_DFS_BACK | EDGE_CAN_FALLTHRU | EDGE_IRREDUCIBLE_LOOP - | EDGE_LOOP_EXIT)) == 0) + | EDGE_LOOP_EXIT + | EDGE_CROSSING)) == 0) n_branch++; if (e->flags & EDGE_ABNORMAL_CALL) @@ -2006,53 +1888,61 @@ rtl_verify_flow_info_1 (void) n_abnormal++; } - if (n_eh && GET_CODE (PATTERN (BB_END (bb))) != RESX - && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX)) + if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX)) { - error ("Missing REG_EH_REGION note in the end of bb %i", bb->index); + error ("missing REG_EH_REGION note in the end of bb %i", bb->index); + err = 1; + } + if (n_eh > 1) + { + error ("too many eh edges %i", bb->index); err = 1; } if (n_branch - && (GET_CODE (BB_END (bb)) != JUMP_INSN + && (!JUMP_P (BB_END (bb)) || (n_branch > 1 && (any_uncondjump_p (BB_END (bb)) || any_condjump_p (BB_END (bb)))))) { - error ("Too many outgoing branch edges from bb %i", bb->index); + error ("too many outgoing branch edges from bb %i", bb->index); err = 1; } if (n_fallthru && any_uncondjump_p (BB_END (bb))) { - error ("Fallthru edge after unconditional jump %i", bb->index); + error ("fallthru edge after unconditional jump %i", 
bb->index); err = 1; } if (n_branch != 1 && any_uncondjump_p (BB_END (bb))) { - error ("Wrong amount of branch edges after unconditional jump %i", bb->index); + error ("wrong number of branch edges after unconditional jump %i", + bb->index); err = 1; } if (n_branch != 1 && any_condjump_p (BB_END (bb)) && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest)) { - error ("Wrong amount of branch edges after conditional jump %i", bb->index); + error ("wrong amount of branch edges after conditional jump %i", + bb->index); err = 1; } - if (n_call && GET_CODE (BB_END (bb)) != CALL_INSN) + if (n_call && !CALL_P (BB_END (bb))) { - error ("Call edges for non-call insn in bb %i", bb->index); + error ("call edges for non-call insn in bb %i", bb->index); err = 1; } if (n_abnormal - && (GET_CODE (BB_END (bb)) != CALL_INSN && n_call != n_abnormal) - && (GET_CODE (BB_END (bb)) != JUMP_INSN + && (!CALL_P (BB_END (bb)) && n_call != n_abnormal) + && (!JUMP_P (BB_END (bb)) || any_condjump_p (BB_END (bb)) || any_uncondjump_p (BB_END (bb)))) { - error ("Abnormal edges for no purpose in bb %i", bb->index); + error ("abnormal edges for no purpose in bb %i", bb->index); err = 1; } for (x = BB_HEAD (bb); x != NEXT_INSN (BB_END (bb)); x = NEXT_INSN (x)) - if (BLOCK_FOR_INSN (x) != bb) + /* We may have a barrier inside a basic block before dead code + elimination. There is no BLOCK_FOR_INSN field in a barrier. */ + if (!BARRIER_P (x) && BLOCK_FOR_INSN (x) != bb) { debug_rtx (x); if (! BLOCK_FOR_INSN (x)) @@ -2068,10 +1958,10 @@ rtl_verify_flow_info_1 (void) } /* OK pointers are correct. Now check the header of basic - block. It ought to contain optional CODE_LABEL followed + block. It ought to contain optional CODE_LABEL followed by NOTE_BASIC_BLOCK. */ x = BB_HEAD (bb); - if (GET_CODE (x) == CODE_LABEL) + if (LABEL_P (x)) { if (BB_END (bb) == x) { @@ -2091,7 +1981,7 @@ rtl_verify_flow_info_1 (void) } if (BB_END (bb) == x) - /* Do checks for empty blocks her. e */ + /* Do checks for empty blocks here. */ ; else for (x = NEXT_INSN (x); x; x = NEXT_INSN (x)) @@ -2115,7 +2005,6 @@ rtl_verify_flow_info_1 (void) } /* Clean up. */ - free (bb_info); return err; } @@ -2124,45 +2013,104 @@ rtl_verify_flow_info_1 (void) Currently it does following checks: - all checks of rtl_verify_flow_info_1 + - test head/end pointers - check that all insns are in the basic blocks (except the switch handling code, barriers and notes) - check that all returns are followed by barriers - check that all fallthru edge points to the adjacent blocks. */ + static int rtl_verify_flow_info (void) { basic_block bb; int err = rtl_verify_flow_info_1 (); rtx x; + rtx last_head = get_last_insn (); + basic_block *bb_info; int num_bb_notes; const rtx rtx_first = get_insns (); basic_block last_bb_seen = ENTRY_BLOCK_PTR, curr_bb = NULL; + const int max_uid = get_max_uid (); + + bb_info = XCNEWVEC (basic_block, max_uid); FOR_EACH_BB_REVERSE (bb) { edge e; - for (e = bb->succ; e; e = e->succ_next) - if (e->flags & EDGE_FALLTHRU) - break; + rtx head = BB_HEAD (bb); + rtx end = BB_END (bb); + + for (x = last_head; x != NULL_RTX; x = PREV_INSN (x)) + { + /* Verify the end of the basic block is in the INSN chain. */ + if (x == end) + break; + + /* And that the code outside of basic blocks has NULL bb field. 
*/ + if (!BARRIER_P (x) + && BLOCK_FOR_INSN (x) != NULL) + { + error ("insn %d outside of basic blocks has non-NULL bb field", + INSN_UID (x)); + err = 1; + } + } + + if (!x) + { + error ("end insn %d for block %d not found in the insn stream", + INSN_UID (end), bb->index); + err = 1; + } + + /* Work backwards from the end to the head of the basic block + to verify the head is in the RTL chain. */ + for (; x != NULL_RTX; x = PREV_INSN (x)) + { + /* While walking over the insn chain, verify insns appear + in only one basic block. */ + if (bb_info[INSN_UID (x)] != NULL) + { + error ("insn %d is in multiple basic blocks (%d and %d)", + INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index); + err = 1; + } + + bb_info[INSN_UID (x)] = bb; + + if (x == head) + break; + } + if (!x) + { + error ("head insn %d for block %d not found in the insn stream", + INSN_UID (head), bb->index); + err = 1; + } + + last_head = PREV_INSN (x); + + e = find_fallthru_edge (bb->succs); if (!e) { rtx insn; /* Ensure existence of barrier in BB with no fallthru edges. */ - for (insn = BB_END (bb); !insn || GET_CODE (insn) != BARRIER; - insn = NEXT_INSN (insn)) - if (!insn - || (GET_CODE (insn) == NOTE - && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)) + for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn)) + { + if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn)) { error ("missing barrier after block %i", bb->index); err = 1; break; } + if (BARRIER_P (insn)) + break; + } } else if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR) - { + { rtx insn; if (e->src->next_bb != e->dest) @@ -2175,21 +2123,29 @@ rtl_verify_flow_info (void) else for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest); insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == BARRIER -#ifndef CASE_DROPS_THROUGH - || INSN_P (insn) -#else - || (INSN_P (insn) && ! JUMP_TABLE_DATA_P (insn)) -#endif - ) + if (BARRIER_P (insn) || INSN_P (insn)) { error ("verify_flow_info: Incorrect fallthru %i->%i", e->src->index, e->dest->index); fatal_insn ("wrong insn in the fallthru edge", insn); err = 1; } - } + } + } + + for (x = last_head; x != NULL_RTX; x = PREV_INSN (x)) + { + /* Check that the code before the first basic block has NULL + bb field. */ + if (!BARRIER_P (x) + && BLOCK_FOR_INSN (x) != NULL) + { + error ("insn %d outside of basic blocks has non-NULL bb field", + INSN_UID (x)); + err = 1; + } } + free (bb_info); num_bb_notes = 0; last_bb_seen = ENTRY_BLOCK_PTR; @@ -2218,9 +2174,7 @@ rtl_verify_flow_info (void) case CODE_LABEL: /* An addr_vec is placed outside any basic block. */ if (NEXT_INSN (x) - && GET_CODE (NEXT_INSN (x)) == JUMP_INSN - && (GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_DIFF_VEC - || GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_VEC)) + && JUMP_TABLE_DATA_P (NEXT_INSN (x))) x = NEXT_INSN (x); /* But in any case, non-deletable labels can appear anywhere. */ @@ -2231,16 +2185,15 @@ rtl_verify_flow_info (void) } } - if (INSN_P (x) - && GET_CODE (x) == JUMP_INSN + if (JUMP_P (x) && returnjump_p (x) && ! condjump_p (x) - && ! (NEXT_INSN (x) && GET_CODE (NEXT_INSN (x)) == BARRIER)) + && ! 
(next_nonnote_insn (x) && BARRIER_P (next_nonnote_insn (x)))) fatal_insn ("return not followed by barrier", x); if (curr_bb && x == BB_END (curr_bb)) curr_bb = NULL; } - if (num_bb_notes != n_basic_blocks) + if (num_bb_notes != n_basic_blocks - NUM_FIXED_BLOCKS) internal_error ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)", num_bb_notes, n_basic_blocks); @@ -2255,12 +2208,19 @@ rtl_verify_flow_info (void) bool purge_dead_edges (basic_block bb) { - edge e, next; + edge e; rtx insn = BB_END (bb), note; bool purged = false; + bool found; + edge_iterator ei; + + if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb)) + do + insn = PREV_INSN (insn); + while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb)); /* If this instruction cannot trap, remove REG_EH_REGION notes. */ - if (GET_CODE (insn) == INSN + if (NONJUMP_INSN_P (insn) && (note = find_reg_note (insn, REG_EH_REGION, NULL))) { rtx eqnote; @@ -2272,33 +2232,42 @@ purge_dead_edges (basic_block bb) } /* Cleanup abnormal edges caused by exceptions or non-local gotos. */ - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; - if (e->flags & EDGE_EH) + bool remove = false; + + /* There are three types of edges we need to handle correctly here: EH + edges, abnormal call EH edges, and abnormal call non-EH edges. The + latter can appear when nonlocal gotos are used. */ + if (e->flags & EDGE_ABNORMAL_CALL) { - if (can_throw_internal (BB_END (bb))) - continue; + if (!CALL_P (insn)) + remove = true; + else if (can_nonlocal_goto (insn)) + ; + else if ((e->flags & EDGE_EH) && can_throw_internal (insn)) + ; + else + remove = true; } - else if (e->flags & EDGE_ABNORMAL_CALL) + else if (e->flags & EDGE_EH) + remove = !can_throw_internal (insn); + + if (remove) { - if (GET_CODE (BB_END (bb)) == CALL_INSN - && (! (note = find_reg_note (insn, REG_EH_REGION, NULL)) - || INTVAL (XEXP (note, 0)) >= 0)) - continue; + remove_edge (e); + df_set_bb_dirty (bb); + purged = true; } else - continue; - - remove_edge (e); - bb->flags |= BB_DIRTY; - purged = true; + ei_next (&ei); } - if (GET_CODE (insn) == JUMP_INSN) + if (JUMP_P (insn)) { rtx note; edge b,f; + edge_iterator ei; /* We do care only about conditional jumps and simplejumps. */ if (!any_condjump_p (insn) @@ -2317,10 +2286,8 @@ purge_dead_edges (basic_block bb) remove_note (insn, note); } - for (e = bb->succ; e; e = next) + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; - /* Avoid abnormal flags to leak from computed jumps turned into simplejumps. */ @@ -2330,32 +2297,42 @@ purge_dead_edges (basic_block bb) if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn)) /* A conditional jump can fall through into the next block, so we should keep the edge. */ - continue; + { + ei_next (&ei); + continue; + } else if (e->dest != EXIT_BLOCK_PTR && BB_HEAD (e->dest) == JUMP_LABEL (insn)) /* If the destination block is the target of the jump, keep the edge. */ - continue; + { + ei_next (&ei); + continue; + } else if (e->dest == EXIT_BLOCK_PTR && returnjump_p (insn)) /* If the destination block is the exit block, and this instruction is a return, then keep the edge. */ - continue; + { + ei_next (&ei); + continue; + } else if ((e->flags & EDGE_EH) && can_throw_internal (insn)) /* Keep the edges that correspond to exceptions thrown by this instruction and rematerialize the EDGE_ABNORMAL flag we just cleared above. 
*/ { e->flags |= EDGE_ABNORMAL; + ei_next (&ei); continue; } /* We do not need this edge. */ - bb->flags |= BB_DIRTY; + df_set_bb_dirty (bb); purged = true; remove_edge (e); } - if (!bb->succ || !purged) + if (EDGE_COUNT (bb->succs) == 0 || !purged) return purged; if (dump_file) @@ -2365,10 +2342,10 @@ purge_dead_edges (basic_block bb) return purged; /* Redistribute probabilities. */ - if (!bb->succ->succ_next) + if (single_succ_p (bb)) { - bb->succ->probability = REG_BR_PROB_BASE; - bb->succ->count = bb->count; + single_succ_edge (bb)->probability = REG_BR_PROB_BASE; + single_succ_edge (bb)->count = bb->count; } else { @@ -2386,16 +2363,15 @@ purge_dead_edges (basic_block bb) return purged; } - else if (GET_CODE (insn) == CALL_INSN && SIBLING_CALL_P (insn)) + else if (CALL_P (insn) && SIBLING_CALL_P (insn)) { /* First, there should not be any EH or ABCALL edges resulting from non-local gotos and the like. If there were, we shouldn't have created the sibcall in the first place. Second, there should of course never have been a fallthru edge. */ - if (!bb->succ || bb->succ->succ_next) - abort (); - if (bb->succ->flags != (EDGE_SIBCALL | EDGE_ABNORMAL)) - abort (); + gcc_assert (single_succ_p (bb)); + gcc_assert (single_succ_edge (bb)->flags + == (EDGE_SIBCALL | EDGE_ABNORMAL)); return 0; } @@ -2405,29 +2381,36 @@ purge_dead_edges (basic_block bb) as these are only created by conditional branches. If we find such an edge we know that there used to be a jump here and can then safely remove all non-fallthru edges. */ - for (e = bb->succ; e && (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)); - e = e->succ_next) - ; + found = false; + FOR_EACH_EDGE (e, ei, bb->succs) + if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))) + { + found = true; + break; + } - if (!e) + if (!found) return purged; - for (e = bb->succ; e; e = next) + /* Remove all but the fake and fallthru edges. The fake edge may be + the only successor for this block in the case of noreturn + calls. */ + for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) { - next = e->succ_next; - if (!(e->flags & EDGE_FALLTHRU)) + if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE))) { - bb->flags |= BB_DIRTY; + df_set_bb_dirty (bb); remove_edge (e); purged = true; } + else + ei_next (&ei); } - if (!bb->succ || bb->succ->succ_next) - abort (); + gcc_assert (single_succ_p (bb)); - bb->succ->probability = REG_BR_PROB_BASE; - bb->succ->count = bb->count; + single_succ_edge (bb)->probability = REG_BR_PROB_BASE; + single_succ_edge (bb)->count = bb->count; if (dump_file) fprintf (dump_file, "Purged non-fallthru edges from bb %i\n", @@ -2439,52 +2422,124 @@ purge_dead_edges (basic_block bb) true if some edge has been eliminated. */ bool -purge_all_dead_edges (int update_life_p) +purge_all_dead_edges (void) { int purged = false; - sbitmap blocks = 0; basic_block bb; - if (update_life_p) - { - blocks = sbitmap_alloc (last_basic_block); - sbitmap_zero (blocks); - } - FOR_EACH_BB (bb) { bool purged_here = purge_dead_edges (bb); purged |= purged_here; - if (purged_here && update_life_p) - SET_BIT (blocks, bb->index); } - if (update_life_p && purged) - update_life_info (blocks, UPDATE_LIFE_GLOBAL, - PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE - | PROP_KILL_DEAD_CODE); - - if (update_life_p) - sbitmap_free (blocks); return purged; } +/* This is used by a few passes that emit some instructions after abnormal + calls, moving the basic block's end, while they in fact do want to emit + them on the fallthru edge. 
Look for abnormal call edges, find backward + the call in the block and insert the instructions on the edge instead. + + Similarly, handle instructions throwing exceptions internally. + + Return true when instructions have been found and inserted on edges. */ + +bool +fixup_abnormal_edges (void) +{ + bool inserted = false; + basic_block bb; + + FOR_EACH_BB (bb) + { + edge e; + edge_iterator ei; + + /* Look for cases we are interested in - calls or instructions causing + exceptions. */ + FOR_EACH_EDGE (e, ei, bb->succs) + if ((e->flags & EDGE_ABNORMAL_CALL) + || ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) + == (EDGE_ABNORMAL | EDGE_EH))) + break; + + if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb))) + { + rtx insn; + + /* Get past the new insns generated. Allow notes, as the insns + may be already deleted. */ + insn = BB_END (bb); + while ((NONJUMP_INSN_P (insn) || NOTE_P (insn)) + && !can_throw_internal (insn) + && insn != BB_HEAD (bb)) + insn = PREV_INSN (insn); + + if (CALL_P (insn) || can_throw_internal (insn)) + { + rtx stop, next; + + e = find_fallthru_edge (bb->succs); + + stop = NEXT_INSN (BB_END (bb)); + BB_END (bb) = insn; + + for (insn = NEXT_INSN (insn); insn != stop; insn = next) + { + next = NEXT_INSN (insn); + if (INSN_P (insn)) + { + delete_insn (insn); + + /* Sometimes there's still the return value USE. + If it's placed after a trapping call (i.e. that + call is the last insn anyway), we have no fallthru + edge. Simply delete this use and don't try to insert + on the non-existent edge. */ + if (GET_CODE (PATTERN (insn)) != USE) + { + /* We're not deleting it, we're moving it. */ + INSN_DELETED_P (insn) = 0; + PREV_INSN (insn) = NULL_RTX; + NEXT_INSN (insn) = NULL_RTX; + + insert_insn_on_edge (insn, e); + inserted = true; + } + } + else if (!BARRIER_P (insn)) + set_block_for_insn (insn, NULL); + } + } + + /* It may be that we don't find any trapping insn. In this + case we discovered quite late that the insn that had been + marked as can_throw_internal in fact couldn't trap at all. + So we should in fact delete the EH edges out of the block. */ + else + purge_dead_edges (bb); + } + } + + return inserted; +} + /* Same as split_block but update cfg_layout structures. */ static basic_block cfg_layout_split_block (basic_block bb, void *insnp) { - rtx insn = insnp; + rtx insn = (rtx) insnp; basic_block new_bb = rtl_split_block (bb, insn); - new_bb->rbi->footer = bb->rbi->footer; - bb->rbi->footer = NULL; + new_bb->il.rtl->footer = bb->il.rtl->footer; + bb->il.rtl->footer = NULL; return new_bb; } - /* Redirect Edge to DEST. */ static edge cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) @@ -2501,7 +2556,7 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) if (e->src != ENTRY_BLOCK_PTR && (ret = try_redirect_by_replacing_jump (e, dest, true))) { - src->flags |= BB_DIRTY; + df_set_bb_dirty (src); return ret; } @@ -2512,7 +2567,7 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n", e->src->index, dest->index); - e->src->flags |= BB_DIRTY; + df_set_bb_dirty (e->src); redirect_edge_succ (e, dest); return e; } @@ -2524,45 +2579,47 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) if (e->flags & EDGE_FALLTHRU) { /* Redirect any branch edges unified with the fallthru one. 
*/ - if (GET_CODE (BB_END (src)) == JUMP_INSN + if (JUMP_P (BB_END (src)) && label_is_jump_target_p (BB_HEAD (e->dest), BB_END (src))) { + edge redirected; + if (dump_file) fprintf (dump_file, "Fallthru edge unified with branch " "%i->%i redirected to %i\n", e->src->index, e->dest->index, dest->index); e->flags &= ~EDGE_FALLTHRU; - if (!redirect_branch_edge (e, dest)) - abort (); - e->flags |= EDGE_FALLTHRU; - e->src->flags |= BB_DIRTY; - return e; + redirected = redirect_branch_edge (e, dest); + gcc_assert (redirected); + redirected->flags |= EDGE_FALLTHRU; + df_set_bb_dirty (redirected->src); + return redirected; } /* In case we are redirecting fallthru edge to the branch edge - of conditional jump, remove it. */ - if (src->succ->succ_next - && !src->succ->succ_next->succ_next) + of conditional jump, remove it. */ + if (EDGE_COUNT (src->succs) == 2) { - edge s = e->succ_next ? e->succ_next : src->succ; + /* Find the edge that is different from E. */ + edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e); + if (s->dest == dest && any_condjump_p (BB_END (src)) && onlyjump_p (BB_END (src))) delete_insn (BB_END (src)); } - ret = redirect_edge_succ_nodup (e, dest); if (dump_file) - fprintf (dump_file, "Fallthru edge %i->%i redirected to %i\n", + fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n", e->src->index, e->dest->index, dest->index); + ret = redirect_edge_succ_nodup (e, dest); } else ret = redirect_branch_edge (e, dest); /* We don't want simplejumps in the insn stream during cfglayout. */ - if (simplejump_p (BB_END (src))) - abort (); + gcc_assert (!simplejump_p (BB_END (src))); - src->flags |= BB_DIRTY; + df_set_bb_dirty (src); return ret; } @@ -2570,8 +2627,9 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) static basic_block cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest) { - if (!cfg_layout_redirect_edge_and_branch (e, dest)) - abort (); + edge redirected = cfg_layout_redirect_edge_and_branch (e, dest); + + gcc_assert (redirected); return NULL; } @@ -2582,44 +2640,44 @@ cfg_layout_delete_block (basic_block bb) { rtx insn, next, prev = PREV_INSN (BB_HEAD (bb)), *to, remaints; - if (bb->rbi->header) + if (bb->il.rtl->header) { next = BB_HEAD (bb); if (prev) - NEXT_INSN (prev) = bb->rbi->header; + NEXT_INSN (prev) = bb->il.rtl->header; else - set_first_insn (bb->rbi->header); - PREV_INSN (bb->rbi->header) = prev; - insn = bb->rbi->header; + set_first_insn (bb->il.rtl->header); + PREV_INSN (bb->il.rtl->header) = prev; + insn = bb->il.rtl->header; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); NEXT_INSN (insn) = next; PREV_INSN (next) = insn; } next = NEXT_INSN (BB_END (bb)); - if (bb->rbi->footer) + if (bb->il.rtl->footer) { - insn = bb->rbi->footer; + insn = bb->il.rtl->footer; while (insn) { - if (GET_CODE (insn) == BARRIER) + if (BARRIER_P (insn)) { if (PREV_INSN (insn)) NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn); else - bb->rbi->footer = NEXT_INSN (insn); + bb->il.rtl->footer = NEXT_INSN (insn); if (NEXT_INSN (insn)) PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn); } - if (GET_CODE (insn) == CODE_LABEL) + if (LABEL_P (insn)) break; insn = NEXT_INSN (insn); } - if (bb->rbi->footer) + if (bb->il.rtl->footer) { insn = BB_END (bb); - NEXT_INSN (insn) = bb->rbi->footer; - PREV_INSN (bb->rbi->footer) = insn; + NEXT_INSN (insn) = bb->il.rtl->footer; + PREV_INSN (bb->il.rtl->footer) = insn; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); NEXT_INSN (insn) = next; @@ -2630,9 +2688,10 @@ cfg_layout_delete_block (basic_block bb) } } if 
(bb->next_bb != EXIT_BLOCK_PTR) - to = &bb->next_bb->rbi->header; + to = &bb->next_bb->il.rtl->header; else to = &cfg_layout_function_footer; + rtl_delete_block (bb); if (prev) @@ -2658,63 +2717,100 @@ cfg_layout_delete_block (basic_block bb) } /* Return true when blocks A and B can be safely merged. */ + static bool cfg_layout_can_merge_blocks_p (basic_block a, basic_block b) { - bool partitions_ok = true; - /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot - and cold sections. */ - - if (flag_reorder_blocks_and_partition - && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX) - || find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX) - || a->partition != b->partition)) - partitions_ok = false; + and cold sections. + + Basic block partitioning may result in some jumps that appear to + be optimizable (or blocks that appear to be mergeable), but which really + must be left untouched (they are required to make it safely across + partition boundaries). See the comments at the top of + bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ + + if (BB_PARTITION (a) != BB_PARTITION (b)) + return false; /* There must be exactly one edge in between the blocks. */ - return (a->succ && !a->succ->succ_next && a->succ->dest == b - && !b->pred->pred_next && a != b + return (single_succ_p (a) + && single_succ (a) == b + && single_pred_p (b) == 1 + && a != b /* Must be simple edge. */ - && !(a->succ->flags & EDGE_COMPLEX) - && partitions_ok + && !(single_succ_edge (a)->flags & EDGE_COMPLEX) && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR - /* If the jump insn has side effects, - we can't kill the edge. */ - && (GET_CODE (BB_END (a)) != JUMP_INSN - || (reload_completed + /* If the jump insn has side effects, we can't kill the edge. + When not optimizing, try_redirect_by_replacing_jump will + not allow us to redirect an edge by replacing a table jump. */ + && (!JUMP_P (BB_END (a)) + || ((!optimize || reload_completed) ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a))))); } -/* Merge block A and B, abort when it is not possible. */ +/* Merge block A and B. The blocks must be mergeable. */ + static void cfg_layout_merge_blocks (basic_block a, basic_block b) { -#ifdef ENABLE_CHECKING - if (!cfg_layout_can_merge_blocks_p (a, b)) - abort (); -#endif + bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0; + + gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b)); + + if (dump_file) + fprintf (dump_file, "Merging block %d into block %d...\n", b->index, + a->index); /* If there was a CODE_LABEL beginning B, delete it. */ - if (GET_CODE (BB_HEAD (b)) == CODE_LABEL) - delete_insn (BB_HEAD (b)); + if (LABEL_P (BB_HEAD (b))) + { + delete_insn (BB_HEAD (b)); + } /* We should have fallthru edge in a, or we can do dummy redirection to get it cleaned up. */ - if (GET_CODE (BB_END (a)) == JUMP_INSN) - try_redirect_by_replacing_jump (a->succ, b, true); - if (GET_CODE (BB_END (a)) == JUMP_INSN) - abort (); + if (JUMP_P (BB_END (a))) + try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true); + gcc_assert (!JUMP_P (BB_END (a))); + + /* When not optimizing and the edge is the only place in RTL which holds + some unique locus, emit a nop with that locus in between. 
*/ + if (!optimize && EDGE_SUCC (a, 0)->goto_locus) + { + rtx insn = BB_END (a), end = PREV_INSN (BB_HEAD (a)); + int goto_locus = EDGE_SUCC (a, 0)->goto_locus; + + while (insn != end && (!INSN_P (insn) || INSN_LOCATOR (insn) == 0)) + insn = PREV_INSN (insn); + if (insn != end && locator_eq (INSN_LOCATOR (insn), goto_locus)) + goto_locus = 0; + else + { + insn = BB_HEAD (b); + end = NEXT_INSN (BB_END (b)); + while (insn != end && !INSN_P (insn)) + insn = NEXT_INSN (insn); + if (insn != end && INSN_LOCATOR (insn) != 0 + && locator_eq (INSN_LOCATOR (insn), goto_locus)) + goto_locus = 0; + } + if (goto_locus) + { + BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a); + INSN_LOCATOR (BB_END (a)) = goto_locus; + } + } /* Possible line number notes should appear in between. */ - if (b->rbi->header) + if (b->il.rtl->header) { rtx first = BB_END (a), last; - last = emit_insn_after (b->rbi->header, BB_END (a)); - delete_insn_chain (NEXT_INSN (first), last); - b->rbi->header = NULL; + last = emit_insn_after_noloc (b->il.rtl->header, BB_END (a), a); + delete_insn_chain (NEXT_INSN (first), last, false); + b->il.rtl->header = NULL; } /* In the case basic blocks are not adjacent, move them around. */ @@ -2722,13 +2818,19 @@ cfg_layout_merge_blocks (basic_block a, basic_block b) { rtx first = unlink_insn_chain (BB_HEAD (b), BB_END (b)); - emit_insn_after (first, BB_END (a)); + emit_insn_after_noloc (first, BB_END (a), a); /* Skip possible DELETED_LABEL insn. */ if (!NOTE_INSN_BASIC_BLOCK_P (first)) first = NEXT_INSN (first); - if (!NOTE_INSN_BASIC_BLOCK_P (first)) - abort (); + gcc_assert (NOTE_INSN_BASIC_BLOCK_P (first)); BB_HEAD (b) = NULL; + + /* emit_insn_after_noloc doesn't call df_insn_change_bb. + We need to explicitly call. */ + update_bb_for_insn_chain (NEXT_INSN (first), + BB_END (b), + a); + delete_insn (first); } /* Otherwise just re-associate the instructions. */ @@ -2736,41 +2838,43 @@ cfg_layout_merge_blocks (basic_block a, basic_block b) { rtx insn; - for (insn = BB_HEAD (b); - insn != NEXT_INSN (BB_END (b)); - insn = NEXT_INSN (insn)) - set_block_for_insn (insn, a); + update_bb_for_insn_chain (BB_HEAD (b), BB_END (b), a); + insn = BB_HEAD (b); /* Skip possible DELETED_LABEL insn. */ if (!NOTE_INSN_BASIC_BLOCK_P (insn)) insn = NEXT_INSN (insn); - if (!NOTE_INSN_BASIC_BLOCK_P (insn)) - abort (); + gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn)); BB_HEAD (b) = NULL; BB_END (a) = BB_END (b); delete_insn (insn); } + df_bb_delete (b->index); + /* Possible tablejumps and barriers should appear after the block. */ - if (b->rbi->footer) + if (b->il.rtl->footer) { - if (!a->rbi->footer) - a->rbi->footer = b->rbi->footer; + if (!a->il.rtl->footer) + a->il.rtl->footer = b->il.rtl->footer; else { - rtx last = a->rbi->footer; + rtx last = a->il.rtl->footer; while (NEXT_INSN (last)) last = NEXT_INSN (last); - NEXT_INSN (last) = b->rbi->footer; - PREV_INSN (b->rbi->footer) = last; + NEXT_INSN (last) = b->il.rtl->footer; + PREV_INSN (b->il.rtl->footer) = last; } - b->rbi->footer = NULL; + b->il.rtl->footer = NULL; } + /* If B was a forwarder block, propagate the locus on the edge. */ + if (forwarder_p && !EDGE_SUCC (b, 0)->goto_locus) + EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus; + if (dump_file) - fprintf (dump_file, "Merged blocks %d and %d.\n", - a->index, b->index); + fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index); } /* Split edge E. 
*/ @@ -2778,13 +2882,16 @@ cfg_layout_merge_blocks (basic_block a, basic_block b) static basic_block cfg_layout_split_edge (edge e) { - edge new_e; basic_block new_bb = create_basic_block (e->src != ENTRY_BLOCK_PTR ? NEXT_INSN (BB_END (e->src)) : get_insns (), NULL_RTX, e->src); - new_e = make_edge (new_bb, e->dest, EDGE_FALLTHRU); + if (e->dest == EXIT_BLOCK_PTR) + BB_COPY_PARTITION (new_bb, e->src); + else + BB_COPY_PARTITION (new_bb, e->dest); + make_edge (new_bb, e->dest, EDGE_FALLTHRU); redirect_edge_and_branch_force (e, new_bb); return new_bb; @@ -2805,17 +2912,19 @@ rtl_block_ends_with_call_p (basic_block bb) { rtx insn = BB_END (bb); - while (GET_CODE (insn) != CALL_INSN + while (!CALL_P (insn) && insn != BB_HEAD (bb) - && keep_with_call_p (insn)) + && (keep_with_call_p (insn) + || NOTE_P (insn) + || DEBUG_INSN_P (insn))) insn = PREV_INSN (insn); - return (GET_CODE (insn) == CALL_INSN); + return (CALL_P (insn)); } /* Return 1 if BB ends with a conditional branch, 0 otherwise. */ static bool -rtl_block_ends_with_condjump_p (basic_block bb) +rtl_block_ends_with_condjump_p (const_basic_block bb) { return any_condjump_p (BB_END (bb)); } @@ -2824,16 +2933,15 @@ rtl_block_ends_with_condjump_p (basic_block bb) Helper function for rtl_flow_call_edges_add. */ static bool -need_fake_edge_p (rtx insn) +need_fake_edge_p (const_rtx insn) { if (!INSN_P (insn)) return false; - if ((GET_CODE (insn) == CALL_INSN + if ((CALL_P (insn) && !SIBLING_CALL_P (insn) && !find_reg_note (insn, REG_NORETURN, NULL) - && !find_reg_note (insn, REG_ALWAYS_RETURN, NULL) - && !CONST_OR_PURE_CALL_P (insn))) + && !(RTL_CONST_OR_PURE_CALL_P (insn)))) return true; return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS @@ -2860,7 +2968,7 @@ rtl_flow_call_edges_add (sbitmap blocks) int last_bb = last_basic_block; bool check_last_block = false; - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return 0; if (! blocks) @@ -2894,13 +3002,12 @@ rtl_flow_call_edges_add (sbitmap blocks) { edge e; - for (e = bb->succ; e; e = e->succ_next) - if (e->dest == EXIT_BLOCK_PTR) - { - insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e); - commit_edge_insertions (); - break; - } + e = find_edge (bb, EXIT_BLOCK_PTR); + if (e) + { + insert_insn_on_edge (gen_use (const0_rtx), e); + commit_edge_insertions (); + } } } @@ -2908,7 +3015,7 @@ rtl_flow_call_edges_add (sbitmap blocks) calls since there is no way that we can determine if they will return or not... */ - for (i = 0; i < last_bb; i++) + for (i = NUM_FIXED_BLOCKS; i < last_bb; i++) { basic_block bb = BASIC_BLOCK (i); rtx insn; @@ -2929,22 +3036,23 @@ rtl_flow_call_edges_add (sbitmap blocks) rtx split_at_insn = insn; /* Don't split the block between a call and an insn that should - remain in the same block as the call. */ - if (GET_CODE (insn) == CALL_INSN) + remain in the same block as the call. */ + if (CALL_P (insn)) while (split_at_insn != BB_END (bb) && keep_with_call_p (NEXT_INSN (split_at_insn))) split_at_insn = NEXT_INSN (split_at_insn); /* The handling above of the final block before the epilogue - should be enough to verify that there is no edge to the exit + should be enough to verify that there is no edge to the exit block in CFG already. Calling make_edge in such case would cause us to mark that edge as fake and remove it later. 
*/ #ifdef ENABLE_CHECKING if (split_at_insn == BB_END (bb)) - for (e = bb->succ; e; e = e->succ_next) - if (e->dest == EXIT_BLOCK_PTR) - abort (); + { + e = find_edge (bb, EXIT_BLOCK_PTR); + gcc_assert (e == NULL); + } #endif /* Note that the following may create a new basic block @@ -2970,6 +3078,104 @@ rtl_flow_call_edges_add (sbitmap blocks) return blocks_split; } +/* Add COMP_RTX as a condition at end of COND_BB. FIRST_HEAD is + the conditional branch target, SECOND_HEAD should be the fall-thru + there is no need to handle this here the loop versioning code handles + this. the reason for SECON_HEAD is that it is needed for condition + in trees, and this should be of the same type since it is a hook. */ +static void +rtl_lv_add_condition_to_bb (basic_block first_head , + basic_block second_head ATTRIBUTE_UNUSED, + basic_block cond_bb, void *comp_rtx) +{ + rtx label, seq, jump; + rtx op0 = XEXP ((rtx)comp_rtx, 0); + rtx op1 = XEXP ((rtx)comp_rtx, 1); + enum rtx_code comp = GET_CODE ((rtx)comp_rtx); + enum machine_mode mode; + + + label = block_label (first_head); + mode = GET_MODE (op0); + if (mode == VOIDmode) + mode = GET_MODE (op1); + + start_sequence (); + op0 = force_operand (op0, NULL_RTX); + op1 = force_operand (op1, NULL_RTX); + do_compare_rtx_and_jump (op0, op1, comp, 0, + mode, NULL_RTX, NULL_RTX, label, -1); + jump = get_last_insn (); + JUMP_LABEL (jump) = label; + LABEL_NUSES (label)++; + seq = get_insns (); + end_sequence (); + + /* Add the new cond , in the new head. */ + emit_insn_after(seq, BB_END(cond_bb)); +} + + +/* Given a block B with unconditional branch at its end, get the + store the return the branch edge and the fall-thru edge in + BRANCH_EDGE and FALLTHRU_EDGE respectively. */ +static void +rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge, + edge *fallthru_edge) +{ + edge e = EDGE_SUCC (b, 0); + + if (e->flags & EDGE_FALLTHRU) + { + *fallthru_edge = e; + *branch_edge = EDGE_SUCC (b, 1); + } + else + { + *branch_edge = e; + *fallthru_edge = EDGE_SUCC (b, 1); + } +} + +void +init_rtl_bb_info (basic_block bb) +{ + gcc_assert (!bb->il.rtl); + bb->il.rtl = ggc_alloc_cleared_rtl_bb_info (); +} + +/* Returns true if it is possible to remove edge E by redirecting + it to the destination of the other edge from E->src. */ + +static bool +rtl_can_remove_branch_p (const_edge e) +{ + const_basic_block src = e->src; + const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest; + const_rtx insn = BB_END (src), set; + + /* The conditions are taken from try_redirect_by_replacing_jump. */ + if (target == EXIT_BLOCK_PTR) + return false; + + if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) + return false; + + if (find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX) + || BB_PARTITION (src) != BB_PARTITION (target)) + return false; + + if (!onlyjump_p (insn) + || tablejump_p (insn, NULL, NULL)) + return false; + + set = single_set (insn); + if (!set || side_effects_p (set)) + return false; + + return true; +} + /* Implementation of CFG manipulation for linearized RTL. 
*/ struct cfg_hooks rtl_cfg_hooks = { "rtl", @@ -2978,6 +3184,7 @@ struct cfg_hooks rtl_cfg_hooks = { rtl_create_basic_block, rtl_redirect_edge_and_branch, rtl_redirect_edge_and_branch_force, + rtl_can_remove_branch_p, rtl_delete_block, rtl_split_block, rtl_move_block_after, @@ -2990,9 +3197,17 @@ struct cfg_hooks rtl_cfg_hooks = { rtl_split_edge, rtl_make_forwarder_block, rtl_tidy_fallthru_edge, + rtl_force_nonfallthru, rtl_block_ends_with_call_p, rtl_block_ends_with_condjump_p, - rtl_flow_call_edges_add + rtl_flow_call_edges_add, + NULL, /* execute_on_growing_pred */ + NULL, /* execute_on_shrinking_pred */ + NULL, /* duplicate loop for trees */ + NULL, /* lv_add_condition_to_bb */ + NULL, /* lv_adjust_loop_header_phi*/ + NULL, /* extract_cond_bb_edges */ + NULL /* flush_pending_stmts */ }; /* Implementation of CFG manipulation for cfg layout RTL, where @@ -3003,8 +3218,8 @@ struct cfg_hooks rtl_cfg_hooks = { /* We do not want to declare these functions in a header file, since they should only be used through the cfghooks interface, and we do not want to move them here since it would require also moving quite a lot of related - code. */ -extern bool cfg_layout_can_duplicate_bb_p (basic_block); + code. They are in cfglayout.c. */ +extern bool cfg_layout_can_duplicate_bb_p (const_basic_block); extern basic_block cfg_layout_duplicate_bb (basic_block); struct cfg_hooks cfg_layout_rtl_cfg_hooks = { @@ -3014,6 +3229,7 @@ struct cfg_hooks cfg_layout_rtl_cfg_hooks = { cfg_layout_create_basic_block, cfg_layout_redirect_edge_and_branch, cfg_layout_redirect_edge_and_branch_force, + rtl_can_remove_branch_p, cfg_layout_delete_block, cfg_layout_split_block, rtl_move_block_after, @@ -3025,9 +3241,16 @@ struct cfg_hooks cfg_layout_rtl_cfg_hooks = { cfg_layout_duplicate_bb, cfg_layout_split_edge, rtl_make_forwarder_block, - NULL, + NULL, /* tidy_fallthru_edge */ + rtl_force_nonfallthru, rtl_block_ends_with_call_p, rtl_block_ends_with_condjump_p, - rtl_flow_call_edges_add + rtl_flow_call_edges_add, + NULL, /* execute_on_growing_pred */ + NULL, /* execute_on_shrinking_pred */ + duplicate_loop_to_header_edge, /* duplicate loop for trees */ + rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */ + NULL, /* lv_adjust_loop_header_phi*/ + rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */ + NULL /* flush_pending_stmts */ }; -
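
For reference, a minimal sketch (not part of this patch) of how a pass typically drives the edge-insertion machinery handled by commit_one_edge_insertion above: it queues a small insn sequence on each ordinary successor edge and then calls commit_edge_insertions, which emits the queued insns at the head of the destination block, after the end of the source block, or on a newly split block, as the surrounding code decides. The helper name and the register being cleared are invented for illustration only; the calls used (start_sequence, emit_move_insn, get_insns, end_sequence, insert_insn_on_edge, commit_edge_insertions, FOR_EACH_EDGE) are the internal GCC APIs that appear in this file, and the fragment assumes GCC's internal RTL/CFG headers rather than being a standalone program.

/* Illustrative sketch only -- not part of this patch.  Queue a register
   clear on every non-abnormal successor edge of BB, then materialize the
   queued insns.  The function name and REG are hypothetical.  */

static void
clear_reg_on_normal_succ_edges (basic_block bb, rtx reg)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      rtx seq;

      /* Abnormal edges (EH, nonlocal goto) cannot take inserted code.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      /* Build the insns in a detached sequence.  */
      start_sequence ();
      emit_move_insn (reg, const0_rtx);
      seq = get_insns ();
      end_sequence ();

      /* Only queue the insns on the edge; nothing is emitted into the
	 insn stream yet.  */
      insert_insn_on_edge (seq, e);
    }

  /* Emit all queued insns, splitting edges where they cannot be placed
     directly in the source or destination block.  */
  commit_edge_insertions ();
}
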