1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 /* This file contains optimizer of the control flow. The main entrypoint is
23 cleanup_cfg. Following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by
28 underlying infrastructure so branch can be converted to simplejump or
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
41 #include "insn-config.h"
49 /* cleanup_cfg maintains following flags for each basic block. */
53 /* Set if BB is the forwarder block to avoid too many
54 forwarder_block_p calls. */
55 BB_FORWARDER_BLOCK = 1,
56 BB_NONTHREADABLE_BLOCK = 2
59 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
60 #define BB_SET_FLAG(BB, FLAG) \
61 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
62 #define BB_CLEAR_FLAG(BB, FLAG) \
63 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
65 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
67 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
68 static bool try_crossjump_bb PARAMS ((int, basic_block));
69 static bool outgoing_edges_match PARAMS ((int,
70 basic_block, basic_block));
71 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
73 static bool insns_match_p PARAMS ((int, rtx, rtx));
75 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
76 static bool tail_recursion_label_p PARAMS ((rtx));
77 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
79 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
81 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
83 static bool try_optimize_cfg PARAMS ((int));
84 static bool try_simplify_condjump PARAMS ((basic_block));
85 static bool try_forward_edges PARAMS ((int, basic_block));
86 static edge thread_jump PARAMS ((int, edge, basic_block));
87 static bool mark_effect PARAMS ((rtx, bitmap));
88 static void notice_new_block PARAMS ((basic_block));
89 static void update_forwarder_flag PARAMS ((basic_block));
90 static int mentions_nonequal_regs PARAMS ((rtx *, void *));
92 /* Set flags for newly created block. */
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
105 /* Recompute forwarder flag after block has been modified. */
108 update_forwarder_flag (bb)
111 if (forwarder_block_p (bb))
112 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
114 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
117 /* Simplify a conditional jump around an unconditional jump.
118 Return true if something changed. */
121 try_simplify_condjump (cbranch_block)
122 basic_block cbranch_block;
124 basic_block jump_block, jump_dest_block, cbranch_dest_block;
125 edge cbranch_jump_edge, cbranch_fallthru_edge;
128 /* Verify that there are exactly two successors. */
129 if (!cbranch_block->succ
130 || !cbranch_block->succ->succ_next
131 || cbranch_block->succ->succ_next->succ_next)
134 /* Verify that we've got a normal conditional branch at the end
136 cbranch_insn = cbranch_block->end;
137 if (!any_condjump_p (cbranch_insn))
140 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
141 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
143 /* The next block must not have multiple predecessors, must not
144 be the last block in the function, and must contain just the
145 unconditional jump. */
146 jump_block = cbranch_fallthru_edge->dest;
147 if (jump_block->pred->pred_next
148 || jump_block->next_bb == EXIT_BLOCK_PTR
149 || !FORWARDER_BLOCK_P (jump_block))
151 jump_dest_block = jump_block->succ->dest;
153 /* The conditional branch must target the block after the
154 unconditional branch. */
155 cbranch_dest_block = cbranch_jump_edge->dest;
157 if (!can_fallthru (jump_block, cbranch_dest_block))
160 /* Invert the conditional branch. */
161 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
165 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
166 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
168 /* Success. Update the CFG to match. Note that after this point
169 the edge variable names appear backwards; the redirection is done
170 this way to preserve edge profile data. */
171 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
173 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
175 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
176 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
177 update_br_prob_note (cbranch_block);
179 /* Delete the block with the unconditional jump, and clean up the mess. */
180 flow_delete_block (jump_block);
181 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
186 /* Attempt to prove that operation is NOOP using CSElib or mark the effect
187 on register. Used by jump threading. */
190 mark_effect (exp, nonequal)
196 switch (GET_CODE (exp))
198 /* In case we do clobber the register, mark it as equal, as we know the
199 value is dead so it don't have to match. */
201 if (REG_P (XEXP (exp, 0)))
203 dest = XEXP (exp, 0);
204 regno = REGNO (dest);
205 CLEAR_REGNO_REG_SET (nonequal, regno);
206 if (regno < FIRST_PSEUDO_REGISTER)
208 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
210 CLEAR_REGNO_REG_SET (nonequal, regno + n);
216 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
218 dest = SET_DEST (exp);
223 regno = REGNO (dest);
224 SET_REGNO_REG_SET (nonequal, regno);
225 if (regno < FIRST_PSEUDO_REGISTER)
227 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
229 SET_REGNO_REG_SET (nonequal, regno + n);
238 /* Return nonzero if X is an register set in regset DATA.
239 Called via for_each_rtx. */
241 mentions_nonequal_regs (x, data)
245 regset nonequal = (regset) data;
251 if (REGNO_REG_SET_P (nonequal, regno))
253 if (regno < FIRST_PSEUDO_REGISTER)
255 int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
257 if (REGNO_REG_SET_P (nonequal, regno + n))
263 /* Attempt to prove that the basic block B will have no side effects and
264 allways continues in the same edge if reached via E. Return the edge
265 if exist, NULL otherwise. */
268 thread_jump (mode, e, b)
273 rtx set1, set2, cond1, cond2, insn;
274 enum rtx_code code1, code2, reversed_code2;
275 bool reverse1 = false;
280 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
283 /* At the moment, we do handle only conditional jumps, but later we may
284 want to extend this code to tablejumps and others. */
285 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
287 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
289 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
293 /* Second branch must end with onlyjump, as we will eliminate the jump. */
294 if (!any_condjump_p (e->src->end))
297 if (!any_condjump_p (b->end) || !onlyjump_p (b->end))
299 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
303 set1 = pc_set (e->src->end);
304 set2 = pc_set (b->end);
305 if (((e->flags & EDGE_FALLTHRU) != 0)
306 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
309 cond1 = XEXP (SET_SRC (set1), 0);
310 cond2 = XEXP (SET_SRC (set2), 0);
312 code1 = reversed_comparison_code (cond1, e->src->end);
314 code1 = GET_CODE (cond1);
316 code2 = GET_CODE (cond2);
317 reversed_code2 = reversed_comparison_code (cond2, b->end);
319 if (!comparison_dominates_p (code1, code2)
320 && !comparison_dominates_p (code1, reversed_code2))
323 /* Ensure that the comparison operators are equivalent.
324 ??? This is far too pesimistic. We should allow swapped operands,
325 different CCmodes, or for example comparisons for interval, that
326 dominate even when operands are not equivalent. */
327 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
328 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
331 /* Short circuit cases where block B contains some side effects, as we can't
333 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
334 insn = NEXT_INSN (insn))
335 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
337 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
343 /* First process all values computed in the source basic block. */
344 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
345 insn = NEXT_INSN (insn))
347 cselib_process_insn (insn);
349 nonequal = BITMAP_XMALLOC();
350 CLEAR_REG_SET (nonequal);
352 /* Now assume that we've continued by the edge E to B and continue
353 processing as if it were same basic block.
354 Our goal is to prove that whole block is an NOOP. */
356 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
357 insn = NEXT_INSN (insn))
361 rtx pat = PATTERN (insn);
363 if (GET_CODE (pat) == PARALLEL)
365 for (i = 0; i < XVECLEN (pat, 0); i++)
366 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
369 failed |= mark_effect (pat, nonequal);
372 cselib_process_insn (insn);
375 /* Later we should clear nonequal of dead registers. So far we don't
376 have life information in cfg_cleanup. */
379 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
383 /* cond2 must not mention any register that is not equal to the
385 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
388 /* In case liveness information is available, we need to prove equivalence
389 only of the live values. */
390 if (mode & CLEANUP_UPDATE_LIFE)
391 AND_REG_SET (nonequal, b->global_live_at_end);
393 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
395 BITMAP_XFREE (nonequal);
397 if ((comparison_dominates_p (code1, code2) != 0)
398 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
399 return BRANCH_EDGE (b);
401 return FALLTHRU_EDGE (b);
404 BITMAP_XFREE (nonequal);
409 /* Attempt to forward edges leaving basic block B.
410 Return true if successful. */
413 try_forward_edges (mode, b)
417 bool changed = false;
418 edge e, next, *threaded_edges = NULL;
420 for (e = b->succ; e; e = next)
422 basic_block target, first;
424 bool threaded = false;
425 int nthreaded_edges = 0;
429 /* Skip complex edges because we don't know how to update them.
431 Still handle fallthru edges, as we can succeed to forward fallthru
432 edge to the same place as the branch edge of conditional branch
433 and turn conditional branch to an unconditional branch. */
434 if (e->flags & EDGE_COMPLEX)
437 target = first = e->dest;
440 while (counter < n_basic_blocks)
442 basic_block new_target = NULL;
443 bool new_target_threaded = false;
445 if (FORWARDER_BLOCK_P (target)
446 && target->succ->dest != EXIT_BLOCK_PTR)
448 /* Bypass trivial infinite loops. */
449 if (target == target->succ->dest)
450 counter = n_basic_blocks;
451 new_target = target->succ->dest;
454 /* Allow to thread only over one edge at time to simplify updating
456 else if (mode & CLEANUP_THREADING)
458 edge t = thread_jump (mode, e, target);
462 threaded_edges = xmalloc (sizeof (*threaded_edges)
468 /* Detect an infinite loop across blocks not
469 including the start block. */
470 for (i = 0; i < nthreaded_edges; ++i)
471 if (threaded_edges[i] == t)
473 if (i < nthreaded_edges)
475 counter = n_basic_blocks;
480 /* Detect an infinite loop across the start block. */
484 if (nthreaded_edges >= n_basic_blocks)
486 threaded_edges[nthreaded_edges++] = t;
488 new_target = t->dest;
489 new_target_threaded = true;
496 /* Avoid killing of loop pre-headers, as it is the place loop
497 optimizer wants to hoist code to.
499 For fallthru forwarders, the LOOP_BEG note must appear between
500 the header of block and CODE_LABEL of the loop, for non forwarders
501 it must appear before the JUMP_INSN. */
502 if (mode & CLEANUP_PRE_LOOP)
504 rtx insn = (target->succ->flags & EDGE_FALLTHRU
505 ? target->head : prev_nonnote_insn (target->end));
507 if (GET_CODE (insn) != NOTE)
508 insn = NEXT_INSN (insn);
510 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
511 insn = NEXT_INSN (insn))
512 if (GET_CODE (insn) == NOTE
513 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
516 if (GET_CODE (insn) == NOTE)
522 threaded |= new_target_threaded;
525 if (counter >= n_basic_blocks)
528 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
531 else if (target == first)
532 ; /* We didn't do anything. */
535 /* Save the values now, as the edge may get removed. */
536 gcov_type edge_count = e->count;
537 int edge_probability = e->probability;
541 /* Don't force if target is exit block. */
542 if (threaded && target != EXIT_BLOCK_PTR)
544 notice_new_block (redirect_edge_and_branch_force (e, target));
546 fprintf (rtl_dump_file, "Conditionals threaded.\n");
548 else if (!redirect_edge_and_branch (e, target))
551 fprintf (rtl_dump_file,
552 "Forwarding edge %i->%i to %i failed.\n",
553 b->index, e->dest->index, target->index);
557 /* We successfully forwarded the edge. Now update profile
558 data: for each edge we traversed in the chain, remove
559 the original edge's execution count. */
560 edge_frequency = ((edge_probability * b->frequency
561 + REG_BR_PROB_BASE / 2)
564 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
565 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
571 first->count -= edge_count;
572 if (first->count < 0)
574 first->frequency -= edge_frequency;
575 if (first->frequency < 0)
576 first->frequency = 0;
577 if (first->succ->succ_next)
581 if (n >= nthreaded_edges)
583 t = threaded_edges [n++];
586 if (first->frequency)
587 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
590 if (prob > t->probability)
591 prob = t->probability;
592 t->probability -= prob;
593 prob = REG_BR_PROB_BASE - prob;
596 first->succ->probability = REG_BR_PROB_BASE;
597 first->succ->succ_next->probability = 0;
600 for (e = first->succ; e; e = e->succ_next)
601 e->probability = ((e->probability * REG_BR_PROB_BASE)
603 update_br_prob_note (first);
607 /* It is possible that as the result of
608 threading we've removed edge as it is
609 threaded to the fallthru edge. Avoid
610 getting out of sync. */
611 if (n < nthreaded_edges
612 && first == threaded_edges [n]->src)
617 t->count -= edge_count;
622 while (first != target);
629 free (threaded_edges);
633 /* Return true if LABEL is a target of JUMP_INSN. This applies only
634 to non-complex jumps. That is, direct unconditional, conditional,
635 and tablejumps, but not computed jumps or returns. It also does
636 not apply to the fallthru case of a conditional jump. */
639 label_is_jump_target_p (label, jump_insn)
640 rtx label, jump_insn;
642 rtx tmp = JUMP_LABEL (jump_insn);
648 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
649 && GET_CODE (tmp) == JUMP_INSN
650 && (tmp = PATTERN (tmp),
651 GET_CODE (tmp) == ADDR_VEC
652 || GET_CODE (tmp) == ADDR_DIFF_VEC))
654 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
655 int i, veclen = GET_NUM_ELEM (vec);
657 for (i = 0; i < veclen; ++i)
658 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
665 /* Return true if LABEL is used for tail recursion. */
668 tail_recursion_label_p (label)
673 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
674 if (label == XEXP (x, 0))
680 /* Blocks A and B are to be merged into a single block. A has no incoming
681 fallthru edge, so it can be moved before B without adding or modifying
682 any jumps (aside from the jump from A to B). */
685 merge_blocks_move_predecessor_nojumps (a, b)
690 barrier = next_nonnote_insn (a->end);
691 if (GET_CODE (barrier) != BARRIER)
693 delete_insn (barrier);
695 /* Move block and loop notes out of the chain so that we do not
698 ??? A better solution would be to squeeze out all the non-nested notes
699 and adjust the block trees appropriately. Even better would be to have
700 a tighter connection between block trees and rtl so that this is not
702 if (squeeze_notes (&a->head, &a->end))
705 /* Scramble the insn chain. */
706 if (a->end != PREV_INSN (b->head))
707 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
708 a->flags |= BB_DIRTY;
711 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
714 /* Swap the records for the two blocks around. */
717 link_block (a, b->prev_bb);
719 /* Now blocks A and B are contiguous. Merge them. */
720 merge_blocks_nomove (a, b);
723 /* Blocks A and B are to be merged into a single block. B has no outgoing
724 fallthru edge, so it can be moved after A without adding or modifying
725 any jumps (aside from the jump from A to B). */
728 merge_blocks_move_successor_nojumps (a, b)
731 rtx barrier, real_b_end;
734 barrier = NEXT_INSN (b->end);
736 /* Recognize a jump table following block B. */
738 && GET_CODE (barrier) == CODE_LABEL
739 && NEXT_INSN (barrier)
740 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
741 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
742 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
744 /* Temporarily add the table jump insn to b, so that it will also
745 be moved to the correct location. */
746 b->end = NEXT_INSN (barrier);
747 barrier = NEXT_INSN (b->end);
750 /* There had better have been a barrier there. Delete it. */
751 if (barrier && GET_CODE (barrier) == BARRIER)
752 delete_insn (barrier);
754 /* Move block and loop notes out of the chain so that we do not
757 ??? A better solution would be to squeeze out all the non-nested notes
758 and adjust the block trees appropriately. Even better would be to have
759 a tighter connection between block trees and rtl so that this is not
761 if (squeeze_notes (&b->head, &b->end))
764 /* Scramble the insn chain. */
765 reorder_insns_nobb (b->head, b->end, a->end);
767 /* Restore the real end of b. */
771 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
774 /* Now blocks A and B are contiguous. Merge them. */
775 merge_blocks_nomove (a, b);
778 /* Attempt to merge basic blocks that are potentially non-adjacent.
779 Return true iff the attempt succeeded. */
782 merge_blocks (e, b, c, mode)
787 /* If C has a tail recursion label, do not merge. There is no
788 edge recorded from the call_placeholder back to this label, as
789 that would make optimize_sibling_and_tail_recursive_calls more
790 complex for no gain. */
791 if ((mode & CLEANUP_PRE_SIBCALL)
792 && GET_CODE (c->head) == CODE_LABEL
793 && tail_recursion_label_p (c->head))
796 /* If B has a fallthru edge to C, no need to move anything. */
797 if (e->flags & EDGE_FALLTHRU)
799 int b_index = b->index, c_index = c->index;
800 merge_blocks_nomove (b, c);
801 update_forwarder_flag (b);
804 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
810 /* Otherwise we will need to move code around. Do that only if expensive
811 transformations are allowed. */
812 else if (mode & CLEANUP_EXPENSIVE)
814 edge tmp_edge, b_fallthru_edge;
815 bool c_has_outgoing_fallthru;
816 bool b_has_incoming_fallthru;
818 /* Avoid overactive code motion, as the forwarder blocks should be
819 eliminated by edge redirection instead. One exception might have
820 been if B is a forwarder block and C has no fallthru edge, but
821 that should be cleaned up by bb-reorder instead. */
822 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
825 /* We must make sure to not munge nesting of lexical blocks,
826 and loop notes. This is done by squeezing out all the notes
827 and leaving them there to lie. Not ideal, but functional. */
829 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
830 if (tmp_edge->flags & EDGE_FALLTHRU)
833 c_has_outgoing_fallthru = (tmp_edge != NULL);
835 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
836 if (tmp_edge->flags & EDGE_FALLTHRU)
839 b_has_incoming_fallthru = (tmp_edge != NULL);
840 b_fallthru_edge = tmp_edge;
842 /* Otherwise, we're going to try to move C after B. If C does
843 not have an outgoing fallthru, then it can be moved
844 immediately after B without introducing or modifying jumps. */
845 if (! c_has_outgoing_fallthru)
847 merge_blocks_move_successor_nojumps (b, c);
851 /* If B does not have an incoming fallthru, then it can be moved
852 immediately before C without introducing or modifying jumps.
853 C cannot be the first block, so we do not have to worry about
854 accessing a non-existent block. */
856 if (b_has_incoming_fallthru)
860 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
862 bb = force_nonfallthru (b_fallthru_edge);
864 notice_new_block (bb);
867 merge_blocks_move_predecessor_nojumps (b, c);
875 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
878 insns_match_p (mode, i1, i2)
879 int mode ATTRIBUTE_UNUSED;
884 /* Verify that I1 and I2 are equivalent. */
885 if (GET_CODE (i1) != GET_CODE (i2))
891 if (GET_CODE (p1) != GET_CODE (p2))
894 /* If this is a CALL_INSN, compare register usage information.
895 If we don't check this on stack register machines, the two
896 CALL_INSNs might be merged leaving reg-stack.c with mismatching
897 numbers of stack registers in the same basic block.
898 If we don't check this on machines with delay slots, a delay slot may
899 be filled that clobbers a parameter expected by the subroutine.
901 ??? We take the simple route for now and assume that if they're
902 equal, they were constructed identically. */
904 if (GET_CODE (i1) == CALL_INSN
905 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
906 CALL_INSN_FUNCTION_USAGE (i2)))
910 /* If cross_jump_death_matters is not 0, the insn's mode
911 indicates whether or not the insn contains any stack-like
914 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
916 /* If register stack conversion has already been done, then
917 death notes must also be compared before it is certain that
918 the two instruction streams match. */
921 HARD_REG_SET i1_regset, i2_regset;
923 CLEAR_HARD_REG_SET (i1_regset);
924 CLEAR_HARD_REG_SET (i2_regset);
926 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
927 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
928 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
930 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
931 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
932 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
934 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
944 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
946 /* The following code helps take care of G++ cleanups. */
947 rtx equiv1 = find_reg_equal_equiv_note (i1);
948 rtx equiv2 = find_reg_equal_equiv_note (i2);
951 /* If the equivalences are not to a constant, they may
952 reference pseudos that no longer exist, so we can't
954 && (! reload_completed
955 || (CONSTANT_P (XEXP (equiv1, 0))
956 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
958 rtx s1 = single_set (i1);
959 rtx s2 = single_set (i2);
960 if (s1 != 0 && s2 != 0
961 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
963 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
964 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
965 if (! rtx_renumbered_equal_p (p1, p2))
967 else if (apply_change_group ())
978 /* Look through the insns at the end of BB1 and BB2 and find the longest
979 sequence that are equivalent. Store the first insns for that sequence
980 in *F1 and *F2 and return the sequence length.
982 To simplify callers of this function, if the blocks match exactly,
983 store the head of the blocks in *F1 and *F2. */
986 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
987 int mode ATTRIBUTE_UNUSED;
988 basic_block bb1, bb2;
991 rtx i1, i2, last1, last2, afterlast1, afterlast2;
994 /* Skip simple jumps at the end of the blocks. Complex jumps still
995 need to be compared for equivalence, which we'll do below. */
998 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1000 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1003 i1 = PREV_INSN (i1);
1008 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1011 /* Count everything except for unconditional jump as insn. */
1012 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1014 i2 = PREV_INSN (i2);
1020 while (!active_insn_p (i1) && i1 != bb1->head)
1021 i1 = PREV_INSN (i1);
1023 while (!active_insn_p (i2) && i2 != bb2->head)
1024 i2 = PREV_INSN (i2);
1026 if (i1 == bb1->head || i2 == bb2->head)
1029 if (!insns_match_p (mode, i1, i2))
1032 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
1033 if (active_insn_p (i1))
1035 /* If the merged insns have different REG_EQUAL notes, then
1037 rtx equiv1 = find_reg_equal_equiv_note (i1);
1038 rtx equiv2 = find_reg_equal_equiv_note (i2);
1040 if (equiv1 && !equiv2)
1041 remove_note (i1, equiv1);
1042 else if (!equiv1 && equiv2)
1043 remove_note (i2, equiv2);
1044 else if (equiv1 && equiv2
1045 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1047 remove_note (i1, equiv1);
1048 remove_note (i2, equiv2);
1051 afterlast1 = last1, afterlast2 = last2;
1052 last1 = i1, last2 = i2;
1056 i1 = PREV_INSN (i1);
1057 i2 = PREV_INSN (i2);
1061 /* Don't allow the insn after a compare to be shared by
1062 cross-jumping unless the compare is also shared. */
1063 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1064 last1 = afterlast1, last2 = afterlast2, ninsns--;
1067 /* Include preceding notes and labels in the cross-jump. One,
1068 this may bring us to the head of the blocks as requested above.
1069 Two, it keeps line number notes as matched as may be. */
1072 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
1073 last1 = PREV_INSN (last1);
1075 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1076 last1 = PREV_INSN (last1);
1078 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
1079 last2 = PREV_INSN (last2);
1081 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1082 last2 = PREV_INSN (last2);
1091 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1092 the branch instruction. This means that if we commonize the control
1093 flow before end of the basic block, the semantic remains unchanged.
1095 We may assume that there exists one edge with a common destination. */
1098 outgoing_edges_match (mode, bb1, bb2)
1103 int nehedges1 = 0, nehedges2 = 0;
1104 edge fallthru1 = 0, fallthru2 = 0;
1107 /* If BB1 has only one successor, we may be looking at either an
1108 unconditional jump, or a fake edge to exit. */
1109 if (bb1->succ && !bb1->succ->succ_next
1110 && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
1111 return (bb2->succ && !bb2->succ->succ_next
1112 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);
1114 /* Match conditional jumps - this may get tricky when fallthru and branch
1115 edges are crossed. */
1117 && bb1->succ->succ_next
1118 && !bb1->succ->succ_next->succ_next
1119 && any_condjump_p (bb1->end)
1120 && onlyjump_p (bb1->end))
1122 edge b1, f1, b2, f2;
1123 bool reverse, match;
1124 rtx set1, set2, cond1, cond2;
1125 enum rtx_code code1, code2;
1128 || !bb2->succ->succ_next
1129 || bb2->succ->succ_next->succ_next
1130 || !any_condjump_p (bb2->end)
1131 || !onlyjump_p (bb2->end))
1134 /* Do not crossjump across loop boundaries. This is a temporary
1135 workaround for the common scenario in which crossjumping results
1136 in killing the duplicated loop condition, making bb-reorder rotate
1137 the loop incorectly, leaving an extra unconditional jump inside
1140 This check should go away once bb-reorder knows how to duplicate
1141 code in this case or rotate the loops to avoid this scenario. */
1142 if (bb1->loop_depth != bb2->loop_depth)
1145 b1 = BRANCH_EDGE (bb1);
1146 b2 = BRANCH_EDGE (bb2);
1147 f1 = FALLTHRU_EDGE (bb1);
1148 f2 = FALLTHRU_EDGE (bb2);
1150 /* Get around possible forwarders on fallthru edges. Other cases
1151 should be optimized out already. */
1152 if (FORWARDER_BLOCK_P (f1->dest))
1153 f1 = f1->dest->succ;
1155 if (FORWARDER_BLOCK_P (f2->dest))
1156 f2 = f2->dest->succ;
1158 /* To simplify use of this function, return false if there are
1159 unneeded forwarder blocks. These will get eliminated later
1160 during cleanup_cfg. */
1161 if (FORWARDER_BLOCK_P (f1->dest)
1162 || FORWARDER_BLOCK_P (f2->dest)
1163 || FORWARDER_BLOCK_P (b1->dest)
1164 || FORWARDER_BLOCK_P (b2->dest))
1167 if (f1->dest == f2->dest && b1->dest == b2->dest)
1169 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1174 set1 = pc_set (bb1->end);
1175 set2 = pc_set (bb2->end);
1176 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1177 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1180 cond1 = XEXP (SET_SRC (set1), 0);
1181 cond2 = XEXP (SET_SRC (set2), 0);
1182 code1 = GET_CODE (cond1);
1184 code2 = reversed_comparison_code (cond2, bb2->end);
1186 code2 = GET_CODE (cond2);
1188 if (code2 == UNKNOWN)
1191 /* Verify codes and operands match. */
1192 match = ((code1 == code2
1193 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1194 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1195 || (code1 == swap_condition (code2)
1196 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1198 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1201 /* If we return true, we will join the blocks. Which means that
1202 we will only have one branch prediction bit to work with. Thus
1203 we require the existing branches to have probabilities that are
1207 && maybe_hot_bb_p (bb1)
1208 && maybe_hot_bb_p (bb2))
1212 if (b1->dest == b2->dest)
1213 prob2 = b2->probability;
1215 /* Do not use f2 probability as f2 may be forwarded. */
1216 prob2 = REG_BR_PROB_BASE - b2->probability;
1218 /* Fail if the difference in probabilities is greater than 50%.
1219 This rules out two well-predicted branches with opposite
1221 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1224 fprintf (rtl_dump_file,
1225 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1226 bb1->index, bb2->index, b1->probability, prob2);
1232 if (rtl_dump_file && match)
1233 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1234 bb1->index, bb2->index);
1239 /* Generic case - we are seeing an computed jump, table jump or trapping
1242 /* First ensure that the instructions match. There may be many outgoing
1243 edges so this test is generally cheaper.
1244 ??? Currently the tablejumps will never match, as they do have
1245 different tables. */
1246 if (!insns_match_p (mode, bb1->end, bb2->end))
1249 /* Search the outgoing edges, ensure that the counts do match, find possible
1250 fallthru and exception handling edges since these needs more
1252 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1253 e1 = e1->succ_next, e2 = e2->succ_next)
1255 if (e1->flags & EDGE_EH)
1258 if (e2->flags & EDGE_EH)
1261 if (e1->flags & EDGE_FALLTHRU)
1263 if (e2->flags & EDGE_FALLTHRU)
1267 /* If number of edges of various types does not match, fail. */
1269 || nehedges1 != nehedges2
1270 || (fallthru1 != 0) != (fallthru2 != 0))
1273 /* fallthru edges must be forwarded to the same destination. */
1276 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1277 ? fallthru1->dest->succ->dest: fallthru1->dest);
1278 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1279 ? fallthru2->dest->succ->dest: fallthru2->dest);
1285 /* In case we do have EH edges, ensure we are in the same region. */
1288 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1289 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1291 if (XEXP (n1, 0) != XEXP (n2, 0))
1295 /* We don't need to match the rest of edges as above checks should be enought
1296 to ensure that they are equivalent. */
1300 /* E1 and E2 are edges with the same destination block. Search their
1301 predecessors for common code. If found, redirect control flow from
1302 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1305 try_crossjump_to_edge (mode, e1, e2)
1310 basic_block src1 = e1->src, src2 = e2->src;
1311 basic_block redirect_to;
1312 rtx newpos1, newpos2;
1317 /* Search backward through forwarder blocks. We don't need to worry
1318 about multiple entry or chained forwarders, as they will be optimized
1319 away. We do this to look past the unconditional jump following a
1320 conditional jump that is required due to the current CFG shape. */
1322 && !src1->pred->pred_next
1323 && FORWARDER_BLOCK_P (src1))
1324 e1 = src1->pred, src1 = e1->src;
1327 && !src2->pred->pred_next
1328 && FORWARDER_BLOCK_P (src2))
1329 e2 = src2->pred, src2 = e2->src;
1331 /* Nothing to do if we reach ENTRY, or a common source block. */
1332 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1337 /* Seeing more than 1 forwarder blocks would confuse us later... */
1338 if (FORWARDER_BLOCK_P (e1->dest)
1339 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1342 if (FORWARDER_BLOCK_P (e2->dest)
1343 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1346 /* Likewise with dead code (possibly newly created by the other optimizations
1348 if (!src1->pred || !src2->pred)
1351 /* Look for the common insn sequence, part the first ... */
1352 if (!outgoing_edges_match (mode, src1, src2))
1355 /* ... and part the second. */
1356 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1360 /* Avoid splitting if possible. */
1361 if (newpos2 == src2->head)
1366 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1367 src2->index, nmatch);
1368 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1372 fprintf (rtl_dump_file,
1373 "Cross jumping from bb %i to bb %i; %i common insns\n",
1374 src1->index, src2->index, nmatch);
1376 redirect_to->count += src1->count;
1377 redirect_to->frequency += src1->frequency;
1378 /* We may have some registers visible trought the block. */
1379 redirect_to->flags |= BB_DIRTY;
1381 /* Recompute the frequencies and counts of outgoing edges. */
1382 for (s = redirect_to->succ; s; s = s->succ_next)
1385 basic_block d = s->dest;
1387 if (FORWARDER_BLOCK_P (d))
1390 for (s2 = src1->succ; ; s2 = s2->succ_next)
1392 basic_block d2 = s2->dest;
1393 if (FORWARDER_BLOCK_P (d2))
1394 d2 = d2->succ->dest;
1399 s->count += s2->count;
1401 /* Take care to update possible forwarder blocks. We verified
1402 that there is no more than one in the chain, so we can't run
1403 into infinite loop. */
1404 if (FORWARDER_BLOCK_P (s->dest))
1406 s->dest->succ->count += s2->count;
1407 s->dest->count += s2->count;
1408 s->dest->frequency += EDGE_FREQUENCY (s);
1411 if (FORWARDER_BLOCK_P (s2->dest))
1413 s2->dest->succ->count -= s2->count;
1414 if (s2->dest->succ->count < 0)
1415 s2->dest->succ->count = 0;
1416 s2->dest->count -= s2->count;
1417 s2->dest->frequency -= EDGE_FREQUENCY (s);
1418 if (s2->dest->frequency < 0)
1419 s2->dest->frequency = 0;
1420 if (s2->dest->count < 0)
1421 s2->dest->count = 0;
1424 if (!redirect_to->frequency && !src1->frequency)
1425 s->probability = (s->probability + s2->probability) / 2;
1428 = ((s->probability * redirect_to->frequency +
1429 s2->probability * src1->frequency)
1430 / (redirect_to->frequency + src1->frequency));
1433 update_br_prob_note (redirect_to);
1435 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1437 /* Skip possible basic block header. */
1438 if (GET_CODE (newpos1) == CODE_LABEL)
1439 newpos1 = NEXT_INSN (newpos1);
1441 if (GET_CODE (newpos1) == NOTE)
1442 newpos1 = NEXT_INSN (newpos1);
1445 /* Emit the jump insn. */
1446 label = block_label (redirect_to);
1447 emit_jump_insn_after (gen_jump (label), src1->end);
1448 JUMP_LABEL (src1->end) = label;
1449 LABEL_NUSES (label)++;
1451 /* Delete the now unreachable instructions. */
1452 delete_insn_chain (newpos1, last);
1454 /* Make sure there is a barrier after the new jump. */
1455 last = next_nonnote_insn (src1->end);
1456 if (!last || GET_CODE (last) != BARRIER)
1457 emit_barrier_after (src1->end);
1461 remove_edge (src1->succ);
1462 make_single_succ_edge (src1, redirect_to, 0);
1464 update_forwarder_flag (src1);
1469 /* Search the predecessors of BB for common insn sequences. When found,
1470 share code between them by redirecting control flow. Return true if
1471 any changes made. */
1474 try_crossjump_bb (mode, bb)
1478 edge e, e2, nexte2, nexte, fallthru;
1482 /* Nothing to do if there is not at least two incoming edges. */
1483 if (!bb->pred || !bb->pred->pred_next)
1486 /* It is always cheapest to redirect a block that ends in a branch to
1487 a block that falls through into BB, as that adds no branches to the
1488 program. We'll try that combination first. */
1489 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next, n++)
1491 if (fallthru->flags & EDGE_FALLTHRU)
1498 for (e = bb->pred; e; e = nexte)
1500 nexte = e->pred_next;
1502 /* As noted above, first try with the fallthru predecessor. */
1505 /* Don't combine the fallthru edge into anything else.
1506 If there is a match, we'll do it the other way around. */
1510 if (try_crossjump_to_edge (mode, e, fallthru))
1518 /* Non-obvious work limiting check: Recognize that we're going
1519 to call try_crossjump_bb on every basic block. So if we have
1520 two blocks with lots of outgoing edges (a switch) and they
1521 share lots of common destinations, then we would do the
1522 cross-jump check once for each common destination.
1524 Now, if the blocks actually are cross-jump candidates, then
1525 all of their destinations will be shared. Which means that
1526 we only need check them for cross-jump candidacy once. We
1527 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1528 choosing to do the check from the block for which the edge
1529 in question is the first successor of A. */
1530 if (e->src->succ != e)
1533 for (e2 = bb->pred; e2; e2 = nexte2)
1535 nexte2 = e2->pred_next;
1540 /* We've already checked the fallthru edge above. */
1544 /* The "first successor" check above only prevents multiple
1545 checks of crossjump(A,B). In order to prevent redundant
1546 checks of crossjump(B,A), require that A be the block
1547 with the lowest index. */
1548 if (e->src->index > e2->src->index)
1551 if (try_crossjump_to_edge (mode, e, e2))
1563 /* Do simple CFG optimizations - basic block merging, simplifying of jump
1564 instructions etc. Return nonzero if changes were made. */
1567 try_optimize_cfg (mode)
1570 bool changed_overall = false;
1575 if (mode & CLEANUP_CROSSJUMP)
1576 add_noreturn_fake_exit_edges ();
1579 update_forwarder_flag (bb);
1581 if (mode & CLEANUP_UPDATE_LIFE)
1584 if (! (* targetm.cannot_modify_jumps_p) ())
1586 /* Attempt to merge blocks as made possible by edge removal. If
1587 a block has only one successor, and the successor has only
1588 one predecessor, they may be combined. */
1595 fprintf (rtl_dump_file,
1596 "\n\ntry_optimize_cfg iteration %i\n\n",
1599 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1603 bool changed_here = false;
1605 /* Delete trivially dead basic blocks. */
1606 while (b->pred == NULL)
1610 fprintf (rtl_dump_file, "Deleting block %i.\n",
1613 flow_delete_block (b);
1618 /* Remove code labels no longer used. Don't do this
1619 before CALL_PLACEHOLDER is removed, as some branches
1620 may be hidden within. */
1621 if (b->pred->pred_next == NULL
1622 && (b->pred->flags & EDGE_FALLTHRU)
1623 && !(b->pred->flags & EDGE_COMPLEX)
1624 && GET_CODE (b->head) == CODE_LABEL
1625 && (!(mode & CLEANUP_PRE_SIBCALL)
1626 || !tail_recursion_label_p (b->head))
1627 /* If the previous block ends with a branch to this
1628 block, we can't delete the label. Normally this
1629 is a condjump that is yet to be simplified, but
1630 if CASE_DROPS_THRU, this can be a tablejump with
1631 some element going to the same place as the
1632 default (fallthru). */
1633 && (b->pred->src == ENTRY_BLOCK_PTR
1634 || GET_CODE (b->pred->src->end) != JUMP_INSN
1635 || ! label_is_jump_target_p (b->head,
1636 b->pred->src->end)))
1638 rtx label = b->head;
1640 b->head = NEXT_INSN (b->head);
1641 delete_insn_chain (label, label);
1643 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1647 /* If we fall through an empty block, we can remove it. */
1648 if (b->pred->pred_next == NULL
1649 && (b->pred->flags & EDGE_FALLTHRU)
1650 && GET_CODE (b->head) != CODE_LABEL
1651 && FORWARDER_BLOCK_P (b)
1652 /* Note that forwarder_block_p true ensures that
1653 there is a successor for this block. */
1654 && (b->succ->flags & EDGE_FALLTHRU)
1655 && n_basic_blocks > 1)
1658 fprintf (rtl_dump_file,
1659 "Deleting fallthru block %i.\n",
1662 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1663 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1664 flow_delete_block (b);
1669 /* Merge blocks. Loop because chains of blocks might be
1671 while ((s = b->succ) != NULL
1672 && s->succ_next == NULL
1673 && !(s->flags & EDGE_COMPLEX)
1674 && (c = s->dest) != EXIT_BLOCK_PTR
1675 && c->pred->pred_next == NULL
1677 /* If the jump insn has side effects,
1678 we can't kill the edge. */
1679 && (GET_CODE (b->end) != JUMP_INSN
1680 || simplejump_p (b->end))
1681 && merge_blocks (s, b, c, mode))
1682 changed_here = true;
1684 /* Simplify branch over branch. */
1685 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1686 changed_here = true;
1688 /* If B has a single outgoing edge, but uses a
1689 non-trivial jump instruction without side-effects, we
1690 can either delete the jump entirely, or replace it
1691 with a simple unconditional jump. Use
1692 redirect_edge_and_branch to do the dirty work. */
1694 && ! b->succ->succ_next
1695 && b->succ->dest != EXIT_BLOCK_PTR
1696 && onlyjump_p (b->end)
1697 && redirect_edge_and_branch (b->succ, b->succ->dest))
1699 update_forwarder_flag (b);
1700 changed_here = true;
1703 /* Simplify branch to branch. */
1704 if (try_forward_edges (mode, b))
1705 changed_here = true;
1707 /* Look for shared code between blocks. */
1708 if ((mode & CLEANUP_CROSSJUMP)
1709 && try_crossjump_bb (mode, b))
1710 changed_here = true;
1712 /* Don't get confused by the index shift caused by
1720 if ((mode & CLEANUP_CROSSJUMP)
1721 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1724 #ifdef ENABLE_CHECKING
1726 verify_flow_info ();
1729 changed_overall |= changed;
1734 if (mode & CLEANUP_CROSSJUMP)
1735 remove_fake_edges ();
1737 clear_aux_for_blocks ();
1739 return changed_overall;
1742 /* Delete all unreachable basic blocks. */
1745 delete_unreachable_blocks ()
1747 bool changed = false;
1748 basic_block b, next_bb;
1750 find_unreachable_blocks ();
1752 /* Delete all unreachable basic blocks. */
1754 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
1756 next_bb = b->next_bb;
1758 if (!(b->flags & BB_REACHABLE))
1760 flow_delete_block (b);
1766 tidy_fallthru_edges ();
1770 /* Tidy the CFG by deleting unreachable code and whatnot. */
1776 bool changed = false;
1778 timevar_push (TV_CLEANUP_CFG);
1779 if (delete_unreachable_blocks ())
1782 /* We've possibly created trivially dead code. Cleanup it right
1783 now to introduce more oppurtunities for try_optimize_cfg. */
1784 if (!(mode & (CLEANUP_NO_INSN_DEL
1785 | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
1786 && !reload_completed)
1787 delete_trivially_dead_insns (get_insns(), max_reg_num ());
1792 while (try_optimize_cfg (mode))
1794 delete_unreachable_blocks (), changed = true;
1795 if (mode & CLEANUP_UPDATE_LIFE)
1797 /* Cleaning up CFG introduces more oppurtunities for dead code
1798 removal that in turn may introduce more oppurtunities for
1799 cleaning up the CFG. */
1800 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
1802 | PROP_SCAN_DEAD_CODE
1803 | PROP_KILL_DEAD_CODE
1807 else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
1808 && !reload_completed)
1810 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
1815 delete_dead_jumptables ();
1818 /* Kill the data we won't maintain. */
1819 free_EXPR_LIST_list (&label_value_list);
1820 timevar_pop (TV_CLEANUP_CFG);