1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 /* This file contains optimizer of the control flow. The main entrypoint is
23 cleanup_cfg. Following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (edge to the forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by
28 underlying infrastructure so branch can be converted to simplejump or
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
36 #include "coretypes.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
43 #include "insn-config.h"
52 /* cleanup_cfg maintains following flags for each basic block.  */
/* Per-block scratch flags stored in basic_block->aux for the duration of
   cleanup_cfg; they cache expensive predicates so they are not recomputed
   on every query.  */
56 /* Set if BB is the forwarder block to avoid too many
57 forwarder_block_p calls. */
58 BB_FORWARDER_BLOCK = 1,
/* Set once thread_jump has proven BB cannot be threaded, so later
   attempts bail out immediately.  */
59 BB_NONTHREADABLE_BLOCK = 2
/* Accessors for the flag word kept in BB->aux.  The (long) intermediate
   cast avoids a direct int-to-pointer conversion warning.  */
62 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
63 #define BB_SET_FLAG(BB, FLAG) \
64 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
65 #define BB_CLEAR_FLAG(BB, FLAG) \
66 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
/* Nonzero if BB has been determined to be a forwarder block (a block
   that does nothing but jump to its single successor).  */
68 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
/* Forward declarations for the file-local helpers implemented below.
   All are static: this file's only public entry point is cleanup_cfg.  */
70 static bool try_crossjump_to_edge (int, edge, edge);
71 static bool try_crossjump_bb (int, basic_block);
72 static bool outgoing_edges_match (int, basic_block, basic_block);
73 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
74 static bool insns_match_p (int, rtx, rtx);
76 static bool label_is_jump_target_p (rtx, rtx);
77 static bool tail_recursion_label_p (rtx);
78 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
79 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
80 static basic_block merge_blocks (edge,basic_block,basic_block, int);
81 static bool try_optimize_cfg (int);
82 static bool try_simplify_condjump (basic_block);
83 static bool try_forward_edges (int, basic_block);
84 static edge thread_jump (int, edge, basic_block);
85 static bool mark_effect (rtx, bitmap);
86 static void notice_new_block (basic_block);
87 static void update_forwarder_flag (basic_block);
88 static int mentions_nonequal_regs (rtx *, void *);
90 /* Set flags for newly created block.  Initializes the BB->aux flag
91 cache for BB, marking it as a forwarder when forwarder_block_p says
92 so, so later FORWARDER_BLOCK_P queries are cheap.  */
93 notice_new_block (basic_block bb)
98 if (forwarder_block_p (bb))
99 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
102 /* Recompute forwarder flag after block has been modified.  Unlike
103 notice_new_block, this also clears a stale BB_FORWARDER_BLOCK bit
104 when the block is no longer a forwarder.  */
105 update_forwarder_flag (basic_block bb)
107 if (forwarder_block_p (bb))
108 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK)
110 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
113 /* Simplify a conditional jump around an unconditional jump.
114 Return true if something changed.

   Pattern handled: CBRANCH_BLOCK ends in a conditional branch whose
   fallthru edge leads to a forwarder block containing only an
   unconditional jump, and the conditional branch's target is the block
   the flow would fall into after that jump.  We invert the condition,
   retarget it, and delete the jump-only block.  */
117 try_simplify_condjump (basic_block cbranch_block)
119 basic_block jump_block, jump_dest_block, cbranch_dest_block;
120 edge cbranch_jump_edge, cbranch_fallthru_edge;
123 /* Verify that there are exactly two successors. */
124 if (!cbranch_block->succ
125 || !cbranch_block->succ->succ_next
126 || cbranch_block->succ->succ_next->succ_next)
129 /* Verify that we've got a normal conditional branch at the end
130 of the block.  */
131 cbranch_insn = cbranch_block->end;
132 if (!any_condjump_p (cbranch_insn))
135 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
136 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
138 /* The next block must not have multiple predecessors, must not
139 be the last block in the function, and must contain just the
140 unconditional jump. */
141 jump_block = cbranch_fallthru_edge->dest;
142 if (jump_block->pred->pred_next
143 || jump_block->next_bb == EXIT_BLOCK_PTR
144 || !FORWARDER_BLOCK_P (jump_block))
146 jump_dest_block = jump_block->succ->dest;
148 /* The conditional branch must target the block after the
149 unconditional branch. */
150 cbranch_dest_block = cbranch_jump_edge->dest;
/* The inverted branch will fall through into CBRANCH_DEST_BLOCK, so the
   two must be physically adjacent.  */
152 if (!can_fallthru (jump_block, cbranch_dest_block))
155 /* Invert the conditional branch. */
156 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
160 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
161 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
163 /* Success. Update the CFG to match. Note that after this point
164 the edge variable names appear backwards; the redirection is done
165 this way to preserve edge profile data. */
166 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
168 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
/* The former branch edge is now the fallthru and vice versa.  */
170 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
171 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
172 update_br_prob_note (cbranch_block);
174 /* Delete the block with the unconditional jump, and clean up the mess. */
175 delete_block (jump_block);
176 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
181 /* Attempt to prove that operation is NOOP using CSElib or mark the effect
182 on register. Used by jump threading.

   EXP is one effect (CLOBBER or SET) of an insn; NONEQUAL is the regset
   of registers whose value may differ from the entry state.  Returns
   nonzero on failure (effect cannot be analyzed).  */
185 mark_effect (rtx exp, regset nonequal)
189 switch (GET_CODE (exp))
191 /* In case we do clobber the register, mark it as equal, as we know the
192 value is dead so it don't have to match. */
194 if (REG_P (XEXP (exp, 0)))
196 dest = XEXP (exp, 0);
197 regno = REGNO (dest);
198 CLEAR_REGNO_REG_SET (nonequal, regno);
/* Hard registers may span several regs; clear each constituent one.  */
199 if (regno < FIRST_PSEUDO_REGISTER)
201 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
203 CLEAR_REGNO_REG_SET (nonequal, regno + n);
/* A SET whose source cselib proves equal to its destination is a
   no-op; otherwise the destination becomes "nonequal".  */
209 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
211 dest = SET_DEST (exp);
216 regno = REGNO (dest);
217 SET_REGNO_REG_SET (nonequal, regno);
218 if (regno < FIRST_PSEUDO_REGISTER)
220 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
222 SET_REGNO_REG_SET (nonequal, regno + n);
231 /* Return nonzero if X is an register set in regset DATA.
232 Called via for_each_rtx.  A nonzero return aborts the traversal, so
233 the overall for_each_rtx call reports whether any register in the
234 expression is in the "nonequal" set.  */
234 mentions_nonequal_regs (rtx *x, void *data)
236 regset nonequal = (regset) data;
242 if (REGNO_REG_SET_P (nonequal, regno))
/* For hard registers, also test every constituent register.  */
244 if (regno < FIRST_PSEUDO_REGISTER)
246 int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
248 if (REGNO_REG_SET_P (nonequal, regno + n))
254 /* Attempt to prove that the basic block B will have no side effects and
255 always continues in the same edge if reached via E. Return the edge
256 if exist, NULL otherwise.

   MODE is the cleanup_cfg flags word.  On unrecoverable structural
   failure, B is tagged BB_NONTHREADABLE_BLOCK so future calls return
   immediately.  Uses cselib to symbolically execute E->src and B.  */
259 thread_jump (int mode, edge e, basic_block b)
261 rtx set1, set2, cond1, cond2, insn;
262 enum rtx_code code1, code2, reversed_code2;
263 bool reverse1 = false;
/* Fast path: we already proved B is not threadable.  */
268 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
271 /* At the moment, we do handle only conditional jumps, but later we may
272 want to extend this code to tablejumps and others. */
273 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
275 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
277 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
281 /* Second branch must end with onlyjump, as we will eliminate the jump. */
282 if (!any_condjump_p (e->src->end))
285 if (!any_condjump_p (b->end) || !onlyjump_p (b->end))
287 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
291 set1 = pc_set (e->src->end);
292 set2 = pc_set (b->end);
/* Determine whether arriving via E corresponds to the branch-taken or
   fallthru arm of the first condjump.  */
293 if (((e->flags & EDGE_FALLTHRU) != 0)
294 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
297 cond1 = XEXP (SET_SRC (set1), 0);
298 cond2 = XEXP (SET_SRC (set2), 0);
300 code1 = reversed_comparison_code (cond1, e->src->end);
302 code1 = GET_CODE (cond1);
304 code2 = GET_CODE (cond2);
305 reversed_code2 = reversed_comparison_code (cond2, b->end);
/* The first condition must decide the second (directly or reversed).  */
307 if (!comparison_dominates_p (code1, code2)
308 && !comparison_dominates_p (code1, reversed_code2))
311 /* Ensure that the comparison operators are equivalent.
312 ??? This is far too pessimistic. We should allow swapped operands,
313 different CCmodes, or for example comparisons for interval, that
314 dominate even when operands are not equivalent. */
315 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
316 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
319 /* Short circuit cases where block B contains some side effects, as we can't
320 safely bypass it.  */
321 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
322 insn = NEXT_INSN (insn))
323 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
325 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
331 /* First process all values computed in the source basic block. */
332 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
333 insn = NEXT_INSN (insn))
335 cselib_process_insn (insn);
337 nonequal = BITMAP_XMALLOC();
338 CLEAR_REG_SET (nonequal);
340 /* Now assume that we've continued by the edge E to B and continue
341 processing as if it were same basic block.
342 Our goal is to prove that whole block is an NOOP. */
344 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
345 insn = NEXT_INSN (insn))
349 rtx pat = PATTERN (insn);
/* A PARALLEL carries several effects; analyze each one.  */
351 if (GET_CODE (pat) == PARALLEL)
353 for (i = 0; i < XVECLEN (pat, 0); i++)
354 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
357 failed |= mark_effect (pat, nonequal);
360 cselib_process_insn (insn);
363 /* Later we should clear nonequal of dead registers. So far we don't
364 have life information in cfg_cleanup. */
367 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
371 /* cond2 must not mention any register that is not equal to the
372 state at the entry of E->src.  */
373 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
376 /* In case liveness information is available, we need to prove equivalence
377 only of the live values. */
378 if (mode & CLEANUP_UPDATE_LIFE)
379 AND_REG_SET (nonequal, b->global_live_at_end)
/* Any remaining nonequal register defeats the proof.  */
381 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
383 BITMAP_XFREE (nonequal);
/* B's outcome is now known: pick the branch or fallthru edge depending
   on whether the dominating condition selects the taken arm.  */
385 if ((comparison_dominates_p (code1, code2) != 0)
386 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
387 return BRANCH_EDGE (b);
389 return FALLTHRU_EDGE (b);
392 BITMAP_XFREE (nonequal);
397 /* Attempt to forward edges leaving basic block B.
398 Return true if successful.

   For each successor edge, follow chains of forwarder blocks (and, when
   CLEANUP_THREADING is set, edges proven by thread_jump) to find the
   final destination, then redirect the edge there and update profile
   counts for every block skipped over.  */
401 try_forward_edges (int mode, basic_block b)
403 bool changed = false;
404 edge e, next, *threaded_edges = NULL;
406 for (e = b->succ; e; e = next)
408 basic_block target, first;
410 bool threaded = false;
411 int nthreaded_edges = 0;
415 /* Skip complex edges because we don't know how to update them.
417 Still handle fallthru edges, as we can succeed to forward fallthru
418 edge to the same place as the branch edge of conditional branch
419 and turn conditional branch to an unconditional branch. */
420 if (e->flags & EDGE_COMPLEX)
423 target = first = e->dest;
/* COUNTER bounds the walk; reaching n_basic_blocks means we detected
   an infinite forwarding loop and must give up on this edge.  */
426 while (counter < n_basic_blocks)
428 basic_block new_target = NULL;
429 bool new_target_threaded = false;
431 if (FORWARDER_BLOCK_P (target)
432 && target->succ->dest != EXIT_BLOCK_PTR)
434 /* Bypass trivial infinite loops. */
435 if (target == target->succ->dest)
436 counter = n_basic_blocks;
437 new_target = target->succ->dest;
440 /* Allow to thread only over one edge at time to simplify updating
441 of probabilities.  */
442 else if (mode & CLEANUP_THREADING)
444 edge t = thread_jump (mode, e, target);
448 threaded_edges = xmalloc (sizeof (*threaded_edges)
454 /* Detect an infinite loop across blocks not
455 including the start block. */
456 for (i = 0; i < nthreaded_edges; ++i)
457 if (threaded_edges[i] == t)
459 if (i < nthreaded_edges)
461 counter = n_basic_blocks;
466 /* Detect an infinite loop across the start block. */
470 if (nthreaded_edges >= n_basic_blocks)
472 threaded_edges[nthreaded_edges++] = t;
474 new_target = t->dest;
475 new_target_threaded = true;
482 /* Avoid killing of loop pre-headers, as it is the place loop
483 optimizer wants to hoist code to.
485 For fallthru forwarders, the LOOP_BEG note must appear between
486 the header of block and CODE_LABEL of the loop, for non forwarders
487 it must appear before the JUMP_INSN. */
488 if ((mode & CLEANUP_PRE_LOOP) && optimize)
490 rtx insn = (target->succ->flags & EDGE_FALLTHRU
491 ? target->head : prev_nonnote_insn (target->end));
493 if (GET_CODE (insn) != NOTE)
494 insn = NEXT_INSN (insn);
496 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
497 insn = NEXT_INSN (insn))
498 if (GET_CODE (insn) == NOTE
499 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
502 if (GET_CODE (insn) == NOTE)
505 /* Do not clean up branches to just past the end of a loop
506 at this time; it can mess up the loop optimizer's
507 recognition of some patterns. */
509 insn = PREV_INSN (target->head);
510 if (insn && GET_CODE (insn) == NOTE
511 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
517 threaded |= new_target_threaded;
520 if (counter >= n_basic_blocks)
523 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
526 else if (target == first)
527 ; /* We didn't do anything. */
530 /* Save the values now, as the edge may get removed. */
531 gcov_type edge_count = e->count;
532 int edge_probability = e->probability;
536 /* Don't force if target is exit block. */
537 if (threaded && target != EXIT_BLOCK_PTR)
539 notice_new_block (redirect_edge_and_branch_force (e, target));
541 fprintf (rtl_dump_file, "Conditionals threaded.\n");
543 else if (!redirect_edge_and_branch (e, target))
546 fprintf (rtl_dump_file,
547 "Forwarding edge %i->%i to %i failed.\n",
548 b->index, e->dest->index, target->index);
552 /* We successfully forwarded the edge. Now update profile
553 data: for each edge we traversed in the chain, remove
554 the original edge's execution count. */
555 edge_frequency = ((edge_probability * b->frequency
556 + REG_BR_PROB_BASE / 2)
/* Redirecting may have turned B itself into a forwarder block.  */
559 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
560 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
/* Walk the skipped chain subtracting the moved count/frequency from
   each intermediate block, clamping at zero.  */
566 first->count -= edge_count;
567 if (first->count < 0)
569 first->frequency -= edge_frequency;
570 if (first->frequency < 0)
571 first->frequency = 0;
572 if (first->succ->succ_next)
576 if (n >= nthreaded_edges)
578 t = threaded_edges [n++];
581 if (first->frequency)
582 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
585 if (prob > t->probability)
586 prob = t->probability;
587 t->probability -= prob;
588 prob = REG_BR_PROB_BASE - prob;
591 first->succ->probability = REG_BR_PROB_BASE;
592 first->succ->succ_next->probability = 0;
/* Rescale the remaining outgoing probabilities of FIRST.  */
595 for (e = first->succ; e; e = e->succ_next)
596 e->probability = ((e->probability * REG_BR_PROB_BASE)
598 update_br_prob_note (first);
602 /* It is possible that as the result of
603 threading we've removed edge as it is
604 threaded to the fallthru edge. Avoid
605 getting out of sync. */
606 if (n < nthreaded_edges
607 && first == threaded_edges [n]->src)
612 t->count -= edge_count;
617 while (first != target);
624 free (threaded_edges);
628 /* Return true if LABEL is a target of JUMP_INSN. This applies only
629 to non-complex jumps. That is, direct unconditional, conditional,
630 and tablejumps, but not computed jumps or returns. It also does
631 not apply to the fallthru case of a conditional jump. */
634 label_is_jump_target_p (rtx label, rtx jump_insn)
636 rtx tmp = JUMP_LABEL (jump_insn);
/* For a tablejump, scan every entry of the dispatch table; the table
   body lives in XVEC element 0 (ADDR_VEC) or 1 (ADDR_DIFF_VEC).  */
641 if (tablejump_p (jump_insn, NULL, &tmp))
643 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
644 int i, veclen = GET_NUM_ELEM (vec);
646 for (i = 0; i < veclen; ++i)
647 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
654 /* Return true if LABEL is used for tail recursion.  Linear search of
655 the global tail_recursion_label_list (an EXPR_LIST chain).  */
657 tail_recursion_label_p (rtx label)
661 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
662 if (label == XEXP (x, 0))
668 /* Blocks A and B are to be merged into a single block. A has no incoming
669 fallthru edge, so it can be moved before B without adding or modifying
670 any jumps (aside from the jump from A to B). */
673 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
/* The BARRIER after A becomes redundant once A falls into B.  */
677 barrier = next_nonnote_insn (a->end);
678 if (GET_CODE (barrier) != BARRIER)
680 delete_insn (barrier);
682 /* Move block and loop notes out of the chain so that we do not
683 disturb their order.
685 ??? A better solution would be to squeeze out all the non-nested notes
686 and adjust the block trees appropriately. Even better would be to have
687 a tighter connection between block trees and rtl so that this is not
688 necessary.  */
689 if (squeeze_notes (&a->head, &a->end))
692 /* Scramble the insn chain. */
693 if (a->end != PREV_INSN (b->head))
694 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
695 a->flags |= BB_DIRTY;
698 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
701 /* Swap the records for the two blocks around. */
704 link_block (a, b->prev_bb);
706 /* Now blocks A and B are contiguous. Merge them. */
707 merge_blocks_nomove (a, b);
710 /* Blocks A and B are to be merged into a single block. B has no outgoing
711 fallthru edge, so it can be moved after A without adding or modifying
712 any jumps (aside from the jump from A to B). */
715 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
717 rtx barrier, real_b_end;
720 barrier = NEXT_INSN (b->end);
722 /* Recognize a jump table following block B.  The table is a CODE_LABEL
723 followed by a JUMP_INSN holding an ADDR_VEC/ADDR_DIFF_VEC pattern.  */
724 && GET_CODE (barrier) == CODE_LABEL
725 && NEXT_INSN (barrier)
726 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
727 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
728 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
730 /* Temporarily add the table jump insn to b, so that it will also
731 be moved to the correct location. */
732 b->end = NEXT_INSN (barrier);
733 barrier = NEXT_INSN (b->end);
736 /* There had better have been a barrier there. Delete it. */
737 if (barrier && GET_CODE (barrier) == BARRIER)
738 delete_insn (barrier);
740 /* Move block and loop notes out of the chain so that we do not
741 disturb their order.
743 ??? A better solution would be to squeeze out all the non-nested notes
744 and adjust the block trees appropriately. Even better would be to have
745 a tighter connection between block trees and rtl so that this is not
746 necessary.  */
747 if (squeeze_notes (&b->head, &b->end))
750 /* Scramble the insn chain. */
751 reorder_insns_nobb (b->head, b->end, a->end);
753 /* Restore the real end of b.  (It was extended above to drag the
754 jump table along.)  */
757 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
760 /* Now blocks A and B are contiguous. Merge them. */
761 merge_blocks_nomove (a, b);
764 /* Attempt to merge basic blocks that are potentially non-adjacent.
765 Return NULL iff the attempt failed, otherwise return basic block
766 where cleanup_cfg should continue. Because the merging commonly
767 moves basic block away or introduces another optimization
768 possibility, return basic block just before B so cleanup_cfg don't
769 need to iterate.
771 It may be good idea to return basic block before C in the case
772 C has been moved after B and originally appeared earlier in the
773 insn sequence, but we have no information available about the
774 relative ordering of these two. Hopefully it is not too common. */
777 merge_blocks (edge e, basic_block b, basic_block c, int mode)
780 /* If C has a tail recursion label, do not merge. There is no
781 edge recorded from the call_placeholder back to this label, as
782 that would make optimize_sibling_and_tail_recursive_calls more
783 complex for no gain. */
784 if ((mode & CLEANUP_PRE_SIBCALL)
785 && GET_CODE (c->head) == CODE_LABEL
786 && tail_recursion_label_p (c->head))
789 /* If B has a fallthru edge to C, no need to move anything. */
790 if (e->flags & EDGE_FALLTHRU)
792 int b_index = b->index, c_index = c->index;
793 merge_blocks_nomove (b, c);
794 update_forwarder_flag (b);
797 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
800 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
803 /* Otherwise we will need to move code around. Do that only if expensive
804 transformations are allowed. */
805 else if (mode & CLEANUP_EXPENSIVE)
807 edge tmp_edge, b_fallthru_edge;
808 bool c_has_outgoing_fallthru;
809 bool b_has_incoming_fallthru;
811 /* Avoid overactive code motion, as the forwarder blocks should be
812 eliminated by edge redirection instead. One exception might have
813 been if B is a forwarder block and C has no fallthru edge, but
814 that should be cleaned up by bb-reorder instead. */
815 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
818 /* We must make sure to not munge nesting of lexical blocks,
819 and loop notes. This is done by squeezing out all the notes
820 and leaving them there to lie. Not ideal, but functional. */
/* Find C's outgoing fallthru edge, if any.  */
822 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
823 if (tmp_edge->flags & EDGE_FALLTHRU)
826 c_has_outgoing_fallthru = (tmp_edge != NULL);
/* Find B's incoming fallthru edge, if any.  */
828 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
829 if (tmp_edge->flags & EDGE_FALLTHRU)
832 b_has_incoming_fallthru = (tmp_edge != NULL);
833 b_fallthru_edge = tmp_edge;
836 next = next->prev_bb;
838 /* Otherwise, we're going to try to move C after B. If C does
839 not have an outgoing fallthru, then it can be moved
840 immediately after B without introducing or modifying jumps. */
841 if (! c_has_outgoing_fallthru)
843 merge_blocks_move_successor_nojumps (b, c);
844 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
847 /* If B does not have an incoming fallthru, then it can be moved
848 immediately before C without introducing or modifying jumps.
849 C cannot be the first block, so we do not have to worry about
850 accessing a non-existent block. */
852 if (b_has_incoming_fallthru)
/* The fallthru into B comes from the entry block; convert it to a
   jump so B can be relocated.  */
856 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
858 bb = force_nonfallthru (b_fallthru_edge);
860 notice_new_block (bb);
863 merge_blocks_move_predecessor_nojumps (b, c);
864 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
871 /* Return true if I1 and I2 are equivalent and thus can be crossjumped.
872 MODE carries the cleanup_cfg flags (used for CLEANUP_POST_REGSTACK).  */
874 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
878 /* Verify that I1 and I2 are equivalent. */
879 if (GET_CODE (i1) != GET_CODE (i2))
885 if (GET_CODE (p1) != GET_CODE (p2))
888 /* If this is a CALL_INSN, compare register usage information.
889 If we don't check this on stack register machines, the two
890 CALL_INSNs might be merged leaving reg-stack.c with mismatching
891 numbers of stack registers in the same basic block.
892 If we don't check this on machines with delay slots, a delay slot may
893 be filled that clobbers a parameter expected by the subroutine.
895 ??? We take the simple route for now and assume that if they're
896 equal, they were constructed identically. */
898 if (GET_CODE (i1) == CALL_INSN
899 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
900 CALL_INSN_FUNCTION_USAGE (i2))
901 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
905 /* If cross_jump_death_matters is not 0, the insn's mode
906 indicates whether or not the insn contains any stack-like
907 regs.  */
909 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
911 /* If register stack conversion has already been done, then
912 death notes must also be compared before it is certain that
913 the two instruction streams match. */
916 HARD_REG_SET i1_regset, i2_regset;
918 CLEAR_HARD_REG_SET (i1_regset);
919 CLEAR_HARD_REG_SET (i2_regset);
/* Collect the stack registers that die in each insn.  */
921 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
922 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
923 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
925 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
926 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
927 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
929 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
/* Before reload, compare modulo register renumbering; after reload,
   require exact equality.  */
939 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
942 /* Do not do EQUIV substitution after reload. First, we're undoing the
943 work of reload_cse. Second, we may be undoing the work of the post-
944 reload splitting pass. */
945 /* ??? Possibly add a new phase switch variable that can be used by
946 targets to disallow the troublesome insns after splitting. */
947 if (!reload_completed)
949 /* The following code helps take care of G++ cleanups. */
950 rtx equiv1 = find_reg_equal_equiv_note (i1);
951 rtx equiv2 = find_reg_equal_equiv_note (i2);
954 /* If the equivalences are not to a constant, they may
955 reference pseudos that no longer exist, so we can't
956 use them.  */
957 && (! reload_completed
958 || (CONSTANT_P (XEXP (equiv1, 0))
959 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
961 rtx s1 = single_set (i1);
962 rtx s2 = single_set (i2);
963 if (s1 != 0 && s2 != 0
964 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
/* Tentatively substitute the REG_EQUAL values as sources; keep the
   change only if the patterns then match (apply_change_group).  */
966 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
967 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
968 if (! rtx_renumbered_equal_p (p1, p2))
970 else if (apply_change_group ())
979 /* Look through the insns at the end of BB1 and BB2 and find the longest
980 sequence that are equivalent. Store the first insns for that sequence
981 in *F1 and *F2 and return the sequence length.
983 To simplify callers of this function, if the blocks match exactly,
984 store the head of the blocks in *F1 and *F2. */
987 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
988 basic_block bb2, rtx *f1, rtx *f2)
990 rtx i1, i2, last1, last2, afterlast1, afterlast2;
993 /* Skip simple jumps at the end of the blocks. Complex jumps still
994 need to be compared for equivalence, which we'll do below. */
997 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
/* Skip a trailing simplejump / side-effect-free returnjump in BB1.  */
999 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1002 i1 = PREV_INSN (i1);
/* Likewise for BB2.  */
1007 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1010 /* Count everything except for unconditional jump as insn. */
1011 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1013 i2 = PREV_INSN (i2);
/* Main matching loop: walk both blocks backwards over real insns,
   extending the matched suffix while insns_match_p holds.  */
1019 while (!INSN_P (i1) && i1 != bb1->head)
1020 i1 = PREV_INSN (i1);
1022 while (!INSN_P (i2) && i2 != bb2->head)
1023 i2 = PREV_INSN (i2);
1025 if (i1 == bb1->head || i2 == bb2->head)
1028 if (!insns_match_p (mode, i1, i2))
1031 /* Don't begin a cross-jump with a NOTE insn. */
1034 /* If the merged insns have different REG_EQUAL notes, then
1035 remove them.  */
1036 rtx equiv1 = find_reg_equal_equiv_note (i1);
1037 rtx equiv2 = find_reg_equal_equiv_note (i2);
1039 if (equiv1 && !equiv2)
1040 remove_note (i1, equiv1);
1041 else if (!equiv1 && equiv2)
1042 remove_note (i2, equiv2);
1043 else if (equiv1 && equiv2
1044 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1046 remove_note (i1, equiv1);
1047 remove_note (i2, equiv2);
/* Remember the previous match pair in AFTERLAST* so one step can be
   undone for the cc0 case below.  */
1050 afterlast1 = last1, afterlast2 = last2;
1051 last1 = i1, last2 = i2;
1055 i1 = PREV_INSN (i1);
1056 i2 = PREV_INSN (i2);
1060 /* Don't allow the insn after a compare to be shared by
1061 cross-jumping unless the compare is also shared. */
1062 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1063 last1 = afterlast1, last2 = afterlast2, ninsns--;
1066 /* Include preceding notes and labels in the cross-jump. One,
1067 this may bring us to the head of the blocks as requested above.
1068 Two, it keeps line number notes as matched as may be. */
1071 while (last1 != bb1->head && !INSN_P (PREV_INSN (last1)))
1072 last1 = PREV_INSN (last1);
1074 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1075 last1 = PREV_INSN (last1);
1077 while (last2 != bb2->head && !INSN_P (PREV_INSN (last2)))
1078 last2 = PREV_INSN (last2);
1080 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1081 last2 = PREV_INSN (last2);
1090 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1091 the branch instruction. This means that if we commonize the control
1092 flow before end of the basic block, the semantic remains unchanged.
1094 We may assume that there exists one edge with a common destination. */
1097 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1099 int nehedges1 = 0, nehedges2 = 0;
1100 edge fallthru1 = 0, fallthru2 = 0;
1103 /* If BB1 has only one successor, we may be looking at either an
1104 unconditional jump, or a fake edge to exit. */
1105 if (bb1->succ && !bb1->succ->succ_next
1106 && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1107 && (GET_CODE (bb1->end) != JUMP_INSN || simplejump_p (bb1->end)))
1108 return (bb2->succ && !bb2->succ->succ_next
1109 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1110 && (GET_CODE (bb2->end) != JUMP_INSN || simplejump_p (bb2->end)));
1112 /* Match conditional jumps - this may get tricky when fallthru and branch
1113 edges are crossed. */
1115 && bb1->succ->succ_next
1116 && !bb1->succ->succ_next->succ_next
1117 && any_condjump_p (bb1->end)
1118 && onlyjump_p (bb1->end))
1120 edge b1, f1, b2, f2;
1121 bool reverse, match;
1122 rtx set1, set2, cond1, cond2;
1123 enum rtx_code code1, code2;
1126 || !bb2->succ->succ_next
1127 || bb2->succ->succ_next->succ_next
1128 || !any_condjump_p (bb2->end)
1129 || !onlyjump_p (bb2->end))
1132 b1 = BRANCH_EDGE (bb1);
1133 b2 = BRANCH_EDGE (bb2);
1134 f1 = FALLTHRU_EDGE (bb1);
1135 f2 = FALLTHRU_EDGE (bb2);
1137 /* Get around possible forwarders on fallthru edges. Other cases
1138 should be optimized out already. */
1139 if (FORWARDER_BLOCK_P (f1->dest))
1140 f1 = f1->dest->succ;
1142 if (FORWARDER_BLOCK_P (f2->dest))
1143 f2 = f2->dest->succ;
1145 /* To simplify use of this function, return false if there are
1146 unneeded forwarder blocks. These will get eliminated later
1147 during cleanup_cfg. */
1148 if (FORWARDER_BLOCK_P (f1->dest)
1149 || FORWARDER_BLOCK_P (f2->dest)
1150 || FORWARDER_BLOCK_P (b1->dest)
1151 || FORWARDER_BLOCK_P (b2->dest))
1154 if (f1->dest == f2->dest && b1->dest == b2->dest)
1156 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1161 set1 = pc_set (bb1->end);
1162 set2 = pc_set (bb2->end);
1163 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1164 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1167 cond1 = XEXP (SET_SRC (set1), 0);
1168 cond2 = XEXP (SET_SRC (set2), 0);
1169 code1 = GET_CODE (cond1);
1171 code2 = reversed_comparison_code (cond2, bb2->end);
1173 code2 = GET_CODE (cond2);
1175 if (code2 == UNKNOWN)
1178 /* Verify codes and operands match. */
1179 match = ((code1 == code2
1180 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1181 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1182 || (code1 == swap_condition (code2)
1183 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1185 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1188 /* If we return true, we will join the blocks. Which means that
1189 we will only have one branch prediction bit to work with. Thus
1190 we require the existing branches to have probabilities that are
1194 && maybe_hot_bb_p (bb1)
1195 && maybe_hot_bb_p (bb2))
1199 if (b1->dest == b2->dest)
1200 prob2 = b2->probability;
1202 /* Do not use f2 probability as f2 may be forwarded. */
1203 prob2 = REG_BR_PROB_BASE - b2->probability;
1205 /* Fail if the difference in probabilities is greater than 50%.
1206 This rules out two well-predicted branches with opposite
1208 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1211 fprintf (rtl_dump_file,
1212 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1213 bb1->index, bb2->index, b1->probability, prob2);
1219 if (rtl_dump_file && match)
1220 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1221 bb1->index, bb2->index);
1226 /* Generic case - we are seeing a computed jump, table jump or trapping
1229 #ifndef CASE_DROPS_THROUGH
1230 /* Check whether there are tablejumps in the end of BB1 and BB2.
1231 Return true if they are identical. */
1236 if (tablejump_p (bb1->end, &label1, &table1)
1237 && tablejump_p (bb2->end, &label2, &table2)
1238 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1240 /* The labels should never be the same rtx. If they really are same
1241 the jump tables are same too. So disable crossjumping of blocks BB1
1242 and BB2 because when deleting the common insns in the end of BB1
1243 by delete_block () the jump table would be deleted too. */
1244 /* If LABEL2 is referenced in BB1->END do not do anything
1245 because we would lose information when replacing
1246 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1247 if (label1 != label2 && !rtx_referenced_p (label2, bb1->end))
1249 /* Set IDENTICAL to true when the tables are identical. */
1250 bool identical = false;
1253 p1 = PATTERN (table1);
1254 p2 = PATTERN (table2);
1255 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1259 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1260 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1261 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1262 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1267 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1268 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1274 replace_label_data rr;
1277 /* Temporarily replace references to LABEL1 with LABEL2
1278 in BB1->END so that we could compare the instructions. */
1281 rr.update_label_nuses = false;
1282 for_each_rtx (&bb1->end, replace_label, &rr);
1284 match = insns_match_p (mode, bb1->end, bb2->end);
1285 if (rtl_dump_file && match)
1286 fprintf (rtl_dump_file,
1287 "Tablejumps in bb %i and %i match.\n",
1288 bb1->index, bb2->index);
1290 /* Set the original label in BB1->END because when deleting
1291 a block whose end is a tablejump, the tablejump referenced
1292 from the instruction is deleted too. */
1295 for_each_rtx (&bb1->end, replace_label, &rr);
1305 /* First ensure that the instructions match. There may be many outgoing
1306 edges so this test is generally cheaper. */
1307 if (!insns_match_p (mode, bb1->end, bb2->end))
1310 /* Search the outgoing edges, ensure that the counts do match, find possible
1311 fallthru and exception handling edges since these needs more
1313 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1314 e1 = e1->succ_next, e2 = e2->succ_next)
1316 if (e1->flags & EDGE_EH)
1319 if (e2->flags & EDGE_EH)
1322 if (e1->flags & EDGE_FALLTHRU)
1324 if (e2->flags & EDGE_FALLTHRU)
1328 /* If number of edges of various types does not match, fail. */
1330 || nehedges1 != nehedges2
1331 || (fallthru1 != 0) != (fallthru2 != 0))
1334 /* fallthru edges must be forwarded to the same destination. */
1337 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1338 ? fallthru1->dest->succ->dest: fallthru1->dest);
1339 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1340 ? fallthru2->dest->succ->dest: fallthru2->dest);
1346 /* In case we do have EH edges, ensure we are in the same region. */
1349 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1350 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1352 if (XEXP (n1, 0) != XEXP (n2, 0))
1356 /* We don't need to match the rest of the edges as the above checks should be enough
1357 to ensure that they are equivalent. */
1361 /* E1 and E2 are edges with the same destination block. Search their
1362 predecessors for common code. If found, redirect control flow from
1363 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1366 try_crossjump_to_edge (int mode, edge e1, edge e2)
1369 basic_block src1 = e1->src, src2 = e2->src;
1370 basic_block redirect_to, redirect_from, to_remove;
1371 rtx newpos1, newpos2;
1374 /* Search backward through forwarder blocks. We don't need to worry
1375 about multiple entry or chained forwarders, as they will be optimized
1376 away. We do this to look past the unconditional jump following a
1377 conditional jump that is required due to the current CFG shape. */
1379 && !src1->pred->pred_next
1380 && FORWARDER_BLOCK_P (src1))
1381 e1 = src1->pred, src1 = e1->src;
1384 && !src2->pred->pred_next
1385 && FORWARDER_BLOCK_P (src2))
1386 e2 = src2->pred, src2 = e2->src;
1388 /* Nothing to do if we reach ENTRY, or a common source block. */
1389 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1394 /* Seeing more than 1 forwarder blocks would confuse us later... */
1395 if (FORWARDER_BLOCK_P (e1->dest)
1396 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1399 if (FORWARDER_BLOCK_P (e2->dest)
1400 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1403 /* Likewise with dead code (possibly newly created by the other optimizations
1405 if (!src1->pred || !src2->pred)
1408 /* Look for the common insn sequence, part the first ... */
1409 if (!outgoing_edges_match (mode, src1, src2))
1412 /* ... and part the second. */
1413 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1417 #ifndef CASE_DROPS_THROUGH
1418 /* Here we know that the insns in the end of SRC1 which are common with SRC2
1420 If we have tablejumps in the end of SRC1 and SRC2
1421 they have been already compared for equivalence in outgoing_edges_match ()
1422 so replace the references to TABLE1 by references to TABLE2. */
1427 if (tablejump_p (src1->end, &label1, &table1)
1428 && tablejump_p (src2->end, &label2, &table2)
1429 && label1 != label2)
1431 replace_label_data rr;
1434 /* Replace references to LABEL1 with LABEL2. */
1437 rr.update_label_nuses = true;
1438 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1440 /* Do not replace the label in SRC1->END because when deleting
1441 a block whose end is a tablejump, the tablejump referenced
1442 from the instruction is deleted too. */
1443 if (insn != src1->end)
1444 for_each_rtx (&insn, replace_label, &rr);
1450 /* Avoid splitting if possible. */
1451 if (newpos2 == src2->head)
1456 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1457 src2->index, nmatch);
1458 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1462 fprintf (rtl_dump_file,
1463 "Cross jumping from bb %i to bb %i; %i common insns\n",
1464 src1->index, src2->index, nmatch);
1466 redirect_to->count += src1->count;
1467 redirect_to->frequency += src1->frequency;
1468 /* We may have some registers visible trought the block. */
1469 redirect_to->flags |= BB_DIRTY;
1471 /* Recompute the frequencies and counts of outgoing edges. */
1472 for (s = redirect_to->succ; s; s = s->succ_next)
1475 basic_block d = s->dest;
1477 if (FORWARDER_BLOCK_P (d))
1480 for (s2 = src1->succ; ; s2 = s2->succ_next)
1482 basic_block d2 = s2->dest;
1483 if (FORWARDER_BLOCK_P (d2))
1484 d2 = d2->succ->dest;
1489 s->count += s2->count;
1491 /* Take care to update possible forwarder blocks. We verified
1492 that there is no more than one in the chain, so we can't run
1493 into infinite loop. */
1494 if (FORWARDER_BLOCK_P (s->dest))
1496 s->dest->succ->count += s2->count;
1497 s->dest->count += s2->count;
1498 s->dest->frequency += EDGE_FREQUENCY (s);
1501 if (FORWARDER_BLOCK_P (s2->dest))
1503 s2->dest->succ->count -= s2->count;
1504 if (s2->dest->succ->count < 0)
1505 s2->dest->succ->count = 0;
1506 s2->dest->count -= s2->count;
1507 s2->dest->frequency -= EDGE_FREQUENCY (s);
1508 if (s2->dest->frequency < 0)
1509 s2->dest->frequency = 0;
1510 if (s2->dest->count < 0)
1511 s2->dest->count = 0;
1514 if (!redirect_to->frequency && !src1->frequency)
1515 s->probability = (s->probability + s2->probability) / 2;
1518 = ((s->probability * redirect_to->frequency +
1519 s2->probability * src1->frequency)
1520 / (redirect_to->frequency + src1->frequency));
1523 update_br_prob_note (redirect_to);
1525 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1527 /* Skip possible basic block header. */
1528 if (GET_CODE (newpos1) == CODE_LABEL)
1529 newpos1 = NEXT_INSN (newpos1);
1531 if (GET_CODE (newpos1) == NOTE)
1532 newpos1 = NEXT_INSN (newpos1);
1534 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1535 to_remove = redirect_from->succ->dest;
1537 redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
1538 delete_block (to_remove);
1540 update_forwarder_flag (redirect_from);
1545 /* Search the predecessors of BB for common insn sequences. When found,
1546 share code between them by redirecting control flow. Return true if
1547 any changes made. */
1550 try_crossjump_bb (int mode, basic_block bb)
/* Search the predecessors of BB for common insn sequences and share
   them via try_crossjump_to_edge (see the comment above).  MODE is a
   bitmask of CLEANUP_* flags.
   NOTE(review): the declarations of MAX, N, CHANGED and the FALLTHRU
   initialization are elided from this extract.  */
1552 edge e, e2, nexte2, nexte, fallthru;
1556 /* Nothing to do if there are not at least two incoming edges.  */
1557 if (!bb->pred || !bb->pred->pred_next)
1560 /* It is always cheapest to redirect a block that ends in a branch to
1561 a block that falls through into BB, as that adds no branches to the
1562 program.  We'll try that combination first.  */
/* Bail out when BB has more than PARAM_MAX_CROSSJUMP_EDGES
   predecessors, to bound the quadratic pairwise comparison below.  */
1564 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1565 for (e = bb->pred; e ; e = e->pred_next, n++)
1567 if (e->flags & EDGE_FALLTHRU)
/* NEXTE is cached up front because a successful crossjump may delete
   or redirect edge E.  */
1574 for (e = bb->pred; e; e = nexte)
1576 nexte = e->pred_next;
1578 /* As noted above, first try with the fallthru predecessor.  */
1581 /* Don't combine the fallthru edge into anything else.
1582 If there is a match, we'll do it the other way around.  */
1586 if (try_crossjump_to_edge (mode, e, fallthru))
1594 /* Non-obvious work limiting check: Recognize that we're going
1595 to call try_crossjump_bb on every basic block.  So if we have
1596 two blocks with lots of outgoing edges (a switch) and they
1597 share lots of common destinations, then we would do the
1598 cross-jump check once for each common destination.
1600 Now, if the blocks actually are cross-jump candidates, then
1601 all of their destinations will be shared.  Which means that
1602 we only need check them for cross-jump candidacy once.  We
1603 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1604 choosing to do the check from the block for which the edge
1605 in question is the first successor of A.  */
1606 if (e->src->succ != e)
1609 for (e2 = bb->pred; e2; e2 = nexte2)
1611 nexte2 = e2->pred_next;
1616 /* We've already checked the fallthru edge above.  */
1620 /* The "first successor" check above only prevents multiple
1621 checks of crossjump(A,B).  In order to prevent redundant
1622 checks of crossjump(B,A), require that A be the block
1623 with the lowest index.  */
1624 if (e->src->index > e2->src->index)
1627 if (try_crossjump_to_edge (mode, e, e2))
1639 /* Do simple CFG optimizations - basic block merging, simplifying of jump
1640 instructions etc. Return nonzero if changes were made. */
1643 try_optimize_cfg (int mode)
/* Driver for the simple CFG optimizations described above: dead block
   removal, label removal, block merging, condjump simplification, edge
   forwarding and cross jumping.  MODE is a bitmask of CLEANUP_* flags.
   Iterates over all blocks until no further change is made and returns
   nonzero if anything changed.
   NOTE(review): the declarations of CHANGED, C, S and the iteration
   counter, as well as several loop/brace lines, are elided from this
   extract.  */
1645 bool changed_overall = false;
1648 basic_block bb, b, next;
/* Fake exit edges from noreturn calls let crossjumping treat code
   leading into them uniformly; they are removed again at the end.  */
1650 if (mode & CLEANUP_CROSSJUMP)
1651 add_noreturn_fake_exit_edges ();
/* Cache forwarder-block-ness in each block's AUX field (see
   BB_FORWARDER_BLOCK above).  */
1654 update_forwarder_flag (bb);
1656 if (mode & CLEANUP_UPDATE_LIFE)
/* The target may forbid jump modification (e.g. during certain
   machine-specific phases); skip all jump-rewriting work then.  */
1659 if (! (* targetm.cannot_modify_jumps_p) ())
1661 /* Attempt to merge blocks as made possible by edge removal.  If
1662 a block has only one successor, and the successor has only
1663 one predecessor, they may be combined.  */
1670 fprintf (rtl_dump_file,
1671 "\n\ntry_optimize_cfg iteration %i\n\n",
1674 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1678 bool changed_here = false;
1680 /* Delete trivially dead basic blocks.  */
1681 while (b->pred == NULL)
1685 fprintf (rtl_dump_file, "Deleting block %i.\n",
1693 /* Remove code labels no longer used.  Don't do this
1694 before CALL_PLACEHOLDER is removed, as some branches
1695 may be hidden within.  */
1696 if (b->pred->pred_next == NULL
1697 && (b->pred->flags & EDGE_FALLTHRU)
1698 && !(b->pred->flags & EDGE_COMPLEX)
1699 && GET_CODE (b->head) == CODE_LABEL
1700 && (!(mode & CLEANUP_PRE_SIBCALL)
1701 || !tail_recursion_label_p (b->head))
1702 /* If the previous block ends with a branch to this
1703 block, we can't delete the label.  Normally this
1704 is a condjump that is yet to be simplified, but
1705 if CASE_DROPS_THRU, this can be a tablejump with
1706 some element going to the same place as the
1707 default (fallthru).  */
1708 && (b->pred->src == ENTRY_BLOCK_PTR
1709 || GET_CODE (b->pred->src->end) != JUMP_INSN
1710 || ! label_is_jump_target_p (b->head,
1711 b->pred->src->end)))
1713 rtx label = b->head;
1715 b->head = NEXT_INSN (b->head);
1716 delete_insn_chain (label, label);
1718 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1722 /* If we fall through an empty block, we can remove it.  */
1723 if (b->pred->pred_next == NULL
1724 && (b->pred->flags & EDGE_FALLTHRU)
1725 && GET_CODE (b->head) != CODE_LABEL
1726 && FORWARDER_BLOCK_P (b)
1727 /* Note that forwarder_block_p true ensures that
1728 there is a successor for this block.  */
1729 && (b->succ->flags & EDGE_FALLTHRU)
1730 && n_basic_blocks > 1)
1733 fprintf (rtl_dump_file,
1734 "Deleting fallthru block %i.\n",
1737 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1738 redirect_edge_succ_nodup (b->pred, b->succ->dest);
/* Merge B with its single successor C when C has a single
   predecessor, the edge is not complex, and any jump ending B is
   side-effect free.  */
1744 if ((s = b->succ) != NULL
1745 && s->succ_next == NULL
1746 && !(s->flags & EDGE_COMPLEX)
1747 && (c = s->dest) != EXIT_BLOCK_PTR
1748 && c->pred->pred_next == NULL
1750 /* If the jump insn has side effects,
1751 we can't kill the edge.  */
1752 && (GET_CODE (b->end) != JUMP_INSN
1754 ? simplejump_p (b->end)
1755 : onlyjump_p (b->end)))
1756 && (next = merge_blocks (s, b, c, mode)))
1759 changed_here = true;
1762 /* Simplify branch over branch.  */
1763 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1764 changed_here = true;
1766 /* If B has a single outgoing edge, but uses a
1767 non-trivial jump instruction without side-effects, we
1768 can either delete the jump entirely, or replace it
1769 with a simple unconditional jump.  Use
1770 redirect_edge_and_branch to do the dirty work.  */
1772 && ! b->succ->succ_next
1773 && b->succ->dest != EXIT_BLOCK_PTR
1774 && onlyjump_p (b->end)
1775 && redirect_edge_and_branch (b->succ, b->succ->dest))
1777 update_forwarder_flag (b);
1778 changed_here = true;
1781 /* Simplify branch to branch.  */
1782 if (try_forward_edges (mode, b))
1783 changed_here = true;
1785 /* Look for shared code between blocks.  */
1786 if ((mode & CLEANUP_CROSSJUMP)
1787 && try_crossjump_bb (mode, b))
1788 changed_here = true;
1790 /* Don't get confused by the index shift caused by
1798 if ((mode & CLEANUP_CROSSJUMP)
1799 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1802 #ifdef ENABLE_CHECKING
1804 verify_flow_info ();
1807 changed_overall |= changed;
/* Undo the fake exit edges added for crossjumping above.  */
1812 if (mode & CLEANUP_CROSSJUMP)
1813 remove_fake_edges ();
/* Drop the BB_FORWARDER_BLOCK flags cached in the AUX fields.  */
1815 clear_aux_for_blocks ();
1817 return changed_overall;
1820 /* Delete all unreachable basic blocks. */
1823 delete_unreachable_blocks (void)
/* Delete every basic block not marked BB_REACHABLE by
   find_unreachable_blocks, then tidy the remaining fallthru edges.
   NOTE(review): the actual block-deletion statement and the final
   return are elided from this extract; CHANGED presumably records
   whether any block was deleted.  */
1825 bool changed = false;
1826 basic_block b, next_bb;
1828 find_unreachable_blocks ();
1830 /* Delete all unreachable basic blocks.  */
1832 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
/* Cache the successor first, since B may be deleted inside the
   loop body.  */
1834 next_bb = b->next_bb;
1836 if (!(b->flags & BB_REACHABLE))
1844 tidy_fallthru_edges ();
1848 /* Tidy the CFG by deleting unreachable code and whatnot. */
1851 cleanup_cfg (int mode)
1853 bool changed = false;
1855 timevar_push (TV_CLEANUP_CFG);
1856 if (delete_unreachable_blocks ())
1859 /* We've possibly created trivially dead code. Cleanup it right
1860 now to introduce more opportunities for try_optimize_cfg. */
1861 if (!(mode & (CLEANUP_NO_INSN_DEL
1862 | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
1863 && !reload_completed)
1864 delete_trivially_dead_insns (get_insns(), max_reg_num ());
1869 while (try_optimize_cfg (mode))
1871 delete_unreachable_blocks (), changed = true;
1872 if (mode & CLEANUP_UPDATE_LIFE)
1874 /* Cleaning up CFG introduces more opportunities for dead code
1875 removal that in turn may introduce more opportunities for
1876 cleaning up the CFG. */
1877 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
1879 | PROP_SCAN_DEAD_CODE
1880 | PROP_KILL_DEAD_CODE
1884 else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
1885 && (mode & CLEANUP_EXPENSIVE)
1886 && !reload_completed)
1888 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
1893 delete_dead_jumptables ();
1896 /* Kill the data we won't maintain. */
1897 free_EXPR_LIST_list (&label_value_list);
1898 timevar_pop (TV_CLEANUP_CFG);