1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 /* This file contains optimizer of the control flow. The main entrypoint is
23 cleanup_cfg. Following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (edge to the forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by
28 underlying infrastructure so branch can be converted to simplejump or
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
41 #include "insn-config.h"
51 /* cleanup_cfg maintains following flags for each basic block. */
/* NOTE(review): the opening of `enum bb_flags' appears elided in this
   excerpt; the flag values below live in the block's ->aux pointer.  */
55 /* Set if BB is the forwarder block to avoid too many
56 forwarder_block_p calls. */
57 BB_FORWARDER_BLOCK = 1
/* Accessors for the per-block flag bits stored in BB->aux.  The casts
   through (long) pack the enum bits into the aux pointer field.  */
60 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
61 #define BB_SET_FLAG(BB, FLAG) \
62 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
63 #define BB_CLEAR_FLAG(BB, FLAG) \
64 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
/* Nonzero if BB has been cached as a forwarder block.  */
66 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
/* Forward declarations of the file-local helpers.
   NOTE(review): several continuation lines of these PARAMS lists are
   elided in this excerpt.  */
68 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
69 static bool try_crossjump_bb PARAMS ((int, basic_block));
70 static bool outgoing_edges_match PARAMS ((int,
71 basic_block, basic_block));
72 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
74 static bool insns_match_p PARAMS ((int, rtx, rtx));
76 static bool delete_unreachable_blocks PARAMS ((void));
77 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
78 static bool tail_recursion_label_p PARAMS ((rtx));
79 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
81 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
83 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
85 static bool try_optimize_cfg PARAMS ((int));
86 static bool try_simplify_condjump PARAMS ((basic_block));
87 static bool try_forward_edges PARAMS ((int, basic_block));
88 static edge thread_jump PARAMS ((int, edge, basic_block));
89 static bool mark_effect PARAMS ((rtx, bitmap));
90 static void notice_new_block PARAMS ((basic_block));
91 static void update_forwarder_flag PARAMS ((basic_block));
93 /* Set flags for newly created block. */
/* NOTE(review): the function header and braces of notice_new_block
   appear elided here; the visible body caches the forwarder property
   so later queries can use FORWARDER_BLOCK_P instead of recomputing.  */
102 if (forwarder_block_p (bb))
103 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
106 /* Recompute forwarder flag after block has been modified. */
109 update_forwarder_flag (bb)
/* Re-test forwarder_block_p and update the cached bit either way.
   NOTE(review): the `else' between the two branches appears elided
   in this excerpt.  */
112 if (forwarder_block_p (bb))
113 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
115 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
118 /* Simplify a conditional jump around an unconditional jump.
119 Return true if something changed. */
/* NOTE(review): returns, the `rtx cbranch_insn;' declaration and
   braces appear elided in this excerpt.  The transformation inverts
   the condition so the jump-around block becomes dead and deletable.  */
122 try_simplify_condjump (cbranch_block)
123 basic_block cbranch_block;
125 basic_block jump_block, jump_dest_block, cbranch_dest_block;
126 edge cbranch_jump_edge, cbranch_fallthru_edge;
129 /* Verify that there are exactly two successors. */
130 if (!cbranch_block->succ
131 || !cbranch_block->succ->succ_next
132 || cbranch_block->succ->succ_next->succ_next)
135 /* Verify that we've got a normal conditional branch at the end
137 cbranch_insn = cbranch_block->end;
138 if (!any_condjump_p (cbranch_insn))
141 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
142 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
144 /* The next block must not have multiple predecessors, must not
145 be the last block in the function, and must contain just the
146 unconditional jump. */
147 jump_block = cbranch_fallthru_edge->dest;
148 if (jump_block->pred->pred_next
149 || jump_block->index == n_basic_blocks - 1
150 || !FORWARDER_BLOCK_P (jump_block))
152 jump_dest_block = jump_block->succ->dest;
154 /* The conditional branch must target the block after the
155 unconditional branch. */
156 cbranch_dest_block = cbranch_jump_edge->dest;
158 if (!can_fallthru (jump_block, cbranch_dest_block))
161 /* Invert the conditional branch. */
162 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
166 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
167 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
169 /* Success. Update the CFG to match. Note that after this point
170 the edge variable names appear backwards; the redirection is done
171 this way to preserve edge profile data. */
172 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
174 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
176 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
177 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
178 update_br_prob_note (cbranch_block);
180 /* Delete the block with the unconditional jump, and clean up the mess. */
181 flow_delete_block (jump_block);
182 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
187 /* Attempt to prove that operation is NOOP using CSElib or mark the effect
188 on register. Used by jump threading. */
/* NOTE(review): the parameter declarations, `case' labels (presumably
   CLOBBER and SET) and return statements appear elided here.  For a
   clobbered register the bits in NONEQUAL are cleared (the value is
   dead, so it need not match); for a set whose source does not match
   its destination per cselib, the bits are set.  Hard registers mark
   all of their HARD_REGNO_NREGS component registers.  */
191 mark_effect (exp, nonequal)
197 switch (GET_CODE (exp))
199 /* In case we do clobber the register, mark it as equal, as we know the
200 value is dead so it don't have to match. */
202 if (REG_P (XEXP (exp, 0)))
204 dest = XEXP (exp, 0);
205 regno = REGNO (dest);
206 CLEAR_REGNO_REG_SET (nonequal, regno);
207 if (regno < FIRST_PSEUDO_REGISTER)
209 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
211 CLEAR_REGNO_REG_SET (nonequal, regno + n);
217 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
219 dest = SET_DEST (exp);
224 regno = REGNO (dest);
225 SET_REGNO_REG_SET (nonequal, regno);
226 if (regno < FIRST_PSEUDO_REGISTER)
228 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
230 SET_REGNO_REG_SET (nonequal, regno + n);
238 /* Attempt to prove that the basic block B will have no side effects and
239 always continues in the same edge if reached via E. Return the edge
240 if it exists, NULL otherwise. */
/* NOTE(review): parameter declarations, some returns and braces appear
   elided in this excerpt.  Outline of the visible logic: require that
   both E->src and B end in two-successor conditional jumps, prove the
   second condition is implied (or anti-implied) by the first via
   comparison_dominates_p, verify B has no side effects, then use cselib
   over both blocks to prove B's effects are a no-op (NONEQUAL tracks
   registers not yet proven equal).  */
243 thread_jump (mode, e, b)
248 rtx set1, set2, cond1, cond2, insn;
249 enum rtx_code code1, code2, reversed_code2;
250 bool reverse1 = false;
255 /* At the moment, we do handle only conditional jumps, but later we may
256 want to extend this code to tablejumps and others. */
257 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
259 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
262 /* Second branch must end with onlyjump, as we will eliminate the jump. */
263 if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
264 || !onlyjump_p (b->end))
267 set1 = pc_set (e->src->end);
268 set2 = pc_set (b->end);
269 if (((e->flags & EDGE_FALLTHRU) != 0)
270 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
273 cond1 = XEXP (SET_SRC (set1), 0);
274 cond2 = XEXP (SET_SRC (set2), 0);
276 code1 = reversed_comparison_code (cond1, e->src->end);
278 code1 = GET_CODE (cond1);
280 code2 = GET_CODE (cond2);
281 reversed_code2 = reversed_comparison_code (cond2, b->end);
283 if (!comparison_dominates_p (code1, code2)
284 && !comparison_dominates_p (code1, reversed_code2))
287 /* Ensure that the comparison operators are equivalent.
288 ??? This is far too pessimistic. We should allow swapped operands,
289 different CCmodes, or for example comparisons for interval, that
290 dominate even when operands are not equivalent. */
291 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
292 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
295 /* Short circuit cases where block B contains some side effects, as we can't
297 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
298 insn = NEXT_INSN (insn))
299 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
304 /* First process all values computed in the source basic block. */
305 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
306 insn = NEXT_INSN (insn))
308 cselib_process_insn (insn);
310 nonequal = BITMAP_XMALLOC();
311 CLEAR_REG_SET (nonequal);
313 /* Now assume that we've continued by the edge E to B and continue
314 processing as if it were same basic block.
315 Our goal is to prove that whole block is an NOOP. */
317 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
318 insn = NEXT_INSN (insn))
322 rtx pat = PATTERN (insn);
324 if (GET_CODE (pat) == PARALLEL)
326 for (i = 0; i < XVECLEN (pat, 0); i++)
327 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
330 failed |= mark_effect (pat, nonequal);
333 cselib_process_insn (insn);
336 /* Later we should clear nonequal of dead registers. So far we don't
337 have life information in cfg_cleanup. */
341 /* In case liveness information is available, we need to prove equivalence
342 only of the live values. */
343 if (mode & CLEANUP_UPDATE_LIFE)
344 AND_REG_SET (nonequal, b->global_live_at_end);
346 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
348 BITMAP_XFREE (nonequal);
/* Decide which of B's outgoing edges control always takes: taken branch
   if domination parity matches the branch direction, fallthru otherwise.  */
350 if ((comparison_dominates_p (code1, code2) != 0)
351 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
352 return BRANCH_EDGE (b);
354 return FALLTHRU_EDGE (b);
/* failed_exit label presumably precedes this cleanup — elided here.  */
357 BITMAP_XFREE (nonequal);
362 /* Attempt to forward edges leaving basic block B.
363 Return true if successful. */
/* NOTE(review): many lines (declarations, returns, braces, `continue's)
   are elided in this excerpt.  The loop walks each outgoing edge,
   follows chains of forwarder blocks (and, with CLEANUP_THREADING,
   provably-constant conditional jumps via thread_jump), redirects the
   edge to the final target, and rescales profile counts/frequencies of
   every block skipped over.  COUNTER bounded by n_basic_blocks guards
   against infinite forwarding cycles.  */
366 try_forward_edges (mode, b)
370 bool changed = false;
371 edge e, next, *threaded_edges = NULL;
373 for (e = b->succ; e; e = next)
375 basic_block target, first;
377 bool threaded = false;
378 int nthreaded_edges = 0;
382 /* Skip complex edges because we don't know how to update them.
384 Still handle fallthru edges, as we can succeed to forward fallthru
385 edge to the same place as the branch edge of conditional branch
386 and turn conditional branch to an unconditional branch. */
387 if (e->flags & EDGE_COMPLEX)
390 target = first = e->dest;
393 while (counter < n_basic_blocks)
395 basic_block new_target = NULL;
396 bool new_target_threaded = false;
398 if (FORWARDER_BLOCK_P (target)
399 && target->succ->dest != EXIT_BLOCK_PTR)
401 /* Bypass trivial infinite loops. */
402 if (target == target->succ->dest)
403 counter = n_basic_blocks;
404 new_target = target->succ->dest;
407 /* Allow to thread only over one edge at time to simplify updating
409 else if (mode & CLEANUP_THREADING)
411 edge t = thread_jump (mode, e, target);
415 threaded_edges = xmalloc (sizeof (*threaded_edges)
421 /* Detect an infinite loop across blocks not
422 including the start block. */
423 for (i = 0; i < nthreaded_edges; ++i)
424 if (threaded_edges[i] == t)
426 if (i < nthreaded_edges)
428 counter = n_basic_blocks;
433 /* Detect an infinite loop across the start block. */
437 if (nthreaded_edges >= n_basic_blocks)
439 threaded_edges[nthreaded_edges++] = t;
441 new_target = t->dest;
442 new_target_threaded = true;
449 /* Avoid killing of loop pre-headers, as it is the place loop
450 optimizer wants to hoist code to.
452 For fallthru forwarders, the LOOP_BEG note must appear between
453 the header of block and CODE_LABEL of the loop, for non forwarders
454 it must appear before the JUMP_INSN. */
455 if (mode & CLEANUP_PRE_LOOP)
457 rtx insn = (target->succ->flags & EDGE_FALLTHRU
458 ? target->head : prev_nonnote_insn (target->end));
460 if (GET_CODE (insn) != NOTE)
461 insn = NEXT_INSN (insn);
463 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
464 insn = NEXT_INSN (insn))
465 if (GET_CODE (insn) == NOTE
466 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
469 if (GET_CODE (insn) == NOTE)
475 threaded |= new_target_threaded;
478 if (counter >= n_basic_blocks)
481 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
484 else if (target == first)
485 ; /* We didn't do anything. */
488 /* Save the values now, as the edge may get removed. */
489 gcov_type edge_count = e->count;
490 int edge_probability = e->probability;
494 /* Don't force if target is exit block. */
495 if (threaded && target != EXIT_BLOCK_PTR)
497 notice_new_block (redirect_edge_and_branch_force (e, target));
499 fprintf (rtl_dump_file, "Conditionals threaded.\n");
501 else if (!redirect_edge_and_branch (e, target))
504 fprintf (rtl_dump_file,
505 "Forwarding edge %i->%i to %i failed.\n",
506 b->index, e->dest->index, target->index);
510 /* We successfully forwarded the edge. Now update profile
511 data: for each edge we traversed in the chain, remove
512 the original edge's execution count. */
513 edge_frequency = ((edge_probability * b->frequency
514 + REG_BR_PROB_BASE / 2)
517 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
518 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
524 first->count -= edge_count;
525 if (first->count < 0)
527 first->frequency -= edge_frequency;
528 if (first->frequency < 0)
529 first->frequency = 0;
530 if (first->succ->succ_next)
534 if (n >= nthreaded_edges)
536 t = threaded_edges [n++];
539 if (first->frequency)
540 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
543 if (prob > t->probability)
544 prob = t->probability;
545 t->probability -= prob;
546 prob = REG_BR_PROB_BASE - prob;
549 first->succ->probability = REG_BR_PROB_BASE;
550 first->succ->succ_next->probability = 0;
553 for (e = first->succ; e; e = e->succ_next)
554 e->probability = ((e->probability * REG_BR_PROB_BASE)
556 update_br_prob_note (first);
560 /* It is possible that as the result of
561 threading we've removed edge as it is
562 threaded to the fallthru edge. Avoid
563 getting out of sync. */
564 if (n < nthreaded_edges
565 && first == threaded_edges [n]->src)
570 t->count -= edge_count;
575 while (first != target);
582 free (threaded_edges);
586 /* Return true if LABEL is a target of JUMP_INSN. This applies only
587 to non-complex jumps. That is, direct unconditional, conditional,
588 and tablejumps, but not computed jumps or returns. It also does
589 not apply to the fallthru case of a conditional jump. */
/* NOTE(review): the direct-target comparison and some returns/braces
   appear elided.  The visible part handles the tablejump case: follow
   JUMP_LABEL to the dispatch table (ADDR_VEC/ADDR_DIFF_VEC) and scan
   its entries for LABEL.  */
592 label_is_jump_target_p (label, jump_insn)
593 rtx label, jump_insn;
595 rtx tmp = JUMP_LABEL (jump_insn);
601 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
602 && GET_CODE (tmp) == JUMP_INSN
603 && (tmp = PATTERN (tmp),
604 GET_CODE (tmp) == ADDR_VEC
605 || GET_CODE (tmp) == ADDR_DIFF_VEC))
607 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
608 int i, veclen = GET_NUM_ELEM (vec);
610 for (i = 0; i < veclen; ++i)
611 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
618 /* Return true if LABEL is used for tail recursion. */
/* Walk the global tail_recursion_label_list (an EXPR_LIST chain)
   looking for LABEL.  NOTE(review): returns/braces elided here.  */
621 tail_recursion_label_p (label)
626 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
627 if (label == XEXP (x, 0))
633 /* Blocks A and B are to be merged into a single block. A has no incoming
634 fallthru edge, so it can be moved before B without adding or modifying
635 any jumps (aside from the jump from A to B). */
/* NOTE(review): declarations, abort paths and braces appear elided.
   Strategy: delete the barrier after A, squeeze block/loop notes out,
   physically move A's insns to just before B, swap the two BASIC_BLOCK
   array slots so the array stays compact, then merge.  */
638 merge_blocks_move_predecessor_nojumps (a, b)
644 barrier = next_nonnote_insn (a->end);
645 if (GET_CODE (barrier) != BARRIER)
647 delete_insn (barrier);
649 /* Move block and loop notes out of the chain so that we do not
652 ??? A better solution would be to squeeze out all the non-nested notes
653 and adjust the block trees appropriately. Even better would be to have
654 a tighter connection between block trees and rtl so that this is not
656 if (squeeze_notes (&a->head, &a->end))
659 /* Scramble the insn chain. */
660 if (a->end != PREV_INSN (b->head))
661 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
662 a->flags |= BB_DIRTY;
665 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
668 /* Swap the records for the two blocks around. Although we are deleting B,
669 A is now where B was and we want to compact the BB array from where
671 BASIC_BLOCK (a->index) = b;
672 BASIC_BLOCK (b->index) = a;
677 /* Now blocks A and B are contiguous. Merge them. */
678 merge_blocks_nomove (a, b);
681 /* Blocks A and B are to be merged into a single block. B has no outgoing
682 fallthru edge, so it can be moved after A without adding or modifying
683 any jumps (aside from the jump from A to B). */
/* NOTE(review): declarations, abort paths and braces appear elided.
   Mirror of the predecessor variant: temporarily extend B to cover a
   trailing jump table so it moves along, drop the barrier, squeeze
   notes, move B's insns after A, restore B's real end, then merge.  */
686 merge_blocks_move_successor_nojumps (a, b)
689 rtx barrier, real_b_end;
692 barrier = NEXT_INSN (b->end);
694 /* Recognize a jump table following block B. */
696 && GET_CODE (barrier) == CODE_LABEL
697 && NEXT_INSN (barrier)
698 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
699 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
700 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
702 /* Temporarily add the table jump insn to b, so that it will also
703 be moved to the correct location. */
704 b->end = NEXT_INSN (barrier);
705 barrier = NEXT_INSN (b->end);
708 /* There had better have been a barrier there. Delete it. */
709 if (barrier && GET_CODE (barrier) == BARRIER)
710 delete_insn (barrier);
712 /* Move block and loop notes out of the chain so that we do not
715 ??? A better solution would be to squeeze out all the non-nested notes
716 and adjust the block trees appropriately. Even better would be to have
717 a tighter connection between block trees and rtl so that this is not
719 if (squeeze_notes (&b->head, &b->end))
722 /* Scramble the insn chain. */
723 reorder_insns_nobb (b->head, b->end, a->end);
725 /* Restore the real end of b. */
728 /* Now blocks A and B are contiguous. Merge them. */
729 merge_blocks_nomove (a, b);
732 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
736 /* Attempt to merge basic blocks that are potentially non-adjacent.
737 Return true iff the attempt succeeded. */
/* NOTE(review): parameter declarations, return statements and braces
   appear elided.  E is presumably the edge from B to C.  Three cases:
   fallthru (merge in place), or — under CLEANUP_EXPENSIVE — move C
   after B or move B before C, depending on which side lacks a
   conflicting fallthru edge.  */
740 merge_blocks (e, b, c, mode)
745 /* If C has a tail recursion label, do not merge. There is no
746 edge recorded from the call_placeholder back to this label, as
747 that would make optimize_sibling_and_tail_recursive_calls more
748 complex for no gain. */
749 if ((mode & CLEANUP_PRE_SIBCALL)
750 && GET_CODE (c->head) == CODE_LABEL
751 && tail_recursion_label_p (c->head))
754 /* If B has a fallthru edge to C, no need to move anything. */
755 if (e->flags & EDGE_FALLTHRU)
757 int b_index = b->index, c_index = c->index;
758 merge_blocks_nomove (b, c);
759 update_forwarder_flag (b);
762 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
768 /* Otherwise we will need to move code around. Do that only if expensive
769 transformations are allowed. */
770 else if (mode & CLEANUP_EXPENSIVE)
772 edge tmp_edge, b_fallthru_edge;
773 bool c_has_outgoing_fallthru;
774 bool b_has_incoming_fallthru;
776 /* Avoid overactive code motion, as the forwarder blocks should be
777 eliminated by edge redirection instead. One exception might have
778 been if B is a forwarder block and C has no fallthru edge, but
779 that should be cleaned up by bb-reorder instead. */
780 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
783 /* We must make sure to not munge nesting of lexical blocks,
784 and loop notes. This is done by squeezing out all the notes
785 and leaving them there to lie. Not ideal, but functional. */
787 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
788 if (tmp_edge->flags & EDGE_FALLTHRU)
791 c_has_outgoing_fallthru = (tmp_edge != NULL);
793 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
794 if (tmp_edge->flags & EDGE_FALLTHRU)
797 b_has_incoming_fallthru = (tmp_edge != NULL);
798 b_fallthru_edge = tmp_edge;
800 /* Otherwise, we're going to try to move C after B. If C does
801 not have an outgoing fallthru, then it can be moved
802 immediately after B without introducing or modifying jumps. */
803 if (! c_has_outgoing_fallthru)
805 merge_blocks_move_successor_nojumps (b, c);
809 /* If B does not have an incoming fallthru, then it can be moved
810 immediately before C without introducing or modifying jumps.
811 C cannot be the first block, so we do not have to worry about
812 accessing a non-existent block. */
814 if (b_has_incoming_fallthru)
818 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
820 bb = force_nonfallthru (b_fallthru_edge);
822 notice_new_block (bb);
825 merge_blocks_move_predecessor_nojumps (b, c);
833 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
/* NOTE(review): parameter declarations, returns, labels (a `done:'
   target is referenced by GO_IF_HARD_REG_EQUAL) and braces appear
   elided.  Checks, in order: same insn code, same pattern code, equal
   CALL_INSN_FUNCTION_USAGE for calls, matching stack-reg REG_DEAD
   notes after reg-stack conversion, and finally pattern equality
   (renumbered before reload), with a REG_EQUAL/REG_EQUIV fallback.  */
836 insns_match_p (mode, i1, i2)
837 int mode ATTRIBUTE_UNUSED;
842 /* Verify that I1 and I2 are equivalent. */
843 if (GET_CODE (i1) != GET_CODE (i2))
849 if (GET_CODE (p1) != GET_CODE (p2))
852 /* If this is a CALL_INSN, compare register usage information.
853 If we don't check this on stack register machines, the two
854 CALL_INSNs might be merged leaving reg-stack.c with mismatching
855 numbers of stack registers in the same basic block.
856 If we don't check this on machines with delay slots, a delay slot may
857 be filled that clobbers a parameter expected by the subroutine.
859 ??? We take the simple route for now and assume that if they're
860 equal, they were constructed identically. */
862 if (GET_CODE (i1) == CALL_INSN
863 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
864 CALL_INSN_FUNCTION_USAGE (i2)))
868 /* If cross_jump_death_matters is not 0, the insn's mode
869 indicates whether or not the insn contains any stack-like
872 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
874 /* If register stack conversion has already been done, then
875 death notes must also be compared before it is certain that
876 the two instruction streams match. */
879 HARD_REG_SET i1_regset, i2_regset;
881 CLEAR_HARD_REG_SET (i1_regset);
882 CLEAR_HARD_REG_SET (i2_regset);
884 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
885 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
886 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
888 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
889 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
890 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
892 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
902 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
904 /* The following code helps take care of G++ cleanups. */
905 rtx equiv1 = find_reg_equal_equiv_note (i1);
906 rtx equiv2 = find_reg_equal_equiv_note (i2);
909 /* If the equivalences are not to a constant, they may
910 reference pseudos that no longer exist, so we can't
912 && (! reload_completed
913 || (CONSTANT_P (XEXP (equiv1, 0))
914 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
916 rtx s1 = single_set (i1);
917 rtx s2 = single_set (i2);
918 if (s1 != 0 && s2 != 0
919 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
921 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
922 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
923 if (! rtx_renumbered_equal_p (p1, p2))
925 else if (apply_change_group ())
936 /* Look through the insns at the end of BB1 and BB2 and find the longest
937 sequence that are equivalent. Store the first insns for that sequence
938 in *F1 and *F2 and return the sequence length.
940 To simplify callers of this function, if the blocks match exactly,
941 store the head of the blocks in *F1 and *F2. */
/* NOTE(review): parameter declarations, the main `while' header, loop
   exits and braces appear elided.  The walk is backwards from the
   block ends, pairing matching insns; LAST1/LAST2 mark the current
   match start and AFTERLAST1/AFTERLAST2 the previous one, used to back
   off one insn for the cc0 compare-sharing restriction below.  */
944 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
945 int mode ATTRIBUTE_UNUSED;
946 basic_block bb1, bb2;
949 rtx i1, i2, last1, last2, afterlast1, afterlast2;
952 /* Skip simple jumps at the end of the blocks. Complex jumps still
953 need to be compared for equivalence, which we'll do below. */
956 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
958 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
966 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
969 /* Count everything except for unconditional jump as insn. */
970 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
978 while (!active_insn_p (i1) && i1 != bb1->head)
981 while (!active_insn_p (i2) && i2 != bb2->head)
984 if (i1 == bb1->head || i2 == bb2->head)
987 if (!insns_match_p (mode, i1, i2))
990 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
991 if (active_insn_p (i1))
993 /* If the merged insns have different REG_EQUAL notes, then
995 rtx equiv1 = find_reg_equal_equiv_note (i1);
996 rtx equiv2 = find_reg_equal_equiv_note (i2);
998 if (equiv1 && !equiv2)
999 remove_note (i1, equiv1);
1000 else if (!equiv1 && equiv2)
1001 remove_note (i2, equiv2);
1002 else if (equiv1 && equiv2
1003 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1005 remove_note (i1, equiv1);
1006 remove_note (i2, equiv2);
1009 afterlast1 = last1, afterlast2 = last2;
1010 last1 = i1, last2 = i2;
1014 i1 = PREV_INSN (i1);
1015 i2 = PREV_INSN (i2);
1019 /* Don't allow the insn after a compare to be shared by
1020 cross-jumping unless the compare is also shared. */
1021 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1022 last1 = afterlast1, last2 = afterlast2, ninsns--;
1025 /* Include preceding notes and labels in the cross-jump. One,
1026 this may bring us to the head of the blocks as requested above.
1027 Two, it keeps line number notes as matched as may be. */
1030 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
1031 last1 = PREV_INSN (last1);
1033 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1034 last1 = PREV_INSN (last1);
1036 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
1037 last2 = PREV_INSN (last2);
1039 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1040 last2 = PREV_INSN (last2);
1049 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1050 the branch instruction. This means that if we commonize the control
1051 flow before end of the basic block, the semantic remains unchanged.
1053 We may assume that there exists one edge with a common destination. */
/* NOTE(review): parameter declarations, returns and braces appear
   elided.  Three cases: single-successor blocks (trivially comparable),
   conditional jumps (conditions must match directly or with swapped
   operands/reversed code, and — with profile data — branch
   probabilities must agree within 5%), and the generic case (ending
   insns must match and the edge lists must pair up by kind).  */
1056 outgoing_edges_match (mode, bb1, bb2)
1061 int nehedges1 = 0, nehedges2 = 0;
1062 edge fallthru1 = 0, fallthru2 = 0;
1065 /* If BB1 has only one successor, we may be looking at either an
1066 unconditional jump, or a fake edge to exit. */
1067 if (bb1->succ && !bb1->succ->succ_next
1068 && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
1069 return (bb2->succ && !bb2->succ->succ_next
1070 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);
1072 /* Match conditional jumps - this may get tricky when fallthru and branch
1073 edges are crossed. */
1075 && bb1->succ->succ_next
1076 && !bb1->succ->succ_next->succ_next
1077 && any_condjump_p (bb1->end)
1078 && onlyjump_p (bb1->end))
1080 edge b1, f1, b2, f2;
1081 bool reverse, match;
1082 rtx set1, set2, cond1, cond2;
1083 enum rtx_code code1, code2;
1086 || !bb2->succ->succ_next
1087 || bb1->succ->succ_next->succ_next
1088 || !any_condjump_p (bb2->end)
1089 || !onlyjump_p (bb1->end))
1092 b1 = BRANCH_EDGE (bb1);
1093 b2 = BRANCH_EDGE (bb2);
1094 f1 = FALLTHRU_EDGE (bb1);
1095 f2 = FALLTHRU_EDGE (bb2);
1097 /* Get around possible forwarders on fallthru edges. Other cases
1098 should be optimized out already. */
1099 if (FORWARDER_BLOCK_P (f1->dest))
1100 f1 = f1->dest->succ;
1102 if (FORWARDER_BLOCK_P (f2->dest))
1103 f2 = f2->dest->succ;
1105 /* To simplify use of this function, return false if there are
1106 unneeded forwarder blocks. These will get eliminated later
1107 during cleanup_cfg. */
1108 if (FORWARDER_BLOCK_P (f1->dest)
1109 || FORWARDER_BLOCK_P (f2->dest)
1110 || FORWARDER_BLOCK_P (b1->dest)
1111 || FORWARDER_BLOCK_P (b2->dest))
1114 if (f1->dest == f2->dest && b1->dest == b2->dest)
1116 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1121 set1 = pc_set (bb1->end);
1122 set2 = pc_set (bb2->end);
1123 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1124 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1127 cond1 = XEXP (SET_SRC (set1), 0);
1128 cond2 = XEXP (SET_SRC (set2), 0);
1129 code1 = GET_CODE (cond1);
1131 code2 = reversed_comparison_code (cond2, bb2->end);
1133 code2 = GET_CODE (cond2);
1135 if (code2 == UNKNOWN)
1138 /* Verify codes and operands match. */
1139 match = ((code1 == code2
1140 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1141 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1142 || (code1 == swap_condition (code2)
1143 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1145 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1148 /* If we return true, we will join the blocks. Which means that
1149 we will only have one branch prediction bit to work with. Thus
1150 we require the existing branches to have probabilities that are
1154 && bb1->frequency > BB_FREQ_MAX / 1000
1155 && bb2->frequency > BB_FREQ_MAX / 1000)
1159 if (b1->dest == b2->dest)
1160 prob2 = b2->probability;
1162 /* Do not use f2 probability as f2 may be forwarded. */
1163 prob2 = REG_BR_PROB_BASE - b2->probability;
1165 /* Fail if the difference in probabilities is
1167 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 20)
1170 fprintf (rtl_dump_file,
1171 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1172 bb1->index, bb2->index, b1->probability, prob2);
1178 if (rtl_dump_file && match)
1179 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1180 bb1->index, bb2->index);
1185 /* Generic case - we are seeing a computed jump, table jump or trapping
1188 /* First ensure that the instructions match. There may be many outgoing
1189 edges so this test is generally cheaper.
1190 ??? Currently the tablejumps will never match, as they do have
1191 different tables. */
1192 if (!insns_match_p (mode, bb1->end, bb2->end))
1195 /* Search the outgoing edges, ensure that the counts do match, find possible
1196 fallthru and exception handling edges since these needs more
1198 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1199 e1 = e1->succ_next, e2 = e2->succ_next)
1201 if (e1->flags & EDGE_EH)
1204 if (e2->flags & EDGE_EH)
1207 if (e1->flags & EDGE_FALLTHRU)
1209 if (e2->flags & EDGE_FALLTHRU)
1213 /* If number of edges of various types does not match, fail. */
1215 || nehedges1 != nehedges2
1216 || (fallthru1 != 0) != (fallthru2 != 0))
1219 /* fallthru edges must be forwarded to the same destination. */
1222 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1223 ? fallthru1->dest->succ->dest: fallthru1->dest);
1224 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1225 ? fallthru2->dest->succ->dest: fallthru2->dest);
1231 /* In case we do have EH edges, ensure we are in the same region. */
1234 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1235 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1237 if (XEXP (n1, 0) != XEXP (n2, 0))
1241 /* We don't need to match the rest of edges as above checks should be enough
1242 to ensure that they are equivalent. */
1246 /* E1 and E2 are edges with the same destination block. Search their
1247 predecessors for common code. If found, redirect control flow from
1248 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
/* MODE is a bitmask of CLEANUP_* flags, forwarded to outgoing_edges_match
   and flow_find_cross_jump.  Returns true iff the CFG was changed (see the
   static prototype in the file header).
   NOTE(review): this excerpt elides several lines of the definition
   (return type, K&R parameter declarations, braces, and the early
   "return false" statements after the failing checks below).  */
1251 try_crossjump_to_edge (mode, e1, e2)
1256 basic_block src1 = e1->src, src2 = e2->src;
1257 basic_block redirect_to;
1258 rtx newpos1, newpos2;
1263 /* Search backward through forwarder blocks. We don't need to worry
1264 about multiple entry or chained forwarders, as they will be optimized
1265 away. We do this to look past the unconditional jump following a
1266 conditional jump that is required due to the current CFG shape. */
/* Step E1/SRC1 back over a single-entry forwarder block (loop/if head
   elided here).  */
1268 && !src1->pred->pred_next
1269 && FORWARDER_BLOCK_P (src1))
1270 e1 = src1->pred, src1 = e1->src;
/* Likewise for E2/SRC2.  */
1273 && !src2->pred->pred_next
1274 && FORWARDER_BLOCK_P (src2))
1275 e2 = src2->pred, src2 = e2->src;
1277 /* Nothing to do if we reach ENTRY, or a common source block. */
1278 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1283 /* Seeing more than one forwarder block would confuse us later... */
1284 if (FORWARDER_BLOCK_P (e1->dest)
1285 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1288 if (FORWARDER_BLOCK_P (e2->dest)
1289 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1292 /* Likewise with dead code (possibly newly created by the other optimizations
1294 if (!src1->pred || !src2->pred)
1297 /* Look for the common insn sequence, part the first ... */
1298 if (!outgoing_edges_match (mode, src1, src2))
1301 /* ... and part the second. */
1302 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1306 /* Avoid splitting if possible. */
/* If the matched sequence starts at SRC2's head we can redirect to SRC2
   itself; otherwise split SRC2 just before the match.
   NOTE(review): the no-split assignment to REDIRECT_TO is elided from
   this excerpt — confirm against the full source.  */
1307 if (newpos2 == src2->head)
1312 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1313 src2->index, nmatch);
1314 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1318 fprintf (rtl_dump_file,
1319 "Cross jumping from bb %i to bb %i; %i common insns\n",
1320 src1->index, src2->index, nmatch);
/* SRC1's tail is about to be retargeted into REDIRECT_TO, so fold its
   execution count and frequency into REDIRECT_TO.  */
1322 redirect_to->count += src1->count;
1323 redirect_to->frequency += src1->frequency;
1325 /* Recompute the frequencies and counts of outgoing edges. */
1326 for (s = redirect_to->succ; s; s = s->succ_next)
1329 basic_block d = s->dest;
1331 if (FORWARDER_BLOCK_P (d))
/* Find SRC1's outgoing edge S2 whose (possibly forwarded) destination
   matches S's.  NOTE(review): this loop has no visible exit test — the
   matching break is elided from this excerpt.  */
1334 for (s2 = src1->succ; ; s2 = s2->succ_next)
1336 basic_block d2 = s2->dest;
1337 if (FORWARDER_BLOCK_P (d2))
1338 d2 = d2->succ->dest;
1343 s->count += s2->count;
1345 /* Take care to update possible forwarder blocks. We verified
1346 that there is no more than one in the chain, so we can't run
1347 into infinite loop. */
1348 if (FORWARDER_BLOCK_P (s->dest))
1350 s->dest->succ->count += s2->count;
1351 s->dest->count += s2->count;
1352 s->dest->frequency += EDGE_FREQUENCY (s);
1355 if (FORWARDER_BLOCK_P (s2->dest))
/* Drain the counts/frequency that SRC1's edge used to contribute to its
   forwarder, clamping at zero to avoid negative profile data.  */
1357 s2->dest->succ->count -= s2->count;
1358 if (s2->dest->succ->count < 0)
1359 s2->dest->succ->count = 0;
1360 s2->dest->count -= s2->count;
1361 s2->dest->frequency -= EDGE_FREQUENCY (s);
1362 if (s2->dest->frequency < 0)
1363 s2->dest->frequency = 0;
1364 if (s2->dest->count < 0)
1365 s2->dest->count = 0;
/* Merge the branch probabilities of S and S2: plain average when both
   blocks have zero frequency, otherwise a frequency-weighted average.  */
1368 if (!redirect_to->frequency && !src1->frequency)
1369 s->probability = (s->probability + s2->probability) / 2;
1372 = ((s->probability * redirect_to->frequency +
1373 s2->probability * src1->frequency)
1374 / (redirect_to->frequency + src1->frequency));
1377 update_br_prob_note (redirect_to);
1379 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1381 /* Skip possible basic block header. */
1382 if (GET_CODE (newpos1) == CODE_LABEL)
1383 newpos1 = NEXT_INSN (newpos1);
1385 if (GET_CODE (newpos1) == NOTE)
1386 newpos1 = NEXT_INSN (newpos1);
1389 /* Emit the jump insn. */
1390 label = block_label (redirect_to);
1391 emit_jump_insn_after (gen_jump (label), src1->end);
1392 JUMP_LABEL (src1->end) = label;
1393 LABEL_NUSES (label)++;
1395 /* Delete the now unreachable instructions. */
/* NOTE(review): LAST is set outside the visible lines — presumably it
   marked the old end of SRC1 before the jump was emitted; confirm.  */
1396 delete_insn_chain (newpos1, last);
1398 /* Make sure there is a barrier after the new jump. */
1399 last = next_nonnote_insn (src1->end);
1400 if (!last || GET_CODE (last) != BARRIER)
1401 emit_barrier_after (src1->end);
/* Replace SRC1's old outgoing edge with a single edge to REDIRECT_TO,
   then refresh its BB_FORWARDER_BLOCK flag.  */
1405 remove_edge (src1->succ);
1406 make_single_succ_edge (src1, redirect_to, 0);
1408 update_forwarder_flag (src1);
1413 /* Search the predecessors of BB for common insn sequences. When found,
1414 share code between them by redirecting control flow. Return true if
1415 any changes made. */
/* MODE is a bitmask of CLEANUP_* flags, forwarded to
   try_crossjump_to_edge.  Returns true iff any cross jump was performed
   (see the static prototype in the file header).
   NOTE(review): several lines of this function (return type, braces,
   the success handling after each try_crossjump_to_edge call, and the
   final return) are elided from this excerpt.  */
1418 try_crossjump_bb (mode, bb)
1422 edge e, e2, nexte2, nexte, fallthru;
1425 /* Nothing to do if there are not at least two incoming edges. */
1426 if (!bb->pred || !bb->pred->pred_next)
1429 /* It is always cheapest to redirect a block that ends in a branch to
1430 a block that falls through into BB, as that adds no branches to the
1431 program. We'll try that combination first. */
1432 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
1433 if (fallthru->flags & EDGE_FALLTHRU)
/* NEXTE is cached up front because a successful cross jump can modify
   BB's predecessor list while we iterate over it.  */
1437 for (e = bb->pred; e; e = nexte)
1439 nexte = e->pred_next;
1441 /* As noted above, first try with the fallthru predecessor. */
1444 /* Don't combine the fallthru edge into anything else.
1445 If there is a match, we'll do it the other way around. */
/* NOTE(review): the handling of a successful match (flagging the change
   and restarting the scan) is elided from this excerpt.  */
1449 if (try_crossjump_to_edge (mode, e, fallthru))
1457 /* Non-obvious work limiting check: Recognize that we're going
1458 to call try_crossjump_bb on every basic block. So if we have
1459 two blocks with lots of outgoing edges (a switch) and they
1460 share lots of common destinations, then we would do the
1461 cross-jump check once for each common destination.
1463 Now, if the blocks actually are cross-jump candidates, then
1464 all of their destinations will be shared. Which means that
1465 we only need check them for cross-jump candidacy once. We
1466 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1467 choosing to do the check from the block for which the edge
1468 in question is the first successor of A. */
1469 if (e->src->succ != e)
1472 for (e2 = bb->pred; e2; e2 = nexte2)
1474 nexte2 = e2->pred_next;
1479 /* We've already checked the fallthru edge above. */
1483 /* The "first successor" check above only prevents multiple
1484 checks of crossjump(A,B). In order to prevent redundant
1485 checks of crossjump(B,A), require that A be the block
1486 with the lowest index. */
1487 if (e->src->index > e2->src->index)
1490 if (try_crossjump_to_edge (mode, e, e2))
1502 /* Do simple CFG optimizations - basic block merging, simplifying of jump
1503 instructions etc. Return nonzero if changes were made. */
/* MODE is a bitmask of CLEANUP_* flags selecting which optimizations run.
   Iterates to a fixed point: CHANGED_OVERALL accumulates the per-pass
   CHANGED flag at the bottom.
   NOTE(review): this excerpt elides the outer fixed-point loop head,
   the per-pass CHANGED declaration, braces, and several statements
   (e.g. the index bookkeeping after block deletion).  */
1506 try_optimize_cfg (mode)
1510 bool changed_overall = false;
/* Fake exit edges from noreturn calls make cross jumping of such blocks
   safe; they are removed again at the end of this function.  */
1515 if (mode & CLEANUP_CROSSJUMP)
1516 add_noreturn_fake_exit_edges ();
/* Prime the per-block BB_FORWARDER_BLOCK flags used by the macros
   in the file header.  */
1518 for (i = 0; i < n_basic_blocks; i++)
1519 update_forwarder_flag (BASIC_BLOCK (i));
1521 if (mode & CLEANUP_UPDATE_LIFE)
/* The whole jump-rewriting pass is skipped when the target forbids
   modifying jumps (e.g. mid-bundle on some ports).  */
1524 if (! (* targetm.cannot_modify_jumps_p) ())
1526 /* Attempt to merge blocks as made possible by edge removal. If
1527 a block has only one successor, and the successor has only
1528 one predecessor, they may be combined. */
1535 fprintf (rtl_dump_file,
1536 "\n\ntry_optimize_cfg iteration %i\n\n",
/* No increment in the for-head: I is advanced only when the current
   block is left unchanged (the advancing code is elided here).  */
1539 for (i = 0; i < n_basic_blocks;)
1541 basic_block c, b = BASIC_BLOCK (i);
1543 bool changed_here = false;
1545 /* Delete trivially dead basic blocks. */
1546 while (b->pred == NULL)
/* Remember the previous block so iteration can resume there after B is
   deleted and blocks are renumbered (continuation lines elided).  */
1548 c = BASIC_BLOCK (b->index - 1);
1550 fprintf (rtl_dump_file, "Deleting block %i.\n",
1553 flow_delete_block (b);
1558 /* Remove code labels no longer used. Don't do this
1559 before CALL_PLACEHOLDER is removed, as some branches
1560 may be hidden within. */
1561 if (b->pred->pred_next == NULL
1562 && (b->pred->flags & EDGE_FALLTHRU)
1563 && !(b->pred->flags & EDGE_COMPLEX)
1564 && GET_CODE (b->head) == CODE_LABEL
1565 && (!(mode & CLEANUP_PRE_SIBCALL)
1566 || !tail_recursion_label_p (b->head))
1567 /* If the previous block ends with a branch to this
1568 block, we can't delete the label. Normally this
1569 is a condjump that is yet to be simplified, but
1570 if CASE_DROPS_THRU, this can be a tablejump with
1571 some element going to the same place as the
1572 default (fallthru). */
1573 && (b->pred->src == ENTRY_BLOCK_PTR
1574 || GET_CODE (b->pred->src->end) != JUMP_INSN
1575 || ! label_is_jump_target_p (b->head,
1576 b->pred->src->end)))
1578 rtx label = b->head;
1580 b->head = NEXT_INSN (b->head);
1581 delete_insn_chain (label, label);
1583 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1587 /* If we fall through an empty block, we can remove it. */
1588 if (b->pred->pred_next == NULL
1589 && (b->pred->flags & EDGE_FALLTHRU)
1590 && GET_CODE (b->head) != CODE_LABEL
1591 && FORWARDER_BLOCK_P (b)
1592 /* Note that forwarder_block_p true ensures that
1593 there is a successor for this block. */
1594 && (b->succ->flags & EDGE_FALLTHRU)
1595 && n_basic_blocks > 1)
1598 fprintf (rtl_dump_file,
1599 "Deleting fallthru block %i.\n",
1602 c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
1603 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1604 flow_delete_block (b);
1609 /* Merge blocks. Loop because chains of blocks might be
1611 while ((s = b->succ) != NULL
1612 && s->succ_next == NULL
1613 && !(s->flags & EDGE_COMPLEX)
1614 && (c = s->dest) != EXIT_BLOCK_PTR
1615 && c->pred->pred_next == NULL
1616 /* If the jump insn has side effects,
1617 we can't kill the edge. */
1618 && (GET_CODE (b->end) != JUMP_INSN
1619 || onlyjump_p (b->end))
1620 && merge_blocks (s, b, c, mode))
1621 changed_here = true;
1623 /* Simplify branch over branch. */
1624 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1625 changed_here = true;
1627 /* If B has a single outgoing edge, but uses a
1628 non-trivial jump instruction without side-effects, we
1629 can either delete the jump entirely, or replace it
1630 with a simple unconditional jump. Use
1631 redirect_edge_and_branch to do the dirty work. */
1633 && ! b->succ->succ_next
1634 && b->succ->dest != EXIT_BLOCK_PTR
1635 && onlyjump_p (b->end)
1636 && redirect_edge_and_branch (b->succ, b->succ->dest))
1638 update_forwarder_flag (b);
1639 changed_here = true;
1642 /* Simplify branch to branch. */
1643 if (try_forward_edges (mode, b))
1644 changed_here = true;
1646 /* Look for shared code between blocks. */
1647 if ((mode & CLEANUP_CROSSJUMP)
1648 && try_crossjump_bb (mode, b))
1649 changed_here = true;
1651 /* Don't get confused by the index shift caused by
/* Crossjumping into the exit block's predecessors is handled once per
   pass, outside the per-block loop.  */
1659 if ((mode & CLEANUP_CROSSJUMP)
1660 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1663 #ifdef ENABLE_CHECKING
1665 verify_flow_info ();
1668 changed_overall |= changed;
/* Undo the fake exit edges added at function entry.  */
1673 if (mode & CLEANUP_CROSSJUMP)
1674 remove_fake_edges ();
1676 if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
1677 update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL,
1678 PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
1679 | PROP_KILL_DEAD_CODE | PROP_LOG_LINKS);
/* Clear the aux field that the BB_FLAGS machinery has been using, so
   later passes can reuse it.  */
1681 for (i = 0; i < n_basic_blocks; i++)
1682 BASIC_BLOCK (i)->aux = NULL;
1684 return changed_overall;
1687 /* Delete all unreachable basic blocks. */
/* Returns whether any block was removed (CHANGED below).
   Relies on find_unreachable_blocks to mark live blocks with
   BB_REACHABLE first.
   NOTE(review): the return type line, the declaration of I, the return
   statement, and (presumably) an "if (changed)" guard around
   tidy_fallthru_edges are elided from this excerpt — confirm against
   the full source.  */
1690 delete_unreachable_blocks ()
1693 bool changed = false;
1695 find_unreachable_blocks ();
1697 /* Delete all unreachable basic blocks. Count down so that we
1698 don't interfere with the block renumbering that happens in
1699 flow_delete_block. */
1701 for (i = n_basic_blocks - 1; i >= 0; --i)
1703 basic_block b = BASIC_BLOCK (i);
1705 if (!(b->flags & BB_REACHABLE))
1706 flow_delete_block (b), changed = true;
1710 tidy_fallthru_edges ();
1714 /* Tidy the CFG by deleting unreachable code and whatnot. */
1720 bool changed = false;
1722 timevar_push (TV_CLEANUP_CFG);
1723 changed = delete_unreachable_blocks ();
1724 if (try_optimize_cfg (mode))
1725 delete_unreachable_blocks (), changed = true;
1727 /* Kill the data we won't maintain. */
1728 free_EXPR_LIST_list (&label_value_list);
1729 free_EXPR_LIST_list (&tail_recursion_label_list);
1730 timevar_pop (TV_CLEANUP_CFG);