/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
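/* Added illustration (not from the original sources): edge forwarding
   turns the chain

	B  -->  F  -->  D	(F is a forwarder: it contains nothing
				 but a label and an unconditional jump)

   into the direct edge B --> D; once no edges enter F any more, it is
   deleted as unreachable.  */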
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
    /* Set if BB is a forwarder block, to avoid too many
       forwarder_block_p calls.  */
    BB_FORWARDER_BLOCK = 1,
    BB_NONTHREADABLE_BLOCK = 2
};

#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
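/* Added commentary: the flags above live in the otherwise unused bb->aux
   field, so they are private to this pass; try_optimize_cfg resets them
   via clear_aux_for_blocks () before it returns.  */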
/* Set to true when we are running the first pass of try_optimize_cfg loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
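/* Added illustration: the shape handled above is

	if (cond) goto L1;		if (!cond) goto L2;
	goto L2;		=>    L1:
      L1:				...

   i.e. the conditional branch is inverted so that it targets the
   unconditional jump's destination L2, the block holding "goto L2" is
   deleted, and execution falls through into L1.  */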
/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (n-- > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->il.rtl->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
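/* Added example (illustration only): suppose the block at E->SRC ends in
   "if (a > 0) ..." and B ends in a test that the first one dominates,
   e.g. the same "if (a > 0)" on unchanged registers.  If B's body is a
   NOOP on the path through E, the branch outcome in B is already known,
   so the caller can forward the edge straight to the corresponding
   successor of B, bypassing B entirely.  */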
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed in forwarding a
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = 0;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= target->flags & BB_DIRTY;

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks;
	    }

	  /* Allow threading over at most one edge at a time to simplify
	     updating of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = xmalloc (sizeof (*threaded_edges)
					      * n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  /* Avoid killing of loop pre-headers, as it is the place loop
	     optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of block and CODE_LABEL of the loop, for non forwarders
	     it must appear before the JUMP_INSN.  */
	  if ((mode & CLEANUP_PRE_LOOP) && optimize && flag_loop_optimize)
	    {
	      rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
			  ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));

	      if (!NOTE_P (insn))
		insn = NEXT_INSN (insn);

	      for (; insn && !LABEL_P (insn) && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (NOTE_P (insn)
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (insn && NOTE_P (insn))
		break;

	      /* Do not clean up branches to just past the end of a loop
		 at this time; it can mess up the loop optimizer's
		 recognition of some patterns.  */

	      insn = PREV_INSN (BB_HEAD (target));
	      if (insn && NOTE_P (insn)
		  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
		break;
	    }

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
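/* Added note: a successful forwarding redirects a single edge, but the
   profile bookkeeping above walks the entire forwarded chain,
   subtracting the redirected edge's count and frequency from every
   intermediate block and edge so the profile stays consistent.  */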
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
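/* Added summary: with a fallthru edge B->C the blocks are merged in
   place; otherwise, under CLEANUP_EXPENSIVE, C is moved after B when C
   has no outgoing fallthru, or failing that B is moved before C after
   forcing away B's incoming fallthru edge.  */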
/* Removes the memory attributes of a MEM expression
   if they are not equal.  */

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
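/* Added example: if two matched memory references carry alias sets 2
   and 3, the code above resets both to alias set 0 (the conservative
   "conflicts with everything" set); mismatched MEM_EXPR, offset, size
   and alignment attributes are similarly dropped or weakened so the
   surviving insn is valid in both original contexts.  */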
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}
    }

  return false;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent instructions.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
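/* Added note: the loop above walks both blocks backwards from their
   ends, so the returned count is the length of the longest common
   suffix of BB1 and BB2 that insns_match_p accepts; *F1 and *F2 mark
   where that suffix starts in each block.  */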
/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are same
	     the jump tables are same too. So disable crossjumping of blocks BB1
	     and BB2 because when deleting the common insns in the end of BB1
	     by delete_basic_block () the jump table would be deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		identical = true;
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  replace_label_data rr;
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  rr.r1 = label1;
		  rr.r2 = label2;
		  rr.update_label_nuses = false;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  rr.r1 = label2;
		  rr.r2 = label1;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     careful checking.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of edges as above checks should be enough
     to ensure that they are equivalent.  */
  return true;
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
	  && tablejump_p (BB_END (src2), &label2, &table2)
	  && label1 != label2)
	{
	  replace_label_data rr;
	  rtx insn;

	  /* Replace references to LABEL1 with LABEL2.  */
	  rr.r1 = label1;
	  rr.r2 = label2;
	  rr.update_label_nuses = true;
	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	    {
	      /* Do not replace the label in SRC1->END because when deleting
		 a block whose end is a tablejump, the tablejump referenced
		 from the instruction is deleted too.  */
	      if (insn != BB_END (src1))
		for_each_rtx (&insn, replace_label, &rr);
	    }
	}
    }

  /* Avoid splitting if possible.  */
  if (newpos2 == BB_HEAD (src2))
    redirect_to = src2;
  else
    {
      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
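/* Added illustration of a crossjump, assuming the matched tail is
   "x = 1; goto L":

	A: ...; x = 1; goto L;	      A: ...; goto M;
	B: ...; x = 1; goto L;	 =>   B: ...;
				      M: x = 1; goto L;

   SRC2 is split before the matched sequence (label M above is introduced
   here only for the sketch) and SRC1's copy of the tail is deleted.  */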
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  edge_iterator ei;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (!optimize_size
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
					BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
	fallthru = e;
    }

  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(fallthru->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      ix = 0;
	      ev = bb;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
	{
	  e2 = EDGE_PRED (ev2, ix2);
	  ix2++;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(e2->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      ev2 = bb;
	      ix = 0;
	      break;
	    }
	}
    }

  return changed;
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  */
	      while (EDGE_COUNT (b->preds) == 0)
		{
		  c = b->prev_bb;
		  if (dump_file)
		    fprintf (dump_file, "Deleting block %i.\n",
			     b->index);

		  delete_basic_block (b);
		  if (!(mode & CLEANUP_CFGLAYOUT))
		    changed = true;
		  b = c;
		}

	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  rtx label = BB_HEAD (b);

		  delete_insn_chain (label, label);
		  /* In the case the label is undeletable, move it after the
		     BASIC_BLOCK note.  */
		  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
		    {
		      rtx bb_note = NEXT_INSN (BB_HEAD (b));

		      reorder_insns_nobb (label, label, bb_note);
		      BB_HEAD (b) = bb_note;
		    }
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & CLEANUP_CFGLAYOUT)
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		}

	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks so it
		     does not fit merge_blocks interface and is kept here in
		     the hope that it will become useless once more of the
		     compiler is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		   && !(mode & CLEANUP_CFGLAYOUT)
		   && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
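/* Added note: the pass iterates to a fixed point because each successful
   transformation can expose more (e.g. removing a block may turn its
   predecessor into a forwarder).  The BB_DIRTY flags together with
   first_pass keep later iterations from re-examining unchanged blocks
   in try_crossjump_bb and try_forward_edges.  */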
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
	{
	  delete_basic_block (b);
	  changed = true;
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Merges sequential blocks if possible.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (single_succ_p (bb)
	  && can_merge_blocks_p (bb, single_succ (bb)))
	{
	  /* Merge the blocks and retry.  */
	  merge_blocks (bb, single_succ (bb));
	  changed = true;
	  continue;
	}

      bb = bb->next_bb;
    }

  return changed;
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns(), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
	{
	  /* Cleaning up CFG introduces more opportunities for dead code
	     removal that in turn may introduce more opportunities for
	     cleaning up the CFG.  */
	  if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
						 PROP_DEATH_NOTES
						 | PROP_SCAN_DEAD_CODE
						 | PROP_KILL_DEAD_CODE
						 | ((mode & CLEANUP_LOG_LINKS)
						    ? PROP_LOG_LINKS : 0)))
	    break;
	}
      else if (!(mode & CLEANUP_NO_INSN_DEL)
	       && (mode & CLEANUP_EXPENSIVE)
	       && !reload_completed)
	{
	  if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
	    break;
	}
      else
	break;
      delete_dead_jumptables ();
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
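/* Added usage note: callers pick the aggressiveness by OR-ing CLEANUP_*
   flags, e.g. rest_of_handle_jump2 below does

     cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP
		  | (flag_thread_jumps ? CLEANUP_THREADING : 0));

   while a plain cleanup_cfg (0) performs only the cheap
   simplifications.  */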
static void
rest_of_handle_jump (void)
{
  delete_unreachable_blocks ();

  if (cfun->tail_call_emit)
    fixup_tail_calls ();
}

struct tree_opt_pass pass_jump =
{
  "sibling",                            /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump,                  /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func |
  TODO_verify_flow,                     /* todo_flags_finish */
  'i'                                   /* letter */
};
static void
rest_of_handle_jump2 (void)
{
  /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB.  Do this
     before jump optimization switches branch directions.  */
  if (flag_guess_branch_prob)
    expected_value_to_br_prob ();

  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  reg_scan (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));

  create_loop_notes ();

  purge_line_number_notes ();

  if (optimize)
    cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);

  /* Jump optimization, and the removal of NULL pointer checks, may
     have reduced the number of instructions substantially.  CSE, and
     future passes, allocate arrays whose dimensions involve the
     maximum instruction UID, so if we can reduce the maximum UID
     we'll save big on memory.  */
  renumber_insns (dump_file);
}

struct tree_opt_pass pass_jump2 =
{
  "jump",                               /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump2,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  'j'                                   /* letter */
};