/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

    - Unreachable block removal
    - Edge forwarding (an edge to a forwarder block is forwarded to the
      block's successor.  Simplification of the branch instruction is
      performed by the underlying infrastructure, so the branch can be
      converted to a simplejump or eliminated).
    - Cross jumping (tail merging)
    - Conditional jump-around-simplejump simplification
    - Basic block merging.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
    /* Set if BB is a forwarder block, to avoid too many
       forwarder_block_p calls.  */
    BB_FORWARDER_BLOCK = 1,
    BB_NONTHREADABLE_BLOCK = 2
};

#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
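
/* Illustrative note (not in the original sources): the flags above live in
   the otherwise-free AUX field of each basic block, so code in this file
   can test and update them cheaply, e.g.:

       if (!FORWARDER_BLOCK_P (bb) && forwarder_block_p (bb))
         BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);

   try_optimize_cfg calls clear_aux_for_blocks () before returning, so
   these flags stay private to cleanup_cfg and never leak to other
   passes.  */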

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);

/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Recompute the forwarder flag after the block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (EDGE_COUNT (jump_block->preds) >= 2
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
          || (cbranch_jump_edge->flags & EDGE_CROSSING)))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
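
/* A hedged illustration (not part of the original file) of the shape
   try_simplify_condjump handles.  Before:

        cbranch_block:   if (cond) goto L1;
        jump_block:      goto L2;            (a forwarder block)
        L1:              ...

   After inverting the condition and deleting the forwarder:

        cbranch_block:   if (!cond) goto L2;
        L1:              ...

   Both edges are redirected rather than recreated so that the edge
   profile data survives the transformation.  */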

/* Attempt to prove that the operation is a NOOP using CSElib, or mark its
   effect on a register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
         value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
        {
          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          CLEAR_REGNO_REG_SET (nonequal, regno);
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int n = hard_regno_nregs[regno][GET_MODE (dest)];
              while (--n > 0)
                CLEAR_REGNO_REG_SET (nonequal, regno + n);
            }
        }
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (dest)];
          while (--n > 0)
            SET_REGNO_REG_SET (nonequal, regno + n);
        }
      return false;

    default:
      return false;
    }
}
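
/* For illustration only (an assumed example, not original text): given
   the insns

        (set (reg 100) (reg 101))   - marks reg 100 in NONEQUAL
        (clobber (reg 100))         - clears reg 100 again; its value is
                                      dead, so it need not match
        (set (reg 102) (reg 102))   - proven a no-op by cselib; nothing
                                      is marked

   thread_jump below then requires NONEQUAL to end up empty (after
   masking with the live registers when liveness is available).  */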

/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (n-- > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;
        }
    }
  return 0;
}

/* Attempt to prove that basic block B will have no side effects and will
   always continue along the same edge if reached via E.  Return that edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
        return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_XMALLOC();
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_XFREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_XFREE (nonequal);
  cselib_finish ();
  return NULL;
}
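
/* A hedged example (not in the original sources) of the threading above:

        bb A:  if (x == 0) goto B;  else ...
        bb B:  if (x == 0) goto D;  else goto E;

   When B computes nothing new (cselib proves every insn a no-op) and has
   no side effects, the outcome of B's comparison is implied by A's, via
   comparison_dominates_p, so the edge A->B can be threaded directly to D
   (or to E when A's condition implies the reversed comparison).  */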

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed in forwarding the
         fallthru edge to the same place as the branch edge of a conditional
         branch and turn the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      target = first = e->dest;
      counter = 0;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
         up jumps that cross between hot/cold sections.

         Basic block partitioning may result in some jumps that appear
         to be optimizable (or blocks that appear to be mergeable), but which
         really must be left untouched (they are required to make it safely
         across partition boundaries).  See the comments at the top of
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
         details.  */

      if (flag_reorder_blocks_and_partition
          && first != EXIT_BLOCK_PTR
          && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
        return false;

      while (counter < n_basic_blocks)
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= target->flags & BB_DIRTY;

          if (FORWARDER_BLOCK_P (target)
              && !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING)
              && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
            {
              /* Bypass trivial infinite loops.  */
              if (target == EDGE_SUCC (target, 0)->dest)
                counter = n_basic_blocks;
              new_target = EDGE_SUCC (target, 0)->dest;
            }

          /* Allow threading over at most one edge at a time to simplify
             updating of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (mode, e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = xmalloc (sizeof (*threaded_edges)
                                              * n_basic_blocks);
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks;
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  gcc_assert (nthreaded_edges < n_basic_blocks);
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          /* Avoid killing loop pre-headers, as they are where the loop
             optimizer wants to hoist code to.

             For fallthru forwarders, the LOOP_BEG note must appear between
             the header of the block and the CODE_LABEL of the loop; for
             non-forwarders it must appear before the JUMP_INSN.  */
          if ((mode & CLEANUP_PRE_LOOP) && optimize)
            {
              rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
                          ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));

              if (!NOTE_P (insn))
                insn = NEXT_INSN (insn);

              for (; insn && !LABEL_P (insn) && !INSN_P (insn);
                   insn = NEXT_INSN (insn))
                if (NOTE_P (insn)
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  break;

              if (NOTE_P (insn))
                break;

              /* Do not clean up branches to just past the end of a loop
                 at this time; it can mess up the loop optimizer's
                 recognition of some patterns.  */

              insn = PREV_INSN (BB_HEAD (target));
              if (insn && NOTE_P (insn)
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
                break;
            }

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks)
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;

          /* Don't force if target is exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              ei_next (&ei);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
            BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

          do
            {
              edge t;

              if (EDGE_COUNT (first->succs) > 1)
                {
                  gcc_assert (n < nthreaded_edges);
                  t = threaded_edges [n++];
                  gcc_assert (t->src == first);
                  update_bb_profile_for_threading (first, edge_frequency,
                                                   edge_count, t);
                  update_br_prob_note (first);
                }
              else
                {
                  first->count -= edge_count;
                  if (first->count < 0)
                    first->count = 0;
                  first->frequency -= edge_frequency;
                  if (first->frequency < 0)
                    first->frequency = 0;
                  /* It is possible that as the result of
                     threading we've removed the edge, as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges [n]->src)
                    n++;
                  t = EDGE_SUCC (first, 0);
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
          continue;
        }
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
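
/* Worked example (illustrative, not from the original file) of the
   profile update above: with REG_BR_PROB_BASE == 10000, an edge of
   probability 4000 (40%) leaving a block with frequency 50 gives

        edge_frequency = (4000 * 50 + 5000) / 10000 = 20

   and that frequency, together with the edge count, is subtracted from
   every forwarder block skipped over on the way to the final target.  */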

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (a) != BB_PARTITION (b)
          || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
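
/* Illustrative sketch (not original text): A ends in a jump to B and has
   no incoming fallthru edge, so the insn chain

        ... X A Y ... B ...

   can be rewritten as

        ... X Y ... A B ...

   without touching any jumps; A and B are then adjacent and merge_blocks
   can combine them.  */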

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
          || BB_PARTITION (a) != BB_PARTITION (b)))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
          || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
          || BB_PARTITION (b) != BB_PARTITION (c)))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}

/* Remove the memory attributes of MEM expressions X and Y
   where they are not equal.  */

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          rtx mem_size;

          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }
          else if (MEM_OFFSET (x) != MEM_OFFSET (y))
            {
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }

          if (!MEM_SIZE (x))
            mem_size = NULL_RTX;
          else if (!MEM_SIZE (y))
            mem_size = NULL_RTX;
          else
            mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
                                     INTVAL (MEM_SIZE (y))));
          set_mem_size (x, mem_size);
          set_mem_size (y, mem_size);

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}
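
/* A hedged example (not in the original sources): if the matched tails
   of two blocks contain the "same" load, but one MEM carries alias set 2
   with a known MEM_EXPR and offset while the other carries alias set 3
   and no MEM_EXPR, the merged insn keeps only what both agree on: the
   alias set becomes 0 (conflicts with everything), the MEM_EXPR and
   offset are dropped, and the alignment becomes the minimum of the two.
   This keeps the shared insn conservatively correct for both paths.  */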

/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
          /* If the equivalences are not to a constant, they may
             reference pseudos that no longer exist, so we can't
             use them.  */
          && (! reload_completed
              || (CONSTANT_P (XEXP (equiv1, 0))
                  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
        {
          rtx s1 = single_set (i1);
          rtx s2 = single_set (i2);
          if (s1 != 0 && s2 != 0
              && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
            {
              validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
              validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
              if (! rtx_renumbered_equal_p (p1, p2))
                cancel_changes (0);
              else if (apply_change_group ())
                return true;
            }
        }
    }

  return false;
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent insns.  Store the first insns for that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
                      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
        ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
        i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
        i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;

      if (!insns_match_p (mode, i1, i2))
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          /* If the merged insns have different REG_EQUAL notes, then
             remove them.  */
          rtx equiv1 = find_reg_equal_equiv_note (i1);
          rtx equiv2 = find_reg_equal_equiv_note (i2);

          if (equiv1 && !equiv2)
            remove_note (i1, equiv1);
          else if (!equiv1 && equiv2)
            remove_note (i2, equiv2);
          else if (equiv1 && equiv2
                   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
            {
              remove_note (i1, equiv1);
              remove_note (i2, equiv2);
            }

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
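
/* For illustration (an assumed example, not original text): with tails

        bb1:  ...  a = b + 1;  c[a] = 0;  goto L;
        bb2:  ...  a = b + 1;  c[a] = 0;  goto L;

   flow_find_cross_jump returns 2 and points *F1 and *F2 at the first
   "a = b + 1" insns, i.e. at the start of the matched sequences.  */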

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (EDGE_COUNT (bb1->succs) == 1
      && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (EDGE_COUNT (bb2->succs) == 1
            && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = EDGE_SUCC (f1->dest, 0);

      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = EDGE_SUCC (f2->dest, 0);

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
        code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
        code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
        return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         roughly similar.  */
      if (match
          && !optimize_size
          && maybe_hot_bb_p (bb1)
          && maybe_hot_bb_p (bb2))
        {
          int prob2;

          if (b1->dest == b2->dest)
            prob2 = b2->probability;
          else
            /* Do not use f2 probability as f2 may be forwarded.  */
            prob2 = REG_BR_PROB_BASE - b2->probability;

          /* Fail if the difference in probabilities is greater than 50%.
             This rules out two well-predicted branches with opposite
             outcomes.  */
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                         bb1->index, bb2->index, b1->probability, prob2);

              return false;
            }
        }

      if (dump_file && match)
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

#ifndef CASE_DROPS_THROUGH
  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
          && tablejump_p (BB_END (bb2), &label2, &table2)
          && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
        {
          /* The labels should never be the same rtx.  If they really are the
             same, the jump tables are the same too.  So disable crossjumping
             of blocks BB1 and BB2 because when deleting the common insns in
             the end of BB1 by delete_basic_block () the jump table would be
             deleted too.  */
          /* If LABEL2 is referenced in BB1->END do not do anything
             because we would lose information when replacing
             LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
          if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
            {
              /* Set IDENTICAL to true when the tables are identical.  */
              bool identical = false;
              rtx p1, p2;

              p1 = PATTERN (table1);
              p2 = PATTERN (table2);
              if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
                identical = true;
              else if (GET_CODE (p1) == ADDR_DIFF_VEC
                       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
                       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
                       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
                {
                  int i;

                  identical = true;
                  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
                    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
                      identical = false;
                }

              if (identical)
                {
                  replace_label_data rr;
                  bool match;

                  /* Temporarily replace references to LABEL1 with LABEL2
                     in BB1->END so that we could compare the instructions.  */
                  rr.r1 = label1;
                  rr.r2 = label2;
                  rr.update_label_nuses = false;
                  for_each_rtx (&BB_END (bb1), replace_label, &rr);

                  match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
                  if (dump_file && match)
                    fprintf (dump_file,
                             "Tablejumps in bb %i and %i match.\n",
                             bb1->index, bb2->index);

                  /* Set the original label in BB1->END because when deleting
                     a block whose end is a tablejump, the tablejump referenced
                     from the instruction is deleted too.  */
                  rr.r1 = label2;
                  rr.r2 = label1;
                  for_each_rtx (&BB_END (bb1), replace_label, &rr);

                  return match;
                }
            }
          return false;
        }
    }
#endif

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     undocumented assumptions.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
        nehedges1++;

      if (e2->flags & EDGE_EH)
        nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
                        ? EDGE_SUCC (fallthru1->dest, 0)->dest: fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
                        ? EDGE_SUCC (fallthru2->dest, 0)->dest: fallthru2->dest);

      if (d1 != d2)
        return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of edges as above checks should be enough
     to ensure that they are equivalent.  */
  return true;
}
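
/* Worked example of the probability check above (illustrative only):
   with REG_BR_PROB_BASE == 10000, a branch taken with probability 9000
   in BB1 but only 1500 in BB2 gives abs (9000 - 1500) = 7500 > 5000,
   so the match is rejected: merging the blocks would force one branch
   prediction onto two branches that behave differently.  */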

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (EDGE_COUNT (src1->preds) == 1
      && FORWARDER_BLOCK_P (src1))
    e1 = EDGE_PRED (src1, 0), src1 = e1->src;

  if (EDGE_COUNT (src2->preds) == 1
      && FORWARDER_BLOCK_P (src2))
    e2 = EDGE_PRED (src2, 0), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than 1 forwarder blocks would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

#ifndef CASE_DROPS_THROUGH
  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
          && tablejump_p (BB_END (src2), &label2, &table2)
          && label1 != label2)
        {
          replace_label_data rr;
          rtx insn;

          /* Replace references to LABEL1 with LABEL2.  */
          rr.r1 = label1;
          rr.r2 = label2;
          rr.update_label_nuses = true;
          for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
            {
              /* Do not replace the label in SRC1->END because when deleting
                 a block whose end is a tablejump, the tablejump referenced
                 from the instruction is deleted too.  */
              if (insn != BB_END (src1))
                for_each_rtx (&insn, replace_label, &rr);
            }
        }
    }
#endif

  /* Avoid splitting if possible.  */
  if (newpos2 == BB_HEAD (src2))
    redirect_to = src2;
  else
    {
      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
             "Cross jumping from bb %i to bb %i; %i common insns\n",
             src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = EDGE_SUCC (d, 0)->dest;

      FOR_EACH_EDGE (s2, ei, src1->succs)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = EDGE_SUCC (d2, 0)->dest;
          if (d == d2)
            break;
        }

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          EDGE_SUCC (s->dest, 0)->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          EDGE_SUCC (s2->dest, 0)->count -= s2->count;
          if (EDGE_SUCC (s2->dest, 0)->count < 0)
            EDGE_SUCC (s2->dest, 0)->count = 0;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
          if (s2->dest->frequency < 0)
            s2->dest->frequency = 0;
          if (s2->dest->count < 0)
            s2->dest->count = 0;
        }

      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_to->frequency +
              s2->probability * src1->frequency)
             / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = EDGE_SUCC (redirect_from, 0)->dest;

  redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);

  return true;
}
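
/* A hedged sketch (not in the original sources) of a successful
   crossjump.  Before, both predecessors end in the same insns:

        src1:  ...p...  i1 i2 i3  -> dest
        src2:  ...q...  i1 i2 i3  -> dest

   After, src2 is split before its copy of i1, and src1 is redirected to
   the split-off part (redirect_to):

        src1:  ...p...  -> redirect_to
        src2:  ...q...  -> redirect_to:  i1 i2 i3  -> dest

   src1's count and frequency flow into redirect_to, and the outgoing
   edge profiles are recomputed as above.  */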

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  edge_iterator ei;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (EDGE_PRED (bb, 0)->src) != BB_PARTITION (EDGE_PRED (bb, 1)->src)
          || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
        fallthru = e;
    }

  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
        {
          /* Don't combine the fallthru edge into anything else.
             If there is a match, we'll do it the other way around.  */
          if (e == fallthru)
            continue;
          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(fallthru->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, fallthru))
            {
              changed = true;
              ix = 0;
              ev = bb;
              continue;
            }
        }

      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared.  Which means that
         we only need check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check from the block for which the edge
         in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
        continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
        {
          e2 = EDGE_PRED (ev2, ix2);
          ix2++;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(e2->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              ev2 = bb;
              ix = 0;
              break;
            }
        }
    }

  return changed;
}

/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
         a block has only one successor, and the successor has only
         one predecessor, they may be combined.  */
      do
        {
          changed = false;
          iterations++;

          if (dump_file)
            fprintf (dump_file,
                     "\n\ntry_optimize_cfg iteration %i\n\n",
                     iterations);

          for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
            {
              basic_block c;
              edge s;
              bool changed_here = false;

              /* Delete trivially dead basic blocks.  */
              while (EDGE_COUNT (b->preds) == 0)
                {
                  c = b->prev_bb;
                  if (dump_file)
                    fprintf (dump_file, "Deleting block %i.\n",
                             b->index);

                  delete_basic_block (b);
                  if (!(mode & CLEANUP_CFGLAYOUT))
                    changed = true;
                  b = c;
                }

              /* Remove code labels no longer used.  */
              if (EDGE_COUNT (b->preds) == 1
                  && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
                  && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
                  && LABEL_P (BB_HEAD (b))
                  /* If the previous block ends with a branch to this
                     block, we can't delete the label.  Normally this
                     is a condjump that is yet to be simplified, but
                     if CASE_DROPS_THRU, this can be a tablejump with
                     some element going to the same place as the
                     default (fallthru).  */
                  && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
                      || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
                      || ! label_is_jump_target_p (BB_HEAD (b),
                                                   BB_END (EDGE_PRED (b, 0)->src))))
                {
                  rtx label = BB_HEAD (b);

                  delete_insn_chain (label, label);
                  /* In the case the label is undeletable, move it after the
                     BASIC_BLOCK note.  */
                  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
                    {
                      rtx bb_note = NEXT_INSN (BB_HEAD (b));

                      reorder_insns_nobb (label, label, bb_note);
                      BB_HEAD (b) = bb_note;
                    }

                  if (dump_file)
                    fprintf (dump_file, "Deleted label in block %i.\n",
                             b->index);
                }

              /* If we fall through an empty block, we can remove it.  */
              if (!(mode & CLEANUP_CFGLAYOUT)
                  && EDGE_COUNT (b->preds) == 1
                  && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
                  && !LABEL_P (BB_HEAD (b))
                  && FORWARDER_BLOCK_P (b)
                  /* Note that forwarder_block_p true ensures that
                     there is a successor for this block.  */
                  && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
                  && n_basic_blocks > 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Deleting fallthru block %i.\n",
                             b->index);

                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
                  redirect_edge_succ_nodup (EDGE_PRED (b, 0), EDGE_SUCC (b, 0)->dest);
                  delete_basic_block (b);
                  changed = true;
                  b = c;
                }

              if (EDGE_COUNT (b->succs) == 1
                  && (s = EDGE_SUCC (b, 0))
                  && !(s->flags & EDGE_COMPLEX)
                  && (c = s->dest) != EXIT_BLOCK_PTR
                  && EDGE_COUNT (c->preds) == 1
                  && b != c)
                {
                  /* When not in cfg_layout mode use code aware of reordering
                     INSN.  This code possibly creates new basic blocks, so it
                     does not fit the merge_blocks interface and is kept here
                     in the hope that it will become useless once more of the
                     compiler is transformed to use cfg_layout mode.  */

                  if ((mode & CLEANUP_CFGLAYOUT)
                      && can_merge_blocks_p (b, c))
                    {
                      merge_blocks (b, c);
                      update_forwarder_flag (b);
                      changed_here = true;
                    }
                  else if (!(mode & CLEANUP_CFGLAYOUT)
                           /* If the jump insn has side effects,
                              we can't kill the edge.  */
                           && (!JUMP_P (BB_END (b))
                               || (reload_completed
                                   ? simplejump_p (BB_END (b))
                                   : (onlyjump_p (BB_END (b))
                                      && !tablejump_p (BB_END (b),
                                                       NULL, NULL))))
                           && (next = merge_blocks_move (s, b, c, mode)))
                    {
                      b = next;
                      changed_here = true;
                    }
                }

              /* Simplify branch over branch.  */
              if ((mode & CLEANUP_EXPENSIVE)
                  && !(mode & CLEANUP_CFGLAYOUT)
                  && try_simplify_condjump (b))
                changed_here = true;

              /* If B has a single outgoing edge, but uses a
                 non-trivial jump instruction without side-effects, we
                 can either delete the jump entirely, or replace it
                 with a simple unconditional jump.  */
              if (EDGE_COUNT (b->succs) == 1
                  && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
                  && onlyjump_p (BB_END (b))
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
                  && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0), EDGE_SUCC (b, 0)->dest,
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }

              /* Simplify branch to branch.  */
              if (try_forward_edges (mode, b))
                changed_here = true;

              /* Look for shared code between blocks.  */
              if ((mode & CLEANUP_CROSSJUMP)
                  && try_crossjump_bb (mode, b))
                changed_here = true;

              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
            changed = true;

#ifdef ENABLE_CHECKING
          if (changed)
            verify_flow_info ();
#endif

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}

/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
        {
          delete_basic_block (b);
          changed = true;
        }
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Merge sequential blocks if possible.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (EDGE_COUNT (bb->succs) == 1
          && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
        {
          /* Merge the blocks and retry.  */
          merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
          changed = true;
          continue;
        }

      bb = bb->next_bb;
    }

  return changed;
}

/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns(), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
        {
          /* Cleaning up the CFG introduces more opportunities for dead code
             removal that in turn may introduce more opportunities for
             cleaning up the CFG.  */
          if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
                                                 PROP_DEATH_NOTES
                                                 | PROP_SCAN_DEAD_CODE
                                                 | PROP_KILL_DEAD_CODE
                                                 | ((mode & CLEANUP_LOG_LINKS)
                                                    ? PROP_LOG_LINKS : 0)))
            break;
        }
      else if (!(mode & CLEANUP_NO_INSN_DEL)
               && (mode & CLEANUP_EXPENSIVE)
               && !reload_completed)
        {
          if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
            break;
        }
      else
        break;
      delete_dead_jumptables ();
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
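
/* Usage note (illustrative, not original text): callers elsewhere in
   the compiler combine the mode bits tested throughout this file, e.g.

        cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   The CLEANUP_* flags (CLEANUP_EXPENSIVE, CLEANUP_CROSSJUMP,
   CLEANUP_THREADING, CLEANUP_UPDATE_LIFE, CLEANUP_CFGLAYOUT, ...) are
   defined in basic-block.h.  */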