1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:
25 - Unreachable blocks removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
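
/* As an illustration (ours, not part of the original documentation),
   edge forwarding turns

	if (cond) goto L1;		if (cond) goto L2;
	...			=>	...
     L1: goto L2;		     L1: goto L2;

   leaving L1 unreachable once no other edges enter it, so that
   unreachable block removal can delete it and basic block merging can
   fuse what remains.  */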
36 #include "coretypes.h"
39 #include "hard-reg-set.h"
43 #include "insn-config.h"
51 #include "cfglayout.h"
53 #include "tree-pass.h"
60 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
/* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
63 static bool first_pass;
/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
66 static bool crossjumps_occured;
68 static bool try_crossjump_to_edge (int, edge, edge);
69 static bool try_crossjump_bb (int, basic_block);
70 static bool outgoing_edges_match (int, basic_block, basic_block);
71 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
72 static bool old_insns_match_p (int, rtx, rtx);
74 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
75 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
76 static bool try_optimize_cfg (int);
77 static bool try_simplify_condjump (basic_block);
78 static bool try_forward_edges (int, basic_block);
79 static edge thread_jump (edge, basic_block);
80 static bool mark_effect (rtx, bitmap);
81 static void notice_new_block (basic_block);
82 static void update_forwarder_flag (basic_block);
83 static int mentions_nonequal_regs (rtx *, void *);
84 static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */
89 notice_new_block (basic_block bb)
94 if (forwarder_block_p (bb))
95 bb->flags |= BB_FORWARDER_BLOCK;
/* Recompute the forwarder flag after the block has been modified.  */
101 update_forwarder_flag (basic_block bb)
103 if (forwarder_block_p (bb))
104 bb->flags |= BB_FORWARDER_BLOCK;
106 bb->flags &= ~BB_FORWARDER_BLOCK;
109 /* Simplify a conditional jump around an unconditional jump.
110 Return true if something changed. */
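
/* Schematically (a source-level sketch; the actual work is done on RTL):

	if (cond) goto A;		if (!cond) goto B;
	goto B;			=>   A:	...
     A:	...

   The conditional branch is inverted so that it targets the destination
   of the unconditional jump, and the block holding the unconditional
   jump becomes dead and is deleted below.  */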
113 try_simplify_condjump (basic_block cbranch_block)
115 basic_block jump_block, jump_dest_block, cbranch_dest_block;
116 edge cbranch_jump_edge, cbranch_fallthru_edge;
119 /* Verify that there are exactly two successors. */
120 if (EDGE_COUNT (cbranch_block->succs) != 2)
/* Verify that we've got a normal conditional branch at the end
   of the block.  */
125 cbranch_insn = BB_END (cbranch_block);
126 if (!any_condjump_p (cbranch_insn))
129 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
130 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
132 /* The next block must not have multiple predecessors, must not
133 be the last block in the function, and must contain just the
134 unconditional jump. */
135 jump_block = cbranch_fallthru_edge->dest;
136 if (!single_pred_p (jump_block)
137 || jump_block->next_bb == EXIT_BLOCK_PTR
138 || !FORWARDER_BLOCK_P (jump_block))
140 jump_dest_block = single_succ (jump_block);
/* If we are partitioning hot/cold basic blocks, we don't want to
   mess up unconditional or indirect jumps that cross between hot
   and cold sections.
146 Basic block partitioning may result in some jumps that appear to
147 be optimizable (or blocks that appear to be mergeable), but which really
148 must be left untouched (they are required to make it safely across
149 partition boundaries). See the comments at the top of
150 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
152 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
153 || (cbranch_jump_edge->flags & EDGE_CROSSING))
156 /* The conditional branch must target the block after the
157 unconditional branch. */
158 cbranch_dest_block = cbranch_jump_edge->dest;
160 if (cbranch_dest_block == EXIT_BLOCK_PTR
161 || !can_fallthru (jump_block, cbranch_dest_block))
164 /* Invert the conditional branch. */
165 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
169 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
170 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
172 /* Success. Update the CFG to match. Note that after this point
173 the edge variable names appear backwards; the redirection is done
174 this way to preserve edge profile data. */
175 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
177 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
179 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
180 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
181 update_br_prob_note (cbranch_block);
183 /* Delete the block with the unconditional jump, and clean up the mess. */
184 delete_basic_block (jump_block);
185 tidy_fallthru_edge (cbranch_jump_edge);
186 update_forwarder_flag (cbranch_block);
/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */
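
/* For instance (a sketch of the two interesting cases), given the pattern
   (set (reg 65) (reg 66)), reg 65 is marked in NONEQUAL unless cselib
   proves the assignment is a no-op; given (clobber (reg 65)), reg 65 is
   cleared from NONEQUAL because a dead value doesn't have to match.  */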
195 mark_effect (rtx exp, regset nonequal)
199 switch (GET_CODE (exp))
/* In case we do clobber the register, mark it as equal, as we know the
   value is dead so it doesn't have to match.  */
204 if (REG_P (XEXP (exp, 0)))
206 dest = XEXP (exp, 0);
207 regno = REGNO (dest);
208 CLEAR_REGNO_REG_SET (nonequal, regno);
209 if (regno < FIRST_PSEUDO_REGISTER)
211 int n = hard_regno_nregs[regno][GET_MODE (dest)];
213 CLEAR_REGNO_REG_SET (nonequal, regno + n);
219 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
221 dest = SET_DEST (exp);
226 regno = REGNO (dest);
227 SET_REGNO_REG_SET (nonequal, regno);
228 if (regno < FIRST_PSEUDO_REGISTER)
230 int n = hard_regno_nregs[regno][GET_MODE (dest)];
232 SET_REGNO_REG_SET (nonequal, regno + n);
241 /* Return nonzero if X is a register set in regset DATA.
242 Called via for_each_rtx. */
244 mentions_nonequal_regs (rtx *x, void *data)
246 regset nonequal = (regset) data;
252 if (REGNO_REG_SET_P (nonequal, regno))
254 if (regno < FIRST_PSEUDO_REGISTER)
256 int n = hard_regno_nregs[regno][GET_MODE (*x)];
258 if (REGNO_REG_SET_P (nonequal, regno + n))
/* Attempt to prove that the basic block B will have no side effects and
   will always continue along the same edge if reached via E.  Return that
   edge if it exists, NULL otherwise.  */
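
/* For example (a sketch; the proof is carried out with cselib on RTL), in

	if (x == 0) goto L1;
	...
     L1: if (x == 0) goto L2;

   the outcome of the test in L1 is implied along the edge from the first
   test, so that edge can be redirected straight to L2, provided the body
   of L1 is shown to be a no-op on that path.  */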
269 thread_jump (edge e, basic_block b)
271 rtx set1, set2, cond1, cond2, insn;
272 enum rtx_code code1, code2, reversed_code2;
273 bool reverse1 = false;
277 reg_set_iterator rsi;
279 if (b->flags & BB_NONTHREADABLE_BLOCK)
/* At the moment, we handle only conditional jumps, but later we may
   want to extend this code to tablejumps and others.  */
284 if (EDGE_COUNT (e->src->succs) != 2)
286 if (EDGE_COUNT (b->succs) != 2)
288 b->flags |= BB_NONTHREADABLE_BLOCK;
/* The second branch must end with an onlyjump, as we will eliminate the jump.  */
293 if (!any_condjump_p (BB_END (e->src)))
296 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
298 b->flags |= BB_NONTHREADABLE_BLOCK;
302 set1 = pc_set (BB_END (e->src));
303 set2 = pc_set (BB_END (b));
304 if (((e->flags & EDGE_FALLTHRU) != 0)
305 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
308 cond1 = XEXP (SET_SRC (set1), 0);
309 cond2 = XEXP (SET_SRC (set2), 0);
311 code1 = reversed_comparison_code (cond1, BB_END (e->src));
313 code1 = GET_CODE (cond1);
315 code2 = GET_CODE (cond2);
316 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
318 if (!comparison_dominates_p (code1, code2)
319 && !comparison_dominates_p (code1, reversed_code2))
/* Ensure that the comparison operators are equivalent.
   ??? This is far too pessimistic.  We should allow swapped operands,
   different CCmodes, or for example interval comparisons that
   dominate even when the operands are not equivalent.  */
326 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
327 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
/* Short circuit cases where block B contains some side effects, as we
   can't safely bypass it.  */
332 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
333 insn = NEXT_INSN (insn))
334 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
336 b->flags |= BB_NONTHREADABLE_BLOCK;
342 /* First process all values computed in the source basic block. */
343 for (insn = NEXT_INSN (BB_HEAD (e->src));
344 insn != NEXT_INSN (BB_END (e->src));
345 insn = NEXT_INSN (insn))
347 cselib_process_insn (insn);
349 nonequal = BITMAP_ALLOC (NULL);
350 CLEAR_REG_SET (nonequal);
/* Now assume that we've continued along the edge E to B and continue
   processing as if it were the same basic block.
   Our goal is to prove that the whole block is a NOOP.  */
356 for (insn = NEXT_INSN (BB_HEAD (b));
357 insn != NEXT_INSN (BB_END (b)) && !failed;
358 insn = NEXT_INSN (insn))
362 rtx pat = PATTERN (insn);
364 if (GET_CODE (pat) == PARALLEL)
366 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
367 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
370 failed |= mark_effect (pat, nonequal);
373 cselib_process_insn (insn);
376 /* Later we should clear nonequal of dead registers. So far we don't
377 have life information in cfg_cleanup. */
380 b->flags |= BB_NONTHREADABLE_BLOCK;
/* cond2 must not mention any register that is not known to hold an
   equal value when B is entered via E.  */
386 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
389 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
392 BITMAP_FREE (nonequal);
394 if ((comparison_dominates_p (code1, code2) != 0)
395 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
396 return BRANCH_EDGE (b);
398 return FALLTHRU_EDGE (b);
401 BITMAP_FREE (nonequal);
406 /* Attempt to forward edges leaving basic block B.
407 Return true if successful. */
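
/* As a sketch, a chain of forwarder blocks

	B -> F1 -> F2 -> T

   where F1 and F2 contain nothing but an unconditional jump is collapsed
   by redirecting B's outgoing edge directly to T; the profile data of the
   intermediate blocks is adjusted below as the chain is walked.  */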
410 try_forward_edges (int mode, basic_block b)
412 bool changed = false;
414 edge e, *threaded_edges = NULL;
/* If we are partitioning hot/cold basic blocks, we don't want to
   mess up unconditional or indirect jumps that cross between hot
   and cold sections.
420 Basic block partitioning may result in some jumps that appear to
421 be optimizable (or blocks that appear to be mergeable), but which really
422 must be left untouched (they are required to make it safely across
423 partition boundaries). See the comments at the top of
424 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
426 if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
429 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
431 basic_block target, first;
432 int counter, goto_locus;
433 bool threaded = false;
434 int nthreaded_edges = 0;
435 bool may_thread = first_pass | df_get_bb_dirty (b);
/* Skip complex edges because we don't know how to update them.

   Still handle fallthru edges, as we can succeed in forwarding the
   fallthru edge to the same place as the branch edge of a conditional
   branch, and turn the conditional branch into an unconditional
   branch.  */
442 if (e->flags & EDGE_COMPLEX)
448 target = first = e->dest;
449 counter = NUM_FIXED_BLOCKS;
450 goto_locus = e->goto_locus;
452 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
453 up jumps that cross between hot/cold sections.
455 Basic block partitioning may result in some jumps that appear
456 to be optimizable (or blocks that appear to be mergeable), but which
457 really must be left untouched (they are required to make it safely
458 across partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete
details.  */
462 if (first != EXIT_BLOCK_PTR
463 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
466 while (counter < n_basic_blocks)
468 basic_block new_target = NULL;
469 bool new_target_threaded = false;
470 may_thread |= df_get_bb_dirty (target);
472 if (FORWARDER_BLOCK_P (target)
473 && !(single_succ_edge (target)->flags & EDGE_CROSSING)
474 && single_succ (target) != EXIT_BLOCK_PTR)
476 /* Bypass trivial infinite loops. */
477 new_target = single_succ (target);
478 if (target == new_target)
479 counter = n_basic_blocks;
482 /* When not optimizing, ensure that edges or forwarder
483 blocks with different locus are not optimized out. */
484 int locus = single_succ_edge (target)->goto_locus;
486 if (locus && goto_locus && locus != goto_locus)
487 counter = n_basic_blocks;
491 if (INSN_P (BB_END (target)))
493 locus = INSN_LOCATOR (BB_END (target));
495 if (locus && goto_locus && locus != goto_locus)
496 counter = n_basic_blocks;
/* Allow threading over only one edge at a time, to simplify
   updating of probabilities.  */
505 else if ((mode & CLEANUP_THREADING) && may_thread)
507 edge t = thread_jump (e, target);
511 threaded_edges = XNEWVEC (edge, n_basic_blocks);
516 /* Detect an infinite loop across blocks not
517 including the start block. */
518 for (i = 0; i < nthreaded_edges; ++i)
519 if (threaded_edges[i] == t)
521 if (i < nthreaded_edges)
523 counter = n_basic_blocks;
528 /* Detect an infinite loop across the start block. */
532 gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
533 threaded_edges[nthreaded_edges++] = t;
535 new_target = t->dest;
536 new_target_threaded = true;
545 threaded |= new_target_threaded;
548 if (counter >= n_basic_blocks)
551 fprintf (dump_file, "Infinite loop in BB %i.\n",
554 else if (target == first)
555 ; /* We didn't do anything. */
558 /* Save the values now, as the edge may get removed. */
559 gcov_type edge_count = e->count;
560 int edge_probability = e->probability;
564 e->goto_locus = goto_locus;
566 /* Don't force if target is exit block. */
567 if (threaded && target != EXIT_BLOCK_PTR)
569 notice_new_block (redirect_edge_and_branch_force (e, target));
571 fprintf (dump_file, "Conditionals threaded.\n");
573 else if (!redirect_edge_and_branch (e, target))
577 "Forwarding edge %i->%i to %i failed.\n",
578 b->index, e->dest->index, target->index);
583 /* We successfully forwarded the edge. Now update profile
584 data: for each edge we traversed in the chain, remove
585 the original edge's execution count. */
586 edge_frequency = ((edge_probability * b->frequency
587 + REG_BR_PROB_BASE / 2)
590 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
591 b->flags |= BB_FORWARDER_BLOCK;
597 if (!single_succ_p (first))
599 gcc_assert (n < nthreaded_edges);
600 t = threaded_edges [n++];
601 gcc_assert (t->src == first);
602 update_bb_profile_for_threading (first, edge_frequency,
604 update_br_prob_note (first);
608 first->count -= edge_count;
609 if (first->count < 0)
611 first->frequency -= edge_frequency;
612 if (first->frequency < 0)
613 first->frequency = 0;
/* It is possible that as the result of
   threading we've removed the edge as it is
   threaded to the fallthru edge.  Avoid
   getting out of sync.  */
618 if (n < nthreaded_edges
619 && first == threaded_edges [n]->src)
621 t = single_succ_edge (first);
624 t->count -= edge_count;
629 while (first != target);
638 free (threaded_edges);
643 /* Blocks A and B are to be merged into a single block. A has no incoming
644 fallthru edge, so it can be moved before B without adding or modifying
645 any jumps (aside from the jump from A to B). */
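
/* Schematically (our sketch of the insn-stream surgery):

	... [A] ... [B] ...	=>	... [A][B] ...

   A is spliced out of the insn chain and reinserted just before B; the
   barrier that followed A is deleted, and the now-contiguous blocks are
   merged.  */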
648 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
/* If we are partitioning hot/cold basic blocks, we don't want to
   mess up unconditional or indirect jumps that cross between hot
   and cold sections.
656 Basic block partitioning may result in some jumps that appear to
657 be optimizable (or blocks that appear to be mergeable), but which really
658 must be left untouched (they are required to make it safely across
659 partition boundaries). See the comments at the top of
660 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
662 if (BB_PARTITION (a) != BB_PARTITION (b))
665 barrier = next_nonnote_insn (BB_END (a));
666 gcc_assert (BARRIER_P (barrier));
667 delete_insn (barrier);
669 /* Scramble the insn chain. */
670 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
671 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
675 fprintf (dump_file, "Moved block %d before %d and merged.\n",
678 /* Swap the records for the two blocks around. */
681 link_block (a, b->prev_bb);
683 /* Now blocks A and B are contiguous. Merge them. */
687 /* Blocks A and B are to be merged into a single block. B has no outgoing
688 fallthru edge, so it can be moved after A without adding or modifying
689 any jumps (aside from the jump from A to B). */
692 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
694 rtx barrier, real_b_end;
/* If we are partitioning hot/cold basic blocks, we don't want to
   mess up unconditional or indirect jumps that cross between hot
   and cold sections.
701 Basic block partitioning may result in some jumps that appear to
702 be optimizable (or blocks that appear to be mergeable), but which really
703 must be left untouched (they are required to make it safely across
704 partition boundaries). See the comments at the top of
705 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
707 if (BB_PARTITION (a) != BB_PARTITION (b))
710 real_b_end = BB_END (b);
/* If there is a jump table following block B, temporarily add the jump
   table to block B so that it will also be moved to the correct
   location.  */
714 if (tablejump_p (BB_END (b), &label, &table)
715 && prev_active_insn (label) == BB_END (b))
720 /* There had better have been a barrier there. Delete it. */
721 barrier = NEXT_INSN (BB_END (b));
722 if (barrier && BARRIER_P (barrier))
723 delete_insn (barrier);
726 /* Scramble the insn chain. */
727 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
729 /* Restore the real end of b. */
730 BB_END (b) = real_b_end;
733 fprintf (dump_file, "Moved block %d after %d and merged.\n",
736 /* Now blocks A and B are contiguous. Merge them. */
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   where C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
753 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
/* If we are partitioning hot/cold basic blocks, we don't want to
   mess up unconditional or indirect jumps that cross between hot
   and cold sections.
761 Basic block partitioning may result in some jumps that appear to
762 be optimizable (or blocks that appear to be mergeable), but which really
763 must be left untouched (they are required to make it safely across
764 partition boundaries). See the comments at the top of
765 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
767 if (BB_PARTITION (b) != BB_PARTITION (c))
770 /* If B has a fallthru edge to C, no need to move anything. */
771 if (e->flags & EDGE_FALLTHRU)
773 int b_index = b->index, c_index = c->index;
775 update_forwarder_flag (b);
778 fprintf (dump_file, "Merged %d and %d without moving.\n",
781 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
784 /* Otherwise we will need to move code around. Do that only if expensive
785 transformations are allowed. */
786 else if (mode & CLEANUP_EXPENSIVE)
788 edge tmp_edge, b_fallthru_edge;
789 bool c_has_outgoing_fallthru;
790 bool b_has_incoming_fallthru;
793 /* Avoid overactive code motion, as the forwarder blocks should be
794 eliminated by edge redirection instead. One exception might have
795 been if B is a forwarder block and C has no fallthru edge, but
796 that should be cleaned up by bb-reorder instead. */
797 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
800 /* We must make sure to not munge nesting of lexical blocks,
801 and loop notes. This is done by squeezing out all the notes
802 and leaving them there to lie. Not ideal, but functional. */
804 FOR_EACH_EDGE (tmp_edge, ei, c->succs)
805 if (tmp_edge->flags & EDGE_FALLTHRU)
808 c_has_outgoing_fallthru = (tmp_edge != NULL);
810 FOR_EACH_EDGE (tmp_edge, ei, b->preds)
811 if (tmp_edge->flags & EDGE_FALLTHRU)
814 b_has_incoming_fallthru = (tmp_edge != NULL);
815 b_fallthru_edge = tmp_edge;
818 next = next->prev_bb;
820 /* Otherwise, we're going to try to move C after B. If C does
821 not have an outgoing fallthru, then it can be moved
822 immediately after B without introducing or modifying jumps. */
823 if (! c_has_outgoing_fallthru)
825 merge_blocks_move_successor_nojumps (b, c);
826 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
829 /* If B does not have an incoming fallthru, then it can be moved
830 immediately before C without introducing or modifying jumps.
831 C cannot be the first block, so we do not have to worry about
832 accessing a non-existent block. */
834 if (b_has_incoming_fallthru)
838 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
840 bb = force_nonfallthru (b_fallthru_edge);
842 notice_new_block (bb);
845 merge_blocks_move_predecessor_nojumps (b, c);
846 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
/* Remove the memory attributes of the two MEM expressions X and Y
   if they are not equal.  */
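
/* When the corresponding MEMs disagree, the attributes are weakened to a
   conservative common value; as an illustrative summary (ours):

	alias set	dropped to 0 if the sets differ
	MEM_EXPR	cleared, with offsets, if the exprs differ
	MEM_OFFSET	cleared if the offsets differ
	MEM_SIZE	raised to the larger of the two sizes
	MEM_ALIGN	lowered to the smaller of the two alignments  */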
857 merge_memattrs (rtx x, rtx y)
866 if (x == 0 || y == 0)
871 if (code != GET_CODE (y))
874 if (GET_MODE (x) != GET_MODE (y))
877 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
881 else if (! MEM_ATTRS (y))
887 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
889 set_mem_alias_set (x, 0);
890 set_mem_alias_set (y, 0);
893 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
897 set_mem_offset (x, 0);
898 set_mem_offset (y, 0);
900 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
902 set_mem_offset (x, 0);
903 set_mem_offset (y, 0);
908 else if (!MEM_SIZE (y))
911 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
912 INTVAL (MEM_SIZE (y))));
913 set_mem_size (x, mem_size);
914 set_mem_size (y, mem_size);
916 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
917 set_mem_align (y, MEM_ALIGN (x));
921 fmt = GET_RTX_FORMAT (code);
922 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
927 /* Two vectors must have the same length. */
928 if (XVECLEN (x, i) != XVECLEN (y, i))
931 for (j = 0; j < XVECLEN (x, i); j++)
932 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
937 merge_memattrs (XEXP (x, i), XEXP (y, i));
944 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
947 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
951 /* Verify that I1 and I2 are equivalent. */
952 if (GET_CODE (i1) != GET_CODE (i2))
958 if (GET_CODE (p1) != GET_CODE (p2))
961 /* If this is a CALL_INSN, compare register usage information.
962 If we don't check this on stack register machines, the two
963 CALL_INSNs might be merged leaving reg-stack.c with mismatching
964 numbers of stack registers in the same basic block.
965 If we don't check this on machines with delay slots, a delay slot may
966 be filled that clobbers a parameter expected by the subroutine.
968 ??? We take the simple route for now and assume that if they're
969 equal, they were constructed identically. */
972 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
973 CALL_INSN_FUNCTION_USAGE (i2))
974 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
/* If cross_jump_death_matters is not 0, the insn's mode
   indicates whether or not the insn contains any stack-like
   regs.  */
982 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
984 /* If register stack conversion has already been done, then
985 death notes must also be compared before it is certain that
986 the two instruction streams match. */
989 HARD_REG_SET i1_regset, i2_regset;
991 CLEAR_HARD_REG_SET (i1_regset);
992 CLEAR_HARD_REG_SET (i2_regset);
994 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
995 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
996 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
998 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
999 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1000 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1002 if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1007 if (reload_completed
1008 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1011 /* Do not do EQUIV substitution after reload. First, we're undoing the
1012 work of reload_cse. Second, we may be undoing the work of the post-
1013 reload splitting pass. */
1014 /* ??? Possibly add a new phase switch variable that can be used by
1015 targets to disallow the troublesome insns after splitting. */
1016 if (!reload_completed)
1018 /* The following code helps take care of G++ cleanups. */
1019 rtx equiv1 = find_reg_equal_equiv_note (i1);
1020 rtx equiv2 = find_reg_equal_equiv_note (i2);
1022 if (equiv1 && equiv2
/* If the equivalences are not to a constant, they may
   reference pseudos that no longer exist, so we can't
   use them.  */
1026 && (! reload_completed
1027 || (CONSTANT_P (XEXP (equiv1, 0))
1028 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1030 rtx s1 = single_set (i1);
1031 rtx s2 = single_set (i2);
1032 if (s1 != 0 && s2 != 0
1033 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1035 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1036 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1037 if (! rtx_renumbered_equal_p (p1, p2))
1039 else if (apply_change_group ())
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent insns.  Store the first insns for that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
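
/* For instance (a sketch), given the block tails

	BB1: ... ; x = a + b; *p = x; jump L
	BB2: ... ; x = a + b; *p = x; jump L

   the two equivalent trailing insns are found, *F1 and *F2 are set to the
   respective "x = a + b" insns, and 2 is returned.  */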
1056 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1057 basic_block bb2, rtx *f1, rtx *f2)
1059 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1062 /* Skip simple jumps at the end of the blocks. Complex jumps still
1063 need to be compared for equivalence, which we'll do below. */
1066 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1068 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1071 i1 = PREV_INSN (i1);
1076 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
/* Count everything except for the unconditional jump as an insn.  */
1080 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1082 i2 = PREV_INSN (i2);
1088 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1089 i1 = PREV_INSN (i1);
1091 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1092 i2 = PREV_INSN (i2);
1094 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1097 if (!old_insns_match_p (mode, i1, i2))
1100 merge_memattrs (i1, i2);
1102 /* Don't begin a cross-jump with a NOTE insn. */
/* If the merged insns have different REG_EQUAL notes, then
   remove them.  */
1107 rtx equiv1 = find_reg_equal_equiv_note (i1);
1108 rtx equiv2 = find_reg_equal_equiv_note (i2);
1110 if (equiv1 && !equiv2)
1111 remove_note (i1, equiv1);
1112 else if (!equiv1 && equiv2)
1113 remove_note (i2, equiv2);
1114 else if (equiv1 && equiv2
1115 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1117 remove_note (i1, equiv1);
1118 remove_note (i2, equiv2);
1121 afterlast1 = last1, afterlast2 = last2;
1122 last1 = i1, last2 = i2;
1126 i1 = PREV_INSN (i1);
1127 i2 = PREV_INSN (i2);
1131 /* Don't allow the insn after a compare to be shared by
1132 cross-jumping unless the compare is also shared. */
1133 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1134 last1 = afterlast1, last2 = afterlast2, ninsns--;
1137 /* Include preceding notes and labels in the cross-jump. One,
1138 this may bring us to the head of the blocks as requested above.
1139 Two, it keeps line number notes as matched as may be. */
1142 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1143 last1 = PREV_INSN (last1);
1145 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1146 last1 = PREV_INSN (last1);
1148 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1149 last2 = PREV_INSN (last2);
1151 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1152 last2 = PREV_INSN (last2);
/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
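
/* As an example (ours), the block endings

	if (x < y) goto A; else goto B;
	if (x >= y) goto B; else goto A;

   match with reversed condition codes, because control is transferred to
   A and B under the same run-time conditions in both blocks.  */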
1168 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1170 int nehedges1 = 0, nehedges2 = 0;
1171 edge fallthru1 = 0, fallthru2 = 0;
1175 /* If BB1 has only one successor, we may be looking at either an
1176 unconditional jump, or a fake edge to exit. */
1177 if (single_succ_p (bb1)
1178 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1179 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1180 return (single_succ_p (bb2)
1181 && (single_succ_edge (bb2)->flags
1182 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1183 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1185 /* Match conditional jumps - this may get tricky when fallthru and branch
1186 edges are crossed. */
1187 if (EDGE_COUNT (bb1->succs) == 2
1188 && any_condjump_p (BB_END (bb1))
1189 && onlyjump_p (BB_END (bb1)))
1191 edge b1, f1, b2, f2;
1192 bool reverse, match;
1193 rtx set1, set2, cond1, cond2;
1194 enum rtx_code code1, code2;
1196 if (EDGE_COUNT (bb2->succs) != 2
1197 || !any_condjump_p (BB_END (bb2))
1198 || !onlyjump_p (BB_END (bb2)))
1201 b1 = BRANCH_EDGE (bb1);
1202 b2 = BRANCH_EDGE (bb2);
1203 f1 = FALLTHRU_EDGE (bb1);
1204 f2 = FALLTHRU_EDGE (bb2);
1206 /* Get around possible forwarders on fallthru edges. Other cases
1207 should be optimized out already. */
1208 if (FORWARDER_BLOCK_P (f1->dest))
1209 f1 = single_succ_edge (f1->dest);
1211 if (FORWARDER_BLOCK_P (f2->dest))
1212 f2 = single_succ_edge (f2->dest);
1214 /* To simplify use of this function, return false if there are
1215 unneeded forwarder blocks. These will get eliminated later
1216 during cleanup_cfg. */
1217 if (FORWARDER_BLOCK_P (f1->dest)
1218 || FORWARDER_BLOCK_P (f2->dest)
1219 || FORWARDER_BLOCK_P (b1->dest)
1220 || FORWARDER_BLOCK_P (b2->dest))
1223 if (f1->dest == f2->dest && b1->dest == b2->dest)
1225 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1230 set1 = pc_set (BB_END (bb1));
1231 set2 = pc_set (BB_END (bb2));
1232 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1233 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1236 cond1 = XEXP (SET_SRC (set1), 0);
1237 cond2 = XEXP (SET_SRC (set2), 0);
1238 code1 = GET_CODE (cond1);
1240 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1242 code2 = GET_CODE (cond2);
1244 if (code2 == UNKNOWN)
1247 /* Verify codes and operands match. */
1248 match = ((code1 == code2
1249 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1250 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1251 || (code1 == swap_condition (code2)
1252 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1254 && rtx_renumbered_equal_p (XEXP (cond1, 0),
/* If we return true, we will join the blocks.  Which means that
   we will only have one branch prediction bit to work with.  Thus
   we require the existing branches to have probabilities that are
   reasonably close.  */
if (match
1262 && optimize_bb_for_speed_p (bb1)
1263 && optimize_bb_for_speed_p (bb2))
1267 if (b1->dest == b2->dest)
1268 prob2 = b2->probability;
1270 /* Do not use f2 probability as f2 may be forwarded. */
1271 prob2 = REG_BR_PROB_BASE - b2->probability;
/* Fail if the difference in probabilities is greater than 50%.
   This rules out two well-predicted branches with opposite
   outcomes.  */
1276 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1280 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1281 bb1->index, bb2->index, b1->probability, prob2);
1287 if (dump_file && match)
1288 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1289 bb1->index, bb2->index);
/* Generic case - we are seeing a computed jump, table jump or trapping
   instruction.  */
1297 /* Check whether there are tablejumps in the end of BB1 and BB2.
1298 Return true if they are identical. */
1303 if (tablejump_p (BB_END (bb1), &label1, &table1)
1304 && tablejump_p (BB_END (bb2), &label2, &table2)
1305 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
/* The labels should never be the same rtx.  If they really are the
   same, the jump tables are the same too.  So disable crossjumping
   of blocks BB1 and BB2 because when deleting the common insns in
   the end of BB1 by delete_basic_block () the jump table would be
   deleted too.  */
/* If LABEL2 is referenced in BB1->END do not do anything
   because we would lose information when replacing
   LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1314 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1316 /* Set IDENTICAL to true when the tables are identical. */
1317 bool identical = false;
1320 p1 = PATTERN (table1);
1321 p2 = PATTERN (table2);
1322 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1326 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1327 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1328 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1329 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1334 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1335 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1341 replace_label_data rr;
/* Temporarily replace references to LABEL1 with LABEL2
   in BB1->END so that we can compare the instructions.  */
1348 rr.update_label_nuses = false;
1349 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1351 match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1352 if (dump_file && match)
1354 "Tablejumps in bb %i and %i match.\n",
1355 bb1->index, bb2->index);
1357 /* Set the original label in BB1->END because when deleting
1358 a block whose end is a tablejump, the tablejump referenced
1359 from the instruction is deleted too. */
1362 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1371 /* First ensure that the instructions match. There may be many outgoing
1372 edges so this test is generally cheaper. */
1373 if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
/* Search the outgoing edges, ensure that the counts do match, find
   possible fallthru and exception handling edges since these need more
   careful checking.  */
1379 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1382 FOR_EACH_EDGE (e1, ei, bb1->succs)
1384 e2 = EDGE_SUCC (bb2, ei.index);
1386 if (e1->flags & EDGE_EH)
1389 if (e2->flags & EDGE_EH)
1392 if (e1->flags & EDGE_FALLTHRU)
1394 if (e2->flags & EDGE_FALLTHRU)
/* If the number of edges of various types does not match, fail.  */
1399 if (nehedges1 != nehedges2
1400 || (fallthru1 != 0) != (fallthru2 != 0))
1403 /* fallthru edges must be forwarded to the same destination. */
1406 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1407 ? single_succ (fallthru1->dest): fallthru1->dest);
1408 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1409 ? single_succ (fallthru2->dest): fallthru2->dest);
1415 /* Ensure the same EH region. */
1417 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1418 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1423 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
/* The same checks as in try_crossjump_to_edge.  They are required for
   the RTL version of sequence abstraction.  */
1429 FOR_EACH_EDGE (e1, ei, bb2->succs)
1433 basic_block d1 = e1->dest;
1435 if (FORWARDER_BLOCK_P (d1))
1436 d1 = EDGE_SUCC (d1, 0)->dest;
1438 FOR_EACH_EDGE (e2, ei, bb1->succs)
1440 basic_block d2 = e2->dest;
1441 if (FORWARDER_BLOCK_P (d2))
1442 d2 = EDGE_SUCC (d2, 0)->dest;
/* Return true if the basic block BB has a preserved label.  */
1457 block_has_preserve_label (basic_block bb)
1461 && LABEL_PRESERVE_P (block_label (bb)));
1464 /* E1 and E2 are edges with the same destination block. Search their
1465 predecessors for common code. If found, redirect control flow from
1466 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
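
/* A sketch of the transformation (tail merging):

	A: x = f (); goto C;		A: goto B';
	B: x = f (); goto C;	=>	B': x = f (); goto C;

   E2->SRC is split before the matched sequence, and the tail of E1->SRC
   is replaced by a jump to the split point.  */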
1469 try_crossjump_to_edge (int mode, edge e1, edge e2)
1472 basic_block src1 = e1->src, src2 = e2->src;
1473 basic_block redirect_to, redirect_from, to_remove;
1474 rtx newpos1, newpos2;
1478 newpos1 = newpos2 = NULL_RTX;
1480 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1481 to try this optimization.
1483 Basic block partitioning may result in some jumps that appear to
1484 be optimizable (or blocks that appear to be mergeable), but which really
1485 must be left untouched (they are required to make it safely across
1486 partition boundaries). See the comments at the top of
1487 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1489 if (flag_reorder_blocks_and_partition && reload_completed)
1492 /* Search backward through forwarder blocks. We don't need to worry
1493 about multiple entry or chained forwarders, as they will be optimized
1494 away. We do this to look past the unconditional jump following a
1495 conditional jump that is required due to the current CFG shape. */
1496 if (single_pred_p (src1)
1497 && FORWARDER_BLOCK_P (src1))
1498 e1 = single_pred_edge (src1), src1 = e1->src;
1500 if (single_pred_p (src2)
1501 && FORWARDER_BLOCK_P (src2))
1502 e2 = single_pred_edge (src2), src2 = e2->src;
1504 /* Nothing to do if we reach ENTRY, or a common source block. */
1505 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
/* Seeing more than one forwarder block would confuse us later...  */
1511 if (FORWARDER_BLOCK_P (e1->dest)
1512 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1515 if (FORWARDER_BLOCK_P (e2->dest)
1516 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
/* Likewise with dead code (possibly newly created by the other
   optimizations of cfg_cleanup).  */
1521 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1524 /* Look for the common insn sequence, part the first ... */
1525 if (!outgoing_edges_match (mode, src1, src2))
1528 /* ... and part the second. */
1529 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
/* Don't proceed with the crossjump unless we found a sufficient number
   of matching instructions or the 'from' block was totally matched
   (such that its predecessors will hopefully be redirected and the
   block removed).  */
1535 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1536 && (newpos1 != BB_HEAD (src1)))
1539 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
1540 if (block_has_preserve_label (e1->dest)
1541 && (e1->flags & EDGE_ABNORMAL))
/* Here we know that the insns in the end of SRC1 which are common with
   SRC2 will be deleted.
   If we have tablejumps in the end of SRC1 and SRC2
   they have already been compared for equivalence in
   outgoing_edges_match () so replace the references to TABLE1 by
   references to TABLE2.  */
1553 if (tablejump_p (BB_END (src1), &label1, &table1)
1554 && tablejump_p (BB_END (src2), &label2, &table2)
1555 && label1 != label2)
1557 replace_label_data rr;
1560 /* Replace references to LABEL1 with LABEL2. */
1563 rr.update_label_nuses = true;
1564 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1566 /* Do not replace the label in SRC1->END because when deleting
1567 a block whose end is a tablejump, the tablejump referenced
1568 from the instruction is deleted too. */
1569 if (insn != BB_END (src1))
1570 for_each_rtx (&insn, replace_label, &rr);
1575 /* Avoid splitting if possible. We must always split when SRC2 has
1576 EH predecessor edges, or we may end up with basic blocks with both
1577 normal and EH predecessor edges. */
1578 if (newpos2 == BB_HEAD (src2)
1579 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1583 if (newpos2 == BB_HEAD (src2))
1585 /* Skip possible basic block header. */
1586 if (LABEL_P (newpos2))
1587 newpos2 = NEXT_INSN (newpos2);
1588 if (NOTE_P (newpos2))
1589 newpos2 = NEXT_INSN (newpos2);
1593 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1594 src2->index, nmatch);
1595 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1600 "Cross jumping from bb %i to bb %i; %i common insns\n",
1601 src1->index, src2->index, nmatch);
1603 /* We may have some registers visible through the block. */
1604 df_set_bb_dirty (redirect_to);
1606 /* Recompute the frequencies and counts of outgoing edges. */
1607 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1611 basic_block d = s->dest;
1613 if (FORWARDER_BLOCK_P (d))
1614 d = single_succ (d);
1616 FOR_EACH_EDGE (s2, ei, src1->succs)
1618 basic_block d2 = s2->dest;
1619 if (FORWARDER_BLOCK_P (d2))
1620 d2 = single_succ (d2);
1625 s->count += s2->count;
/* Take care to update possible forwarder blocks.  We verified
   that there is no more than one in the chain, so we can't run
   into an infinite loop.  */
1630 if (FORWARDER_BLOCK_P (s->dest))
1632 single_succ_edge (s->dest)->count += s2->count;
1633 s->dest->count += s2->count;
1634 s->dest->frequency += EDGE_FREQUENCY (s);
1637 if (FORWARDER_BLOCK_P (s2->dest))
1639 single_succ_edge (s2->dest)->count -= s2->count;
1640 if (single_succ_edge (s2->dest)->count < 0)
1641 single_succ_edge (s2->dest)->count = 0;
1642 s2->dest->count -= s2->count;
1643 s2->dest->frequency -= EDGE_FREQUENCY (s);
1644 if (s2->dest->frequency < 0)
1645 s2->dest->frequency = 0;
1646 if (s2->dest->count < 0)
1647 s2->dest->count = 0;
1650 if (!redirect_to->frequency && !src1->frequency)
1651 s->probability = (s->probability + s2->probability) / 2;
1654 = ((s->probability * redirect_to->frequency +
1655 s2->probability * src1->frequency)
1656 / (redirect_to->frequency + src1->frequency));
1659 /* Adjust count and frequency for the block. An earlier jump
1660 threading pass may have left the profile in an inconsistent
1661 state (see update_bb_profile_for_threading) so we must be
1662 prepared for overflows. */
1663 redirect_to->count += src1->count;
1664 redirect_to->frequency += src1->frequency;
1665 if (redirect_to->frequency > BB_FREQ_MAX)
1666 redirect_to->frequency = BB_FREQ_MAX;
1667 update_br_prob_note (redirect_to);
1669 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1671 /* Skip possible basic block header. */
1672 if (LABEL_P (newpos1))
1673 newpos1 = NEXT_INSN (newpos1);
1675 if (NOTE_P (newpos1))
1676 newpos1 = NEXT_INSN (newpos1);
1678 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1679 to_remove = single_succ (redirect_from);
1681 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1682 delete_basic_block (to_remove);
1684 update_forwarder_flag (redirect_from);
1685 if (redirect_to != src2)
1686 update_forwarder_flag (src2);
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
1696 try_crossjump_bb (int mode, basic_block bb)
1698 edge e, e2, fallthru;
1700 unsigned max, ix, ix2;
1701 basic_block ev, ev2;
/* Nothing to do if there are not at least two incoming edges.  */
1705 if (EDGE_COUNT (bb->preds) < 2)
1708 /* Don't crossjump if this block ends in a computed jump,
1709 unless we are optimizing for size. */
1710 if (optimize_bb_for_size_p (bb)
1711 && bb != EXIT_BLOCK_PTR
1712 && computed_jump_p (BB_END (bb)))
/* If we are partitioning hot/cold basic blocks, we don't want to
   mess up unconditional or indirect jumps that cross between hot
   and cold sections.
1719 Basic block partitioning may result in some jumps that appear to
1720 be optimizable (or blocks that appear to be mergeable), but which really
1721 must be left untouched (they are required to make it safely across
1722 partition boundaries). See the comments at the top of
1723 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1725 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1726 BB_PARTITION (EDGE_PRED (bb, 1)->src)
1727 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1730 /* It is always cheapest to redirect a block that ends in a branch to
1731 a block that falls through into BB, as that adds no branches to the
1732 program. We'll try that combination first. */
1734 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1736 if (EDGE_COUNT (bb->preds) > max)
1739 FOR_EACH_EDGE (e, ei, bb->preds)
1741 if (e->flags & EDGE_FALLTHRU)
1749 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1751 e = EDGE_PRED (ev, ix);
1754 /* As noted above, first try with the fallthru predecessor (or, a
1755 fallthru predecessor if we are in cfglayout mode). */
1758 /* Don't combine the fallthru edge into anything else.
1759 If there is a match, we'll do it the other way around. */
/* If nothing changed since the last attempt, there is nothing
   we can do.  */
if (!first_pass
1765 && (!(df_get_bb_dirty (e->src))
1766 && !(df_get_bb_dirty (fallthru->src))))
1769 if (try_crossjump_to_edge (mode, e, fallthru))
1778 /* Non-obvious work limiting check: Recognize that we're going
1779 to call try_crossjump_bb on every basic block. So if we have
1780 two blocks with lots of outgoing edges (a switch) and they
1781 share lots of common destinations, then we would do the
1782 cross-jump check once for each common destination.
1784 Now, if the blocks actually are cross-jump candidates, then
1785 all of their destinations will be shared. Which means that
we only need to check them for cross-jump candidacy once.  We
1787 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1788 choosing to do the check from the block for which the edge
1789 in question is the first successor of A. */
1790 if (EDGE_SUCC (e->src, 0) != e)
1793 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1795 e2 = EDGE_PRED (ev2, ix2);
1801 /* We've already checked the fallthru edge above. */
1805 /* The "first successor" check above only prevents multiple
1806 checks of crossjump(A,B). In order to prevent redundant
1807 checks of crossjump(B,A), require that A be the block
1808 with the lowest index. */
1809 if (e->src->index > e2->src->index)
/* If nothing changed since the last attempt, there is nothing
   we can do.  */
if (!first_pass
1815 && (!(df_get_bb_dirty (e->src))
1816 && !(df_get_bb_dirty (e2->src))))
1819 if (try_crossjump_to_edge (mode, e, e2))
1830 crossjumps_occured = true;
/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return nonzero if changes were made.  */
1839 try_optimize_cfg (int mode)
1841 bool changed_overall = false;
1844 basic_block bb, b, next;
1846 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1849 crossjumps_occured = false;
1852 update_forwarder_flag (bb);
1854 if (! targetm.cannot_modify_jumps_p ())
1857 /* Attempt to merge blocks as made possible by edge removal. If
1858 a block has only one successor, and the successor has only
1859 one predecessor, they may be combined. */
1867 "\n\ntry_optimize_cfg iteration %i\n\n",
1870 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1874 bool changed_here = false;
1876 /* Delete trivially dead basic blocks. */
1877 if (EDGE_COUNT (b->preds) == 0)
1881 fprintf (dump_file, "Deleting block %i.\n",
1884 delete_basic_block (b);
1885 if (!(mode & CLEANUP_CFGLAYOUT))
1887 /* Avoid trying to remove ENTRY_BLOCK_PTR. */
1888 b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
1892 /* Remove code labels no longer used. */
1893 if (single_pred_p (b)
1894 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1895 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
1896 && LABEL_P (BB_HEAD (b))
1897 /* If the previous block ends with a branch to this
1898 block, we can't delete the label. Normally this
1899 is a condjump that is yet to be simplified, but
1900 if CASE_DROPS_THRU, this can be a tablejump with
1901 some element going to the same place as the
1902 default (fallthru). */
1903 && (single_pred (b) == ENTRY_BLOCK_PTR
1904 || !JUMP_P (BB_END (single_pred (b)))
1905 || ! label_is_jump_target_p (BB_HEAD (b),
1906 BB_END (single_pred (b)))))
1908 rtx label = BB_HEAD (b);
1910 delete_insn_chain (label, label, false);
1911 /* If the case label is undeletable, move it after the
1912 BASIC_BLOCK note. */
1913 if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1915 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1917 reorder_insns_nobb (label, label, bb_note);
1918 BB_HEAD (b) = bb_note;
1919 if (BB_END (b) == bb_note)
1923 fprintf (dump_file, "Deleted label in block %i.\n",
1927 /* If we fall through an empty block, we can remove it. */
1928 if (!(mode & CLEANUP_CFGLAYOUT)
1929 && single_pred_p (b)
1930 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1931 && !LABEL_P (BB_HEAD (b))
1932 && FORWARDER_BLOCK_P (b)
1933 /* Note that forwarder_block_p true ensures that
1934 there is a successor for this block. */
1935 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
1936 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
1940 "Deleting fallthru block %i.\n",
1943 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1944 redirect_edge_succ_nodup (single_pred_edge (b),
1946 delete_basic_block (b);
1951 if (single_succ_p (b)
1952 && (s = single_succ_edge (b))
1953 && !(s->flags & EDGE_COMPLEX)
1954 && (c = s->dest) != EXIT_BLOCK_PTR
1955 && single_pred_p (c)
/* When not in cfg_layout mode use code aware of reordering
   INSNs.  This code possibly creates new basic blocks so it
   does not fit the merge_blocks interface and is kept here in
   the hope that it will become useless once more of the
   compiler is transformed to use cfg_layout mode.  */
1964 if ((mode & CLEANUP_CFGLAYOUT)
1965 && can_merge_blocks_p (b, c))
1967 merge_blocks (b, c);
1968 update_forwarder_flag (b);
1969 changed_here = true;
1971 else if (!(mode & CLEANUP_CFGLAYOUT)
1972 /* If the jump insn has side effects,
1973 we can't kill the edge. */
1974 && (!JUMP_P (BB_END (b))
1975 || (reload_completed
1976 ? simplejump_p (BB_END (b))
1977 : (onlyjump_p (BB_END (b))
1978 && !tablejump_p (BB_END (b),
1980 && (next = merge_blocks_move (s, b, c, mode)))
1983 changed_here = true;
1987 /* Simplify branch over branch. */
1988 if ((mode & CLEANUP_EXPENSIVE)
1989 && !(mode & CLEANUP_CFGLAYOUT)
1990 && try_simplify_condjump (b))
1991 changed_here = true;
1993 /* If B has a single outgoing edge, but uses a
1994 non-trivial jump instruction without side-effects, we
1995 can either delete the jump entirely, or replace it
1996 with a simple unconditional jump. */
1997 if (single_succ_p (b)
1998 && single_succ (b) != EXIT_BLOCK_PTR
1999 && onlyjump_p (BB_END (b))
2000 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2001 && try_redirect_by_replacing_jump (single_succ_edge (b),
2003 (mode & CLEANUP_CFGLAYOUT) != 0))
2005 update_forwarder_flag (b);
2006 changed_here = true;
2009 /* Simplify branch to branch. */
2010 if (try_forward_edges (mode, b))
2011 changed_here = true;
2013 /* Look for shared code between blocks. */
2014 if ((mode & CLEANUP_CROSSJUMP)
2015 && try_crossjump_bb (mode, b))
2016 changed_here = true;
/* Don't get confused by the index shift caused by
   deleting blocks.  */
2026 if ((mode & CLEANUP_CROSSJUMP)
2027 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2030 #ifdef ENABLE_CHECKING
2032 verify_flow_info ();
2035 changed_overall |= changed;
2042 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2044 return changed_overall;
2047 /* Delete all unreachable basic blocks. */
2050 delete_unreachable_blocks (void)
2052 bool changed = false;
2053 basic_block b, next_bb;
2055 find_unreachable_blocks ();
2057 /* Delete all unreachable basic blocks. */
2059 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2061 next_bb = b->next_bb;
2063 if (!(b->flags & BB_REACHABLE))
2065 delete_basic_block (b);
2071 tidy_fallthru_edges ();
/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing the tablejump insn as they are referenced by the
   preceding insns computing the destination, so we delay deleting them
   and garbage collect them once life information is computed.  */
2080 delete_dead_jumptables (void)
2084 /* A dead jump table does not belong to any basic block. Scan insns
2085 between two adjacent basic blocks. */
2090 for (insn = NEXT_INSN (BB_END (bb));
2091 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2094 next = NEXT_INSN (insn);
2096 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2098 && (GET_CODE (PATTERN (next)) == ADDR_VEC
2099 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC))
2101 rtx label = insn, jump = next;
2104 fprintf (dump_file, "Dead jumptable %i removed\n",
2107 next = NEXT_INSN (next);
2109 delete_insn (label);
2116 /* Tidy the CFG by deleting unreachable code and whatnot. */
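
/* A typical invocation from a pass looks like (a usage sketch; the
   CLEANUP_* mode flags are declared in basic-block.h):

	cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   which enables the expensive transformations and tail merging in a
   single call.  */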
2119 cleanup_cfg (int mode)
2121 bool changed = false;
2123 /* Set the cfglayout mode flag here. We could update all the callers
2124 but that is just inconvenient, especially given that we eventually
2125 want to have cfglayout mode as the default. */
2126 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2127 mode |= CLEANUP_CFGLAYOUT;
2129 timevar_push (TV_CLEANUP_CFG);
2130 if (delete_unreachable_blocks ())
/* We've possibly created trivially dead code.  Clean it up right
   now to introduce more opportunities for try_optimize_cfg.  */
2135 if (!(mode & (CLEANUP_NO_INSN_DEL))
2136 && !reload_completed)
2137 delete_trivially_dead_insns (get_insns (), max_reg_num ());
/* To tail-merge blocks ending in the same noreturn function (e.g.
   a call to abort) we have to insert fake edges to exit.  Do this
   here once.  The fake edges do not interfere with any other CFG
   cleanups.  */
2146 if (mode & CLEANUP_CROSSJUMP)
2147 add_noreturn_fake_exit_edges ();
2149 if (!dbg_cnt (cfg_cleanup))
2152 while (try_optimize_cfg (mode))
2154 delete_unreachable_blocks (), changed = true;
2155 if (!(mode & CLEANUP_NO_INSN_DEL))
/* Try to remove some trivially dead insns when doing an expensive
   cleanup.  But delete_trivially_dead_insns doesn't work after
   reload (it only handles pseudos) and run_fast_dce is too costly
   to run in every iteration.

   For effective cross jumping, we really want to run a fast DCE to
   clean up any dead conditions, or they get in the way of performing
   useful tail merges.

   Other transformations in cleanup_cfg are not so sensitive to dead
   code, so delete_trivially_dead_insns or even doing nothing at all
   is good enough.  */
2169 if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
2170 && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2172 else if ((mode & CLEANUP_CROSSJUMP)
2173 && crossjumps_occured)
2180 if (mode & CLEANUP_CROSSJUMP)
2181 remove_fake_exit_edges ();
2183 /* Don't call delete_dead_jumptables in cfglayout mode, because
2184 that function assumes that jump tables are in the insns stream.
2185 But we also don't _have_ to delete dead jumptables in cfglayout
2186 mode because we shouldn't even be looking at things that are
2187 not in a basic block. Dead jumptables are cleaned up when
2188 going out of cfglayout mode. */
2189 if (!(mode & CLEANUP_CFGLAYOUT))
2190 delete_dead_jumptables ();
2192 timevar_pop (TV_CLEANUP_CFG);
2198 rest_of_handle_jump (void)
2200 delete_unreachable_blocks ();
2202 if (crtl->tail_call_emit)
2203 fixup_tail_calls ();
2207 struct rtl_opt_pass pass_jump =
2211 "sibling", /* name */
2213 rest_of_handle_jump, /* execute */
2216 0, /* static_pass_number */
2217 TV_JUMP, /* tv_id */
2218 0, /* properties_required */
2219 0, /* properties_provided */
2220 0, /* properties_destroyed */
2221 TODO_ggc_collect, /* todo_flags_start */
2222 TODO_verify_flow, /* todo_flags_finish */
2228 rest_of_handle_jump2 (void)
2230 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2232 dump_flow_info (dump_file, dump_flags);
2233 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
2234 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2239 struct rtl_opt_pass pass_jump2 =
2245 rest_of_handle_jump2, /* execute */
2248 0, /* static_pass_number */
2249 TV_JUMP, /* tv_id */
2250 0, /* properties_required */
2251 0, /* properties_provided */
2252 0, /* properties_destroyed */
2253 TODO_ggc_collect, /* todo_flags_start */
2254 TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */