1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 /* This file contains optimizer of the control flow. The main entrypoint is
23 cleanup_cfg. Following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (edge to the forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by
28 underlying infrastructure so branch can be converted to simplejump or
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
41 #include "insn-config.h"
50 /* cleanup_cfg maintains following flags for each basic block. */
/* NOTE(review): the opening of the enum bb_flags declaration and the
   BB_UPDATE_LIFE enumerator (presumably = 1) are elided from this
   view -- confirm against the full file.  */
54 /* Set if life info needs to be recomputed for given BB. */
56 /* Set if BB is the forwarder block to avoid too many
57 forwarder_block_p calls. */
58 BB_FORWARDER_BLOCK = 2
/* Accessors for the per-block flag word that cleanup_cfg stashes in
   bb->aux: the aux pointer is reused as a small integer bitmask for
   the duration of the pass.  */
61 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
62 #define BB_SET_FLAG(BB, FLAG) \
63 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
64 #define BB_CLEAR_FLAG(BB, FLAG) \
65 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
/* True iff BB has been cached as a forwarder block (a block that does
   nothing but transfer control to its single successor).  */
67 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
69 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
70 static bool try_crossjump_bb PARAMS ((int, basic_block));
71 static bool outgoing_edges_match PARAMS ((int,
72 basic_block, basic_block));
73 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
75 static bool insns_match_p PARAMS ((int, rtx, rtx));
77 static bool delete_unreachable_blocks PARAMS ((void));
78 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
79 static bool tail_recursion_label_p PARAMS ((rtx));
80 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
82 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
84 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
86 static bool try_optimize_cfg PARAMS ((int));
87 static bool try_simplify_condjump PARAMS ((basic_block));
88 static bool try_forward_edges PARAMS ((int, basic_block));
89 static edge thread_jump PARAMS ((int, edge, basic_block));
90 static bool mark_effect PARAMS ((rtx, bitmap));
91 static void notice_new_block PARAMS ((basic_block));
92 static void update_forwarder_flag PARAMS ((basic_block));
94 /* Set flags for newly created block.  Marks BB as needing life-info
   recomputation and caches its forwarder status so later queries can
   use the cheap FORWARDER_BLOCK_P macro.
   NOTE(review): the function header line of notice_new_block (and,
   presumably, a NULL-bb guard) is elided from this view -- confirm
   against the full file.  */
103 BB_SET_FLAG (bb, BB_UPDATE_LIFE);
104 if (forwarder_block_p (bb))
105 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
108 /* Recompute forwarder flag after block has been modified, keeping the
   cached BB_FORWARDER_BLOCK bit in sync with forwarder_block_p.  */
111 update_forwarder_flag (bb)
/* NOTE(review): the static void return type, parameter declaration and
   the else keyword between the two branches are elided from this
   view -- confirm against the full file.  */
114 if (forwarder_block_p (bb))
115 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
/* Otherwise drop the stale cached flag.  */
117 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
120 /* Simplify a conditional jump around an unconditional jump.
121 Return true if something changed.

   CBRANCH_BLOCK must end in a conditional branch whose fallthru
   successor is a forwarder block holding only an unconditional jump;
   the condition is inverted, the branch retargeted, and the forwarder
   block deleted.
   NOTE(review): several physical lines (return type, early "return
   false" statements, braces, second arguments of the redirect calls)
   are elided from this view -- confirm against the full file.  */
124 try_simplify_condjump (cbranch_block)
125 basic_block cbranch_block;
127 basic_block jump_block, jump_dest_block, cbranch_dest_block;
128 edge cbranch_jump_edge, cbranch_fallthru_edge;
131 /* Verify that there are exactly two successors. */
132 if (!cbranch_block->succ
133 || !cbranch_block->succ->succ_next
134 || cbranch_block->succ->succ_next->succ_next)
137 /* Verify that we've got a normal conditional branch at the end
139 cbranch_insn = cbranch_block->end;
140 if (!any_condjump_p (cbranch_insn))
143 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
144 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
146 /* The next block must not have multiple predecessors, must not
147 be the last block in the function, and must contain just the
148 unconditional jump. */
149 jump_block = cbranch_fallthru_edge->dest;
150 if (jump_block->pred->pred_next
151 || jump_block->index == n_basic_blocks - 1
152 || !FORWARDER_BLOCK_P (jump_block))
154 jump_dest_block = jump_block->succ->dest;
156 /* The conditional branch must target the block after the
157 unconditional branch. */
158 cbranch_dest_block = cbranch_jump_edge->dest;
160 if (!can_fallthru (jump_block, cbranch_dest_block))
163 /* Invert the conditional branch. */
164 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
168 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
169 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
171 /* Success. Update the CFG to match. Note that after this point
172 the edge variable names appear backwards; the redirection is done
173 this way to preserve edge profile data. */
174 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
176 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
/* Swap the FALLTHRU marking to match the swapped destinations.  */
178 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
179 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
181 /* Delete the block with the unconditional jump, and clean up the mess. */
182 flow_delete_block (jump_block);
183 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
188 /* Attempt to prove that operation is NOOP using CSElib or mark the effect
189 on register. Used by jump threading.

   Clears bits in NONEQUAL for registers whose values are known dead or
   provably equal; sets bits for registers whose values may differ.
   NOTE(review): the function's return type, parameter declarations and
   the switch case labels (presumably CLOBBER and SET) are elided from
   this view -- confirm against the full file.  */
192 mark_effect (exp, nonequal)
198 switch (GET_CODE (exp))
200 /* In case we do clobber the register, mark it as equal, as we know the
201 value is dead so it doesn't have to match. */
203 if (REG_P (XEXP (exp, 0)))
205 dest = XEXP (exp, 0);
206 regno = REGNO (dest);
207 CLEAR_REGNO_REG_SET (nonequal, regno);
/* Hard registers may span several regs; clear each of them.  */
208 if (regno < FIRST_PSEUDO_REGISTER)
210 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
212 CLEAR_REGNO_REG_SET (nonequal, regno + n);
/* A self-assignment under cselib equivalence is a no-op; anything
   else marks the destination as potentially unequal.  */
218 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
220 dest = SET_DEST (exp);
225 regno = REGNO (dest);
226 SET_REGNO_REG_SET (nonequal, regno);
227 if (regno < FIRST_PSEUDO_REGISTER)
229 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
231 SET_REGNO_REG_SET (nonequal, regno + n);
239 /* Attempt to prove that the basic block B will have no side effects and
240 always continues in the same edge if reached via E. Return the edge
241 if it exists, NULL otherwise.

   Uses cselib to simulate execution of E->src followed by B and checks
   that B's conditional is implied by E->src's, so the jump in B can be
   threaded.
   NOTE(review): many physical lines (return statements, braces, the
   failed_exit label, variable declarations such as nonequal/failed/i)
   are elided from this view -- confirm against the full file.  */
244 thread_jump (mode, e, b)
249 rtx set1, set2, cond1, cond2, insn;
250 enum rtx_code code1, code2, reversed_code2;
251 bool reverse1 = false;
256 /* At the moment, we do handle only conditional jumps, but later we may
257 want to extend this code to tablejumps and others. */
258 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
260 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
263 /* Second branch must end with onlyjump, as we will eliminate the jump. */
264 if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
265 || !onlyjump_p (b->end))
268 set1 = pc_set (e->src->end);
269 set2 = pc_set (b->end);
/* Determine whether the condition of the first jump must be read
   reversed for the taken path along E.  */
270 if (((e->flags & EDGE_FALLTHRU) != 0)
271 != (XEXP (SET_SRC (set1), 0) == pc_rtx))
274 cond1 = XEXP (SET_SRC (set1), 0);
275 cond2 = XEXP (SET_SRC (set2), 0);
277 code1 = reversed_comparison_code (cond1, b->end);
279 code1 = GET_CODE (cond1);
281 code2 = GET_CODE (cond2);
282 reversed_code2 = reversed_comparison_code (cond2, b->end);
/* The first condition must decide the second one way or the other.  */
284 if (!comparison_dominates_p (code1, code2)
285 && !comparison_dominates_p (code1, reversed_code2))
288 /* Ensure that the comparison operators are equivalent.
289 ??? This is far too pessimistic. We should allow swapped operands,
290 different CCmodes, or for example comparisons for interval, that
291 dominate even when operands are not equivalent. */
292 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
293 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
296 /* Short circuit cases where block B contains some side effects, as we can't
298 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
299 insn = NEXT_INSN (insn))
300 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
305 /* First process all values computed in the source basic block. */
306 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
307 insn = NEXT_INSN (insn))
309 cselib_process_insn (insn);
311 nonequal = BITMAP_XMALLOC();
312 CLEAR_REG_SET (nonequal);
314 /* Now assume that we've continued by the edge E to B and continue
315 processing as if it were same basic block.
316 Our goal is to prove that whole block is a NOOP. */
318 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
319 insn = NEXT_INSN (insn))
323 rtx pat = PATTERN (insn);
325 if (GET_CODE (pat) == PARALLEL)
327 for (i = 0; i < XVECLEN (pat, 0); i++)
328 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
331 failed |= mark_effect (pat, nonequal);
334 cselib_process_insn (insn);
337 /* Later we should clear nonequal of dead registers. So far we don't
338 have life information in cfg_cleanup. */
342 /* In case liveness information is available, we need to prove equivalence
343 only of the live values. */
344 if (mode & CLEANUP_UPDATE_LIFE)
345 AND_REG_SET (nonequal, b->global_live_at_end);
/* Any bit left set means some register value may differ: fail.  */
347 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
349 BITMAP_XFREE (nonequal);
/* Pick the successor edge of B that the dominating condition forces.  */
351 if ((comparison_dominates_p (code1, code2) != 0)
352 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
353 return BRANCH_EDGE (b);
355 return FALLTHRU_EDGE (b);
/* Failure path: release the bitmap before returning.  */
358 BITMAP_XFREE (nonequal);
363 /* Attempt to forward edges leaving basic block B.
364 Return true if successful.

   Walks each outgoing edge through chains of forwarder blocks (and,
   with CLEANUP_THREADING, through threadable conditional jumps found
   by thread_jump), redirects the edge to the final target, and fixes
   up profile counts/frequencies along the traversed chain.
   NOTE(review): many physical lines (braces, counter declaration and
   increments, break/continue statements, profile bookkeeping) are
   elided from this view -- confirm against the full file.  */
367 try_forward_edges (mode, b)
371 bool changed = false;
372 edge e, next, *threaded_edges = NULL;
373 int nthreaded_edges = 0;
375 for (e = b->succ; e; e = next)
377 basic_block target, first;
379 bool threaded = false;
383 /* Skip complex edges because we don't know how to update them.
385 Still handle fallthru edges, as we can succeed to forward fallthru
386 edge to the same place as the branch edge of conditional branch
387 and turn conditional branch to an unconditional branch. */
388 if (e->flags & EDGE_COMPLEX)
391 target = first = e->dest;
/* counter bounds the walk; reaching n_basic_blocks means a cycle.  */
394 while (counter < n_basic_blocks)
396 basic_block new_target = NULL;
397 bool new_target_threaded = false;
399 if (FORWARDER_BLOCK_P (target)
400 && target->succ->dest != EXIT_BLOCK_PTR)
402 /* Bypass trivial infinite loops. */
403 if (target == target->succ->dest)
404 counter = n_basic_blocks;
405 new_target = target->succ->dest;
408 /* Allow to thread only over one edge at time to simplify updating
410 else if (mode & CLEANUP_THREADING)
412 edge t = thread_jump (mode, e, target);
415 new_target = t->dest;
416 new_target_threaded = true;
/* Lazily allocate the list of threaded edges on first use.  */
417 if (!nthreaded_edges)
418 threaded_edges = xmalloc (sizeof (*threaded_edges)
420 threaded_edges[nthreaded_edges++] = t;
427 /* Avoid killing of loop pre-headers, as it is the place loop
428 optimizer wants to hoist code to.
430 For fallthru forwarders, the LOOP_BEG note must appear between
431 the header of block and CODE_LABEL of the loop, for non forwarders
432 it must appear before the JUMP_INSN. */
433 if (mode & CLEANUP_PRE_LOOP)
435 rtx insn = (target->succ->flags & EDGE_FALLTHRU
436 ? target->head : prev_nonnote_insn (target->end));
438 if (GET_CODE (insn) != NOTE)
439 insn = NEXT_INSN (insn);
/* Scan forward for a LOOP_BEG note before any label or insn.  */
441 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
442 insn = NEXT_INSN (insn))
443 if (GET_CODE (insn) == NOTE
444 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
447 if (GET_CODE (insn) == NOTE)
453 threaded |= new_target_threaded;
456 if (counter >= n_basic_blocks)
459 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
462 else if (target == first)
463 ; /* We didn't do anything. */
466 /* Save the values now, as the edge may get removed. */
467 gcov_type edge_count = e->count;
468 int edge_probability = e->probability;
472 /* Don't force if target is exit block. */
473 if (threaded && target != EXIT_BLOCK_PTR)
475 notice_new_block (redirect_edge_and_branch_force (e, target));
477 fprintf (rtl_dump_file, "Conditionals threaded.\n");
479 else if (!redirect_edge_and_branch (e, target))
482 fprintf (rtl_dump_file,
483 "Forwarding edge %i->%i to %i failed.\n",
484 b->index, e->dest->index, target->index);
488 /* We successfully forwarded the edge. Now update profile
489 data: for each edge we traversed in the chain, remove
490 the original edge's execution count. */
491 edge_frequency = ((edge_probability * b->frequency
492 + REG_BR_PROB_BASE / 2)
/* B may have become a forwarder block after redirection.  */
495 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
496 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
497 BB_SET_FLAG (b, BB_UPDATE_LIFE);
/* Walk the traversed chain, subtracting the moved counts.  */
503 first->count -= edge_count;
504 first->succ->count -= edge_count;
505 first->frequency -= edge_frequency;
506 if (first->succ->succ_next)
507 t = threaded_edges [n++];
513 while (first != target);
520 free (threaded_edges);
524 /* Return true if LABEL is a target of JUMP_INSN. This applies only
525 to non-complex jumps. That is, direct unconditional, conditional,
526 and tablejumps, but not computed jumps or returns. It also does
527 not apply to the fallthru case of a conditional jump.

   NOTE(review): the return type, braces, the direct JUMP_LABEL
   comparison and the return statements are elided from this view --
   confirm against the full file.  */
530 label_is_jump_target_p (label, jump_insn)
531 rtx label, jump_insn;
533 rtx tmp = JUMP_LABEL (jump_insn);
/* Recognize a dispatch table (ADDR_VEC/ADDR_DIFF_VEC) following the
   label JUMP_LABEL points at, and search its entries for LABEL.  */
539 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
540 && GET_CODE (tmp) == JUMP_INSN
541 && (tmp = PATTERN (tmp),
542 GET_CODE (tmp) == ADDR_VEC
543 || GET_CODE (tmp) == ADDR_DIFF_VEC))
/* For ADDR_DIFF_VEC the label vector is operand 1, else operand 0.  */
545 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
546 int i, veclen = GET_NUM_ELEM (vec);
548 for (i = 0; i < veclen; ++i)
549 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
556 /* Return true if LABEL is used for tail recursion, i.e. appears in the
   global tail_recursion_label_list (an EXPR_LIST chain).
   NOTE(review): the return type, parameter declaration, local x and
   the return statements are elided from this view.  */
559 tail_recursion_label_p (label)
564 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
565 if (label == XEXP (x, 0))
571 /* Blocks A and B are to be merged into a single block. A has no incoming
572 fallthru edge, so it can be moved before B without adding or modifying
573 any jumps (aside from the jump from A to B).

   NOTE(review): several physical lines (abort on missing barrier,
   expand_to_variable location, the index swap bookkeeping) are elided
   from this view -- confirm against the full file.  */
576 merge_blocks_move_predecessor_nojumps (a, b)
/* A must be followed by a barrier; remove it before splicing.  */
582 barrier = next_nonnote_insn (a->end);
583 if (GET_CODE (barrier) != BARRIER)
585 delete_insn (barrier);
587 /* Move block and loop notes out of the chain so that we do not
590 ??? A better solution would be to squeeze out all the non-nested notes
591 and adjust the block trees appropriately. Even better would be to have
592 a tighter connection between block trees and rtl so that this is not
594 if (squeeze_notes (&a->head, &a->end))
597 /* Scramble the insn chain. */
598 if (a->end != PREV_INSN (b->head))
599 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
600 BB_SET_FLAG (a, BB_UPDATE_LIFE);
603 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
606 /* Swap the records for the two blocks around. Although we are deleting B,
607 A is now where B was and we want to compact the BB array from where
609 BASIC_BLOCK (a->index) = b;
610 BASIC_BLOCK (b->index) = a;
615 /* Now blocks A and B are contiguous. Merge them. */
616 merge_blocks_nomove (a, b);
619 /* Blocks A and B are to be merged into a single block. B has no outgoing
620 fallthru edge, so it can be moved after A without adding or modifying
621 any jumps (aside from the jump from A to B).

   NOTE(review): several physical lines (the real_b_end save/restore,
   braces, the condition head before the jump-table test) are elided
   from this view -- confirm against the full file.  */
624 merge_blocks_move_successor_nojumps (a, b)
627 rtx barrier, real_b_end;
630 barrier = NEXT_INSN (b->end);
632 /* Recognize a jump table following block B. */
634 && GET_CODE (barrier) == CODE_LABEL
635 && NEXT_INSN (barrier)
636 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
637 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
638 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
640 /* Temporarily add the table jump insn to b, so that it will also
641 be moved to the correct location. */
642 b->end = NEXT_INSN (barrier);
643 barrier = NEXT_INSN (b->end);
646 /* There had better have been a barrier there. Delete it. */
647 if (barrier && GET_CODE (barrier) == BARRIER)
648 delete_insn (barrier);
650 /* Move block and loop notes out of the chain so that we do not
653 ??? A better solution would be to squeeze out all the non-nested notes
654 and adjust the block trees appropriately. Even better would be to have
655 a tighter connection between block trees and rtl so that this is not
657 if (squeeze_notes (&b->head, &b->end))
660 /* Scramble the insn chain. */
661 reorder_insns_nobb (b->head, b->end, a->end)
663 /* Restore the real end of b. */
666 /* Now blocks A and B are contiguous. Merge them. */
667 merge_blocks_nomove (a, b);
668 BB_SET_FLAG (a, BB_UPDATE_LIFE);
671 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
675 /* Attempt to merge basic blocks that are potentially non-adjacent.
676 Return true iff the attempt succeeded.

   E is the edge from B to C.  Three strategies are tried: merge in
   place when B falls through to C; otherwise, when CLEANUP_EXPENSIVE
   is set, move C after B or B before C so the merge becomes possible.
   NOTE(review): many physical lines (return statements, braces, the
   bb local declaration, several else branches) are elided from this
   view -- confirm against the full file.  */
679 merge_blocks (e, b, c, mode)
684 /* If C has a tail recursion label, do not merge. There is no
685 edge recorded from the call_placeholder back to this label, as
686 that would make optimize_sibling_and_tail_recursive_calls more
687 complex for no gain. */
688 if ((mode & CLEANUP_PRE_SIBCALL)
689 && GET_CODE (c->head) == CODE_LABEL
690 && tail_recursion_label_p (c->head))
693 /* If B has a fallthru edge to C, no need to move anything. */
694 if (e->flags & EDGE_FALLTHRU)
696 /* We need to update liveness in case C already has broken liveness
697 or B ends by conditional jump to next instructions that will be
699 if ((BB_FLAGS (c) & BB_UPDATE_LIFE)
700 || GET_CODE (b->end) == JUMP_INSN)
701 BB_SET_FLAG (b, BB_UPDATE_LIFE);
702 merge_blocks_nomove (b, c);
703 update_forwarder_flag (b);
706 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
712 /* Otherwise we will need to move code around. Do that only if expensive
713 transformations are allowed. */
714 else if (mode & CLEANUP_EXPENSIVE)
716 edge tmp_edge, b_fallthru_edge;
717 bool c_has_outgoing_fallthru;
718 bool b_has_incoming_fallthru;
720 /* Avoid overactive code motion, as the forwarder blocks should be
721 eliminated by edge redirection instead. One exception might have
722 been if B is a forwarder block and C has no fallthru edge, but
723 that should be cleaned up by bb-reorder instead. */
724 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
727 /* We must make sure to not munge nesting of lexical blocks,
728 and loop notes. This is done by squeezing out all the notes
729 and leaving them there to lie. Not ideal, but functional. */
731 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
732 if (tmp_edge->flags & EDGE_FALLTHRU)
735 c_has_outgoing_fallthru = (tmp_edge != NULL);
737 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
738 if (tmp_edge->flags & EDGE_FALLTHRU)
741 b_has_incoming_fallthru = (tmp_edge != NULL);
742 b_fallthru_edge = tmp_edge;
744 /* Otherwise, we're going to try to move C after B. If C does
745 not have an outgoing fallthru, then it can be moved
746 immediately after B without introducing or modifying jumps. */
747 if (! c_has_outgoing_fallthru)
749 merge_blocks_move_successor_nojumps (b, c);
753 /* If B does not have an incoming fallthru, then it can be moved
754 immediately before C without introducing or modifying jumps.
755 C cannot be the first block, so we do not have to worry about
756 accessing a non-existent block. */
758 if (b_has_incoming_fallthru)
/* An incoming fallthru from ENTRY must be forced onto a jump first.  */
762 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
764 bb = force_nonfallthru (b_fallthru_edge);
766 notice_new_block (bb);
768 BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
771 merge_blocks_move_predecessor_nojumps (b, c);
779 /* Return true if I1 and I2 are equivalent and thus can be crossjumped.

   Compares the insn patterns (register-renumbered when appropriate),
   CALL_INSN usage, and -- after reg-stack conversion -- REG_DEAD notes
   for stack registers.  REG_EQUAL notes are used as a fallback to
   establish equivalence of differing SET_SRCs.
   NOTE(review): many physical lines (p1/p2 initialization, return
   statements, braces, the `done:' label, the condition head before
   line `848') are elided from this view -- confirm against the full
   file.  */
782 insns_match_p (mode, i1, i2)
783 int mode ATTRIBUTE_UNUSED;
788 /* Verify that I1 and I2 are equivalent. */
789 if (GET_CODE (i1) != GET_CODE (i2))
795 if (GET_CODE (p1) != GET_CODE (p2))
798 /* If this is a CALL_INSN, compare register usage information.
799 If we don't check this on stack register machines, the two
800 CALL_INSNs might be merged leaving reg-stack.c with mismatching
801 numbers of stack registers in the same basic block.
802 If we don't check this on machines with delay slots, a delay slot may
803 be filled that clobbers a parameter expected by the subroutine.
805 ??? We take the simple route for now and assume that if they're
806 equal, they were constructed identically. */
808 if (GET_CODE (i1) == CALL_INSN
809 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
810 CALL_INSN_FUNCTION_USAGE (i2)))
814 /* If cross_jump_death_matters is not 0, the insn's mode
815 indicates whether or not the insn contains any stack-like
818 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
820 /* If register stack conversion has already been done, then
821 death notes must also be compared before it is certain that
822 the two instruction streams match. */
825 HARD_REG_SET i1_regset, i2_regset;
827 CLEAR_HARD_REG_SET (i1_regset);
828 CLEAR_HARD_REG_SET (i2_regset);
/* Collect the stack registers dying at each insn.  */
830 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
831 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
832 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
834 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
835 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
836 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
838 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
/* Before reload we can compare with renumbering, after it exactly.  */
848 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
850 /* The following code helps take care of G++ cleanups. */
851 rtx equiv1 = find_reg_equal_equiv_note (i1);
852 rtx equiv2 = find_reg_equal_equiv_note (i2);
855 /* If the equivalences are not to a constant, they may
856 reference pseudos that no longer exist, so we can't
858 && (! reload_completed
859 || (CONSTANT_P (XEXP (equiv1, 0))
860 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
862 rtx s1 = single_set (i1);
863 rtx s2 = single_set (i2);
864 if (s1 != 0 && s2 != 0
865 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
/* Tentatively rewrite both sources to the common equivalent value;
   keep the change only if the patterns then match.  */
867 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
868 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
869 if (! rtx_renumbered_equal_p (p1, p2))
871 else if (apply_change_group ())
882 /* Look through the insns at the end of BB1 and BB2 and find the longest
883 sequence that is equivalent. Store the first insns for that sequence
884 in *F1 and *F2 and return the sequence length.
886 To simplify callers of this function, if the blocks match exactly,
887 store the head of the blocks in *F1 and *F2.

   NOTE(review): many physical lines (i1/i2 initialization, the main
   matching loop's braces and decrements, ninsns declaration, the
   *F1/*F2 stores and return) are elided from this view -- confirm
   against the full file.  */
890 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
891 int mode ATTRIBUTE_UNUSED;
892 basic_block bb1, bb2;
895 rtx i1, i2, last1, last2, afterlast1, afterlast2;
898 /* Skip simple jumps at the end of the blocks. Complex jumps still
899 need to be compared for equivalence, which we'll do below. */
902 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
904 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
912 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
915 /* Count everything except for unconditional jump as insn. */
916 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
/* Walk both blocks backwards over notes/labels to the next real insn.  */
924 while (!active_insn_p (i1) && i1 != bb1->head)
927 while (!active_insn_p (i2) && i2 != bb2->head)
930 if (i1 == bb1->head || i2 == bb2->head)
933 if (!insns_match_p (mode, i1, i2))
936 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
937 if (active_insn_p (i1))
939 /* If the merged insns have different REG_EQUAL notes, then
941 rtx equiv1 = find_reg_equal_equiv_note (i1);
942 rtx equiv2 = find_reg_equal_equiv_note (i2);
944 if (equiv1 && !equiv2)
945 remove_note (i1, equiv1);
946 else if (!equiv1 && equiv2)
947 remove_note (i2, equiv2);
948 else if (equiv1 && equiv2
949 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
951 remove_note (i1, equiv1);
952 remove_note (i2, equiv2);
/* Remember the previous pair so a trailing cc0 user can be dropped.  */
955 afterlast1 = last1, afterlast2 = last2;
956 last1 = i1, last2 = i2;
965 /* Don't allow the insn after a compare to be shared by
966 cross-jumping unless the compare is also shared. */
967 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
968 last1 = afterlast1, last2 = afterlast2, ninsns--;
971 /* Include preceding notes and labels in the cross-jump. One,
972 this may bring us to the head of the blocks as requested above.
973 Two, it keeps line number notes as matched as may be. */
976 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
977 last1 = PREV_INSN (last1);
979 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
980 last1 = PREV_INSN (last1);
982 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
983 last2 = PREV_INSN (last2);
985 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
986 last2 = PREV_INSN (last2);
995 /* Return true iff outgoing edges of BB1 and BB2 match, together with
996 the branch instruction. This means that if we commonize the control
flow before end of the basic block, the semantic remains unchanged.
999 We may assume that there exists one edge with a common destination.

   NOTE(review): many physical lines (return statements, braces, the
   reverse computation, prob1/prob2/note1/note2 declarations, the
   fallthru/EH edge counting bodies) are elided from this view --
   confirm against the full file.  */
1002 outgoing_edges_match (mode, bb1, bb2)
1007 int nehedges1 = 0, nehedges2 = 0;
1008 edge fallthru1 = 0, fallthru2 = 0;
1011 /* If BB1 has only one successor, we may be looking at either an
1012 unconditional jump, or a fake edge to exit. */
1013 if (bb1->succ && !bb1->succ->succ_next
1014 && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
1015 return (bb2->succ && !bb2->succ->succ_next
1016 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);
1018 /* Match conditional jumps - this may get tricky when fallthru and branch
1019 edges are crossed. */
1021 && bb1->succ->succ_next
1022 && !bb1->succ->succ_next->succ_next
1023 && any_condjump_p (bb1->end)
1024 && onlyjump_p (bb1->end))
1026 edge b1, f1, b2, f2;
1027 bool reverse, match;
1028 rtx set1, set2, cond1, cond2;
1029 enum rtx_code code1, code2;
/* BB2 must also be a two-successor block ending in a plain condjump.  */
1032 || !bb2->succ->succ_next
1033 || bb1->succ->succ_next->succ_next
1034 || !any_condjump_p (bb2->end)
1035 || !onlyjump_p (bb1->end))
1038 b1 = BRANCH_EDGE (bb1);
1039 b2 = BRANCH_EDGE (bb2);
1040 f1 = FALLTHRU_EDGE (bb1);
1041 f2 = FALLTHRU_EDGE (bb2);
1043 /* Get around possible forwarders on fallthru edges. Other cases
1044 should be optimized out already. */
1045 if (FORWARDER_BLOCK_P (f1->dest))
1046 f1 = f1->dest->succ;
1048 if (FORWARDER_BLOCK_P (f2->dest))
1049 f2 = f2->dest->succ;
1051 /* To simplify use of this function, return false if there are
1052 unneeded forwarder blocks. These will get eliminated later
1053 during cleanup_cfg. */
1054 if (FORWARDER_BLOCK_P (f1->dest)
1055 || FORWARDER_BLOCK_P (f2->dest)
1056 || FORWARDER_BLOCK_P (b1->dest)
1057 || FORWARDER_BLOCK_P (b2->dest))
/* Destinations may agree directly, or with branch/fallthru swapped
   (in which case one condition must be the reverse of the other).  */
1060 if (f1->dest == f2->dest && b1->dest == b2->dest)
1062 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1067 set1 = pc_set (bb1->end);
1068 set2 = pc_set (bb2->end);
1069 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1070 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1073 cond1 = XEXP (SET_SRC (set1), 0);
1074 cond2 = XEXP (SET_SRC (set2), 0);
1075 code1 = GET_CODE (cond1);
1077 code2 = reversed_comparison_code (cond2, bb2->end);
1079 code2 = GET_CODE (cond2);
1081 if (code2 == UNKNOWN)
1084 /* Verify codes and operands match. */
1085 match = ((code1 == code2
1086 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1087 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1088 || (code1 == swap_condition (code2)
1089 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1091 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1094 /* If we return true, we will join the blocks. Which means that
1095 we will only have one branch prediction bit to work with. Thus
1096 we require the existing branches to have probabilities that are
1098 /* ??? We should use bb->frequency to allow merging in infrequently
1099 executed blocks, but at the moment it is not available when
1100 cleanup_cfg is run. */
1101 if (match && !optimize_size)
1106 note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
1107 note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);
1111 prob1 = INTVAL (XEXP (note1, 0));
1112 prob2 = INTVAL (XEXP (note2, 0));
/* Normalize prob2 when the branch sense was reversed.  */
1114 prob2 = REG_BR_PROB_BASE - prob2;
1116 /* Fail if the difference in probabilities is
1118 if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
1122 else if (note1 || note2)
1126 if (rtl_dump_file && match)
1127 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1128 bb1->index, bb2->index);
1133 /* Generic case - we are seeing a computed jump, table jump or trapping
1136 /* First ensure that the instructions match. There may be many outgoing
1137 edges so this test is generally cheaper.
1138 ??? Currently the tablejumps will never match, as they do have
1139 different tables. */
1140 if (!insns_match_p (mode, bb1->end, bb2->end))
1143 /* Search the outgoing edges, ensure that the counts do match, find possible
1144 fallthru and exception handling edges since these needs more
1146 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1147 e1 = e1->succ_next, e2 = e2->succ_next)
1149 if (e1->flags & EDGE_EH)
1152 if (e2->flags & EDGE_EH)
1155 if (e1->flags & EDGE_FALLTHRU)
1157 if (e2->flags & EDGE_FALLTHRU)
1161 /* If number of edges of various types does not match, fail. */
1163 || nehedges1 != nehedges2
1164 || (fallthru1 != 0) != (fallthru2 != 0))
1167 /* fallthru edges must be forwarded to the same destination. */
1170 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1171 ? fallthru1->dest->succ->dest: fallthru1->dest);
1172 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1173 ? fallthru2->dest->succ->dest: fallthru2->dest);
1179 /* In case we do have EH edges, ensure we are in the same region. */
1182 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1183 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1185 if (XEXP (n1, 0) != XEXP (n2, 0))
1189 /* We don't need to match the rest of edges as above checks should be enough
1190 to ensure that they are equivalent. */
1194 /* E1 and E2 are edges with the same destination block. Search their
1195 predecessors for common code. If found, redirect control flow from
1196 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1199 try_crossjump_to_edge (mode, e1, e2)
/* MODE is the CLEANUP_* flag mask (an int, per the forward declaration
   in the file header); E1 and E2 are two incoming edges of the same
   destination block.  Returns true when a cross jump was performed.
   NOTE(review): this listing elides several original lines (local
   declarations such as `s', `s2', `nmatch', `note', `label', `last',
   the early `return false' statements, and some loop/if headers); the
   comments below describe only the code that is visible.  */
1204 basic_block src1 = e1->src, src2 = e2->src;
1205 basic_block redirect_to;
1206 rtx newpos1, newpos2;
1212 /* Search backward through forwarder blocks. We don't need to worry
1213 about multiple entry or chained forwarders, as they will be optimized
1214 away. We do this to look past the unconditional jump following a
1215 conditional jump that is required due to the current CFG shape. */
/* Only step back through a forwarder that has a single predecessor.  */
1217 && !src1->pred->pred_next
1218 && FORWARDER_BLOCK_P (src1))
1219 e1 = src1->pred, src1 = e1->src;
1222 && !src2->pred->pred_next
1223 && FORWARDER_BLOCK_P (src2))
1224 e2 = src2->pred, src2 = e2->src;
1226 /* Nothing to do if we reach ENTRY, or a common source block. */
1227 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1232 /* Seeing more than 1 forwarder blocks would confuse us later... */
1233 if (FORWARDER_BLOCK_P (e1->dest)
1234 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1237 if (FORWARDER_BLOCK_P (e2->dest)
1238 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1241 /* Likewise with dead code (possibly newly created by the other optimizations
1243 if (!src1->pred || !src2->pred)
1246 /* Look for the common insn sequence, part the first ... */
1247 if (!outgoing_edges_match (mode, src1, src2))
1250 /* ... and part the second. */
1251 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1255 /* Avoid splitting if possible. */
/* If the matched run begins at SRC2's head, all of SRC2 can be reused
   as-is; otherwise SRC2 is split just before the match and the new
   second half becomes the redirect target.  */
1256 if (newpos2 == src2->head)
1261 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1262 src2->index, nmatch);
1263 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1267 fprintf (rtl_dump_file,
1268 "Cross jumping from bb %i to bb %i; %i common insns\n",
1269 src1->index, src2->index, nmatch);
/* SRC1's executions now flow through REDIRECT_TO; fold SRC1's profile
   (execution count and frequency) into the target block.  */
1271 redirect_to->count += src1->count;
1272 redirect_to->frequency += src1->frequency;
1274 /* Recompute the frequencies and counts of outgoing edges. */
1275 for (s = redirect_to->succ; s; s = s->succ_next)
1278 basic_block d = s->dest;
/* Compare ultimate destinations, looking through one forwarder.  */
1280 if (FORWARDER_BLOCK_P (d))
/* Find SRC1's outgoing edge S2 reaching the same (possibly
   forwarded) destination as S, then merge its counts into S.  */
1283 for (s2 = src1->succ; ; s2 = s2->succ_next)
1285 basic_block d2 = s2->dest;
1286 if (FORWARDER_BLOCK_P (d2))
1287 d2 = d2->succ->dest;
1292 s->count += s2->count;
1294 /* Take care to update possible forwarder blocks. We verified
1295 that there is no more than one in the chain, so we can't run
1296 into infinite loop. */
1297 if (FORWARDER_BLOCK_P (s->dest))
1299 s->dest->succ->count += s2->count;
1300 s->dest->count += s2->count;
1301 s->dest->frequency += EDGE_FREQUENCY (s);
1304 if (FORWARDER_BLOCK_P (s2->dest))
1306 s2->dest->succ->count -= s2->count;
1307 s2->dest->count -= s2->count;
1308 s2->dest->frequency -= EDGE_FREQUENCY (s);
/* Combine branch probabilities as a frequency-weighted average of the
   two blocks' corresponding edges; fall back to a plain average when
   both frequencies are zero, avoiding a division by zero below.  */
1311 if (!redirect_to->frequency && !src1->frequency)
1312 s->probability = (s->probability + s2->probability) / 2;
1315 = ((s->probability * redirect_to->frequency +
1316 s2->probability * src1->frequency)
1317 / (redirect_to->frequency + src1->frequency));
/* Keep the REG_BR_PROB note on the jump in sync with the updated
   branch-edge probability.  */
1320 note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
1322 XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);
1324 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1326 /* Skip possible basic block header. */
1327 if (GET_CODE (newpos1) == CODE_LABEL)
1328 newpos1 = NEXT_INSN (newpos1);
1330 if (GET_CODE (newpos1) == NOTE)
1331 newpos1 = NEXT_INSN (newpos1);
1334 /* Emit the jump insn. */
1335 label = block_label (redirect_to);
1336 emit_jump_insn_after (gen_jump (label), src1->end);
1337 JUMP_LABEL (src1->end) = label;
1338 LABEL_NUSES (label)++;
1340 /* Delete the now unreachable instructions. */
1341 delete_insn_chain (newpos1, last);
1343 /* Make sure there is a barrier after the new jump. */
1344 last = next_nonnote_insn (src1->end);
1345 if (!last || GET_CODE (last) != BARRIER)
1346 emit_barrier_after (src1->end);
/* Update the CFG: SRC1 now has REDIRECT_TO as its only successor, and
   its life info must be recomputed later (BB_UPDATE_LIFE).  */
1350 remove_edge (src1->succ);
1351 make_single_succ_edge (src1, redirect_to, 0);
1353 BB_SET_FLAG (src1, BB_UPDATE_LIFE);
1354 update_forwarder_flag (src1);
1359 /* Search the predecessors of BB for common insn sequences. When found,
1360 share code between them by redirecting control flow. Return true if
1361 any changes made. */
1364 try_crossjump_bb (mode, bb)
/* MODE is the CLEANUP_* flag mask; BB is the common destination whose
   incoming edges are compared pairwise via try_crossjump_to_edge.
   NOTE(review): this listing elides some original lines (the opening
   of the function body, the `changed' bookkeeping after successful
   cross jumps, and the final return); comments below describe only
   what is visible.  */
1368 edge e, e2, nexte2, nexte, fallthru;
1371 /* Nothing to do if there is not at least two incoming edges. */
1372 if (!bb->pred || !bb->pred->pred_next)
1375 /* It is always cheapest to redirect a block that ends in a branch to
1376 a block that falls through into BB, as that adds no branches to the
1377 program. We'll try that combination first. */
1378 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
1379 if (fallthru->flags & EDGE_FALLTHRU)
/* Iterate over all predecessor edges.  NEXTE is fetched up front
   because a successful cross jump may rewire or remove E.  */
1383 for (e = bb->pred; e; e = nexte)
1385 nexte = e->pred_next;
1387 /* As noted above, first try with the fallthru predecessor. */
1390 /* Don't combine the fallthru edge into anything else.
1391 If there is a match, we'll do it the other way around. */
1395 if (try_crossjump_to_edge (mode, e, fallthru))
1403 /* Non-obvious work limiting check: Recognize that we're going
1404 to call try_crossjump_bb on every basic block. So if we have
1405 two blocks with lots of outgoing edges (a switch) and they
1406 share lots of common destinations, then we would do the
1407 cross-jump check once for each common destination.
1409 Now, if the blocks actually are cross-jump candidates, then
1410 all of their destinations will be shared. Which means that
1411 we only need check them for cross-jump candidacy once. We
1412 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1413 choosing to do the check from the block for which the edge
1414 in question is the first successor of A. */
1415 if (e->src->succ != e)
/* Inner loop: pair E with every other predecessor edge E2.  */
1418 for (e2 = bb->pred; e2; e2 = nexte2)
1420 nexte2 = e2->pred_next;
1425 /* We've already checked the fallthru edge above. */
1429 /* The "first successor" check above only prevents multiple
1430 checks of crossjump(A,B). In order to prevent redundant
1431 checks of crossjump(B,A), require that A be the block
1432 with the lowest index. */
1433 if (e->src->index > e2->src->index)
1436 if (try_crossjump_to_edge (mode, e, e2))
1448 /* Do simple CFG optimizations - basic block merging, simplifying of jump
1449 instructions etc. Return nonzero if changes were made. */
1452 try_optimize_cfg (mode)
/* MODE is the CLEANUP_* flag mask selecting which optimizations run.
   Iterates over all basic blocks, applying label removal, empty-block
   removal, block merging, condjump simplification, edge forwarding and
   cross jumping, until a fixed point; finally updates life info for
   touched blocks.
   NOTE(review): this listing elides some original lines (declarations
   of `i', `s', `blocks', `changed', the outer fixed-point loop header,
   several `continue's and closing braces); comments below describe
   only what is visible.  */
1456 bool changed_overall = false;
/* Cross jumping wants fake exit edges from noreturn calls so their
   blocks can participate; they are removed again below.  */
1461 if (mode & CLEANUP_CROSSJUMP)
1462 add_noreturn_fake_exit_edges ();
/* Cache each block's forwarder-ness in its BB_FORWARDER_BLOCK flag
   to avoid repeated forwarder_block_p calls.  */
1464 for (i = 0; i < n_basic_blocks; i++)
1465 update_forwarder_flag (BASIC_BLOCK (i));
1467 /* Attempt to merge blocks as made possible by edge removal. If a block
1468 has only one successor, and the successor has only one predecessor,
1469 they may be combined. */
1476 fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n",
1479 for (i = 0; i < n_basic_blocks;)
1481 basic_block c, b = BASIC_BLOCK (i);
1483 bool changed_here = false;
1485 /* Delete trivially dead basic blocks. */
1486 while (b->pred == NULL)
/* Remember the predecessor-by-index block so iteration can resume
   there after the renumbering done by flow_delete_block.  */
1488 c = BASIC_BLOCK (b->index - 1);
1490 fprintf (rtl_dump_file, "Deleting block %i.\n", b->index);
1492 flow_delete_block (b);
1497 /* Remove code labels no longer used. Don't do this before
1498 CALL_PLACEHOLDER is removed, as some branches may be hidden
1500 if (b->pred->pred_next == NULL
1501 && (b->pred->flags & EDGE_FALLTHRU)
1502 && !(b->pred->flags & EDGE_COMPLEX)
1503 && GET_CODE (b->head) == CODE_LABEL
1504 && (!(mode & CLEANUP_PRE_SIBCALL)
1505 || !tail_recursion_label_p (b->head))
1506 /* If the previous block ends with a branch to this block,
1507 we can't delete the label. Normally this is a condjump
1508 that is yet to be simplified, but if CASE_DROPS_THRU,
1509 this can be a tablejump with some element going to the
1510 same place as the default (fallthru). */
1511 && (b->pred->src == ENTRY_BLOCK_PTR
1512 || GET_CODE (b->pred->src->end) != JUMP_INSN
1513 || ! label_is_jump_target_p (b->head, b->pred->src->end)))
1515 rtx label = b->head;
1517 b->head = NEXT_INSN (b->head);
1518 delete_insn_chain (label, label);
1520 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1524 /* If we fall through an empty block, we can remove it. */
1525 if (b->pred->pred_next == NULL
1526 && (b->pred->flags & EDGE_FALLTHRU)
1527 && GET_CODE (b->head) != CODE_LABEL
1528 && FORWARDER_BLOCK_P (b)
1529 /* Note that forwarder_block_p true ensures that there
1530 is a successor for this block. */
1531 && (b->succ->flags & EDGE_FALLTHRU)
1532 && n_basic_blocks > 1)
1535 fprintf (rtl_dump_file, "Deleting fallthru block %i.\n",
1538 c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
1539 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1540 flow_delete_block (b);
1545 /* Merge blocks. Loop because chains of blocks might be
1547 while ((s = b->succ) != NULL
1548 && s->succ_next == NULL
1549 && !(s->flags & EDGE_COMPLEX)
1550 && (c = s->dest) != EXIT_BLOCK_PTR
1551 && c->pred->pred_next == NULL
1552 /* If the jump insn has side effects,
1553 we can't kill the edge. */
1554 && (GET_CODE (b->end) != JUMP_INSN
1555 || onlyjump_p (b->end))
1556 && merge_blocks (s, b, c, mode))
1557 changed_here = true;
1559 /* Simplify branch over branch. */
1560 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1562 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1563 changed_here = true;
1566 /* If B has a single outgoing edge, but uses a non-trivial jump
1567 instruction without side-effects, we can either delete the
1568 jump entirely, or replace it with a simple unconditional jump.
1569 Use redirect_edge_and_branch to do the dirty work. */
1571 && ! b->succ->succ_next
1572 && b->succ->dest != EXIT_BLOCK_PTR
1573 && onlyjump_p (b->end)
1574 && redirect_edge_and_branch (b->succ, b->succ->dest))
1576 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1577 update_forwarder_flag (b);
1578 changed_here = true;
1581 /* Simplify branch to branch. */
1582 if (try_forward_edges (mode, b))
1583 changed_here = true;
1585 /* Look for shared code between blocks. */
1586 if ((mode & CLEANUP_CROSSJUMP)
1587 && try_crossjump_bb (mode, b))
1588 changed_here = true;
1590 /* Don't get confused by the index shift caused by deleting
/* Also try cross jumping among the predecessors of EXIT — this is
   the tail-merging of return sequences.  */
1598 if ((mode & CLEANUP_CROSSJUMP)
1599 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1602 #ifdef ENABLE_CHECKING
1604 verify_flow_info ();
1607 changed_overall |= changed;
/* Tear down the fake edges added for cross jumping above.  */
1611 if (mode & CLEANUP_CROSSJUMP)
1612 remove_fake_edges ();
/* Recompute life information, but only for blocks actually touched
   (those marked BB_UPDATE_LIFE during the pass).  */
1614 if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
1618 blocks = sbitmap_alloc (n_basic_blocks);
1619 sbitmap_zero (blocks);
1620 for (i = 0; i < n_basic_blocks; i++)
1621 if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
1624 SET_BIT (blocks, i);
1628 update_life_info (blocks, UPDATE_LIFE_GLOBAL,
1629 PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
1630 | PROP_KILL_DEAD_CODE);
1631 sbitmap_free (blocks);
/* The aux field carried BB_FLAGS for this pass only; clear it so
   later passes see a clean slate.  */
1634 for (i = 0; i < n_basic_blocks; i++)
1635 BASIC_BLOCK (i)->aux = NULL;
1637 return changed_overall;
1640 /* Delete all unreachable basic blocks. */
1643 delete_unreachable_blocks ()
/* Presumably returns CHANGED (true iff any block was deleted) — the
   return statement and the declaration of `i' are elided from this
   listing; verify against the full source.  */
1646 bool changed = false;
/* Compute reachability; blocks left without BB_REACHABLE are dead.  */
1648 find_unreachable_blocks ();
1650 /* Delete all unreachable basic blocks. Count down so that we
1651 don't interfere with the block renumbering that happens in
1652 flow_delete_block. */
1654 for (i = n_basic_blocks - 1; i >= 0; --i)
1656 basic_block b = BASIC_BLOCK (i);
1658 if (!(b->flags & BB_REACHABLE))
1659 flow_delete_block (b), changed = true;
/* Deleting blocks may have produced jumps to adjacent blocks that can
   now fall through.  */
1663 tidy_fallthru_edges ();
1667 /* Tidy the CFG by deleting unreachable code and whatnot. */
1673 bool changed = false;
1675 timevar_push (TV_CLEANUP_CFG);
1676 changed = delete_unreachable_blocks ();
1677 if (try_optimize_cfg (mode))
1678 delete_unreachable_blocks (), changed = true;
1680 /* Kill the data we won't maintain. */
1681 free_EXPR_LIST_list (&label_value_list);
1682 free_EXPR_LIST_list (&tail_recursion_label_list);
1683 timevar_pop (TV_CLEANUP_CFG);