1 /* Static Single Assignment conversion routines for the GNU compiler.
2 Copyright (C) 2000 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any
11 GNU CC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 Building an Optimizing Compiler
25 Butterworth-Heinemann, 1998
27 Static Single Assignment Construction
28 Preston Briggs, Tim Harvey, Taylor Simpson
29 Technical Report, Rice University, 1995
30 ftp://ftp.cs.rice.edu/public/preston/optimizer/SSA.ps.gz
38 #include "hard-reg-set.h"
42 #include "insn-config.h"
44 #include "basic-block.h"
46 #include "partition.h"
51 Handle subregs better, maybe. For now, if a reg that's set in a
52 subreg expression is duplicated going into SSA form, an extra copy
53 is inserted first that copies the entire reg into the duplicate, so
54 that the other bits are preserved. This isn't strictly SSA, since
55 at least part of the reg is assigned in more than one place (though
58 ??? What to do about strict_low_part. Probably I'll have to split
59 them out of their current instructions first thing.
61 Actually the best solution may be to have a kind of "mid-level rtl"
62 in which the RTL encodes exactly what we want, without exposing a
63 lot of niggling processor details. At some later point we lower
64 the representation, calling back into optabs to finish any necessary
68 /* If conservative_reg_partition is non-zero, use a conservative
69 register partitioning algorithm (which leaves more regs after
70 emerging from SSA) instead of the coalescing one. This is being
71 left in for a limited time only, as a debugging tool until the
72 coalescing algorithm is validated. */
73 static int conservative_reg_partition;
75 /* This flag is set when the CFG is in SSA form. */
/* NOTE(review): the flag's declaration itself is not visible in this
   listing; only its describing comment survived.  */
78 /* Element I is the single instruction that sets register I+PSEUDO. */
79 varray_type ssa_definition;
81 /* Element I is an INSN_LIST of instructions that use register I+PSEUDO. */
/* NOTE(review): the matching declaration (ssa_uses, judging by later
   VARRAY_GROW calls) is missing from this listing.  */
84 /* Element I-PSEUDO is the normal register that originated the ssa
85 register in question. */
86 varray_type ssa_rename_from;
88 /* The running target ssa register for a given normal register. */
/* Indexed by REGNO - FIRST_PSEUDO_REGISTER; live only while
   rename_registers is running (allocated on its stack).  */
89 static rtx *ssa_rename_to;
91 /* The number of registers that were live on entry to the SSA routines. */
92 static unsigned int ssa_max_reg_num;
94 /* Local function prototypes. */
96 struct rename_context;
98 static inline rtx * phi_alternative
101 static rtx first_insn_after_basic_block_note PARAMS ((basic_block));
103 static int remove_phi_alternative
105 static void simplify_to_immediate_dominators
106 PARAMS ((int *idom, sbitmap *dominators));
107 static void compute_dominance_frontiers_1
108 PARAMS ((sbitmap *frontiers, int *idom, int bb, sbitmap done));
109 static void compute_dominance_frontiers
110 PARAMS ((sbitmap *frontiers, int *idom));
111 static void find_evaluations_1
112 PARAMS ((rtx dest, rtx set, void *data));
113 static void find_evaluations
114 PARAMS ((sbitmap *evals, int nregs));
115 static void compute_iterated_dominance_frontiers
116 PARAMS ((sbitmap *idfs, sbitmap *frontiers, sbitmap *evals, int nregs));
117 static void insert_phi_node
118 PARAMS ((int regno, int b));
119 static void insert_phi_nodes
120 PARAMS ((sbitmap *idfs, sbitmap *evals, int nregs));
121 static void create_delayed_rename
122 PARAMS ((struct rename_context *, rtx *));
123 static void apply_delayed_renames
124 PARAMS ((struct rename_context *));
125 static int rename_insn_1
126 PARAMS ((rtx *ptr, void *data));
127 static void rename_block
128 PARAMS ((int b, int *idom));
129 static void rename_registers
130 PARAMS ((int nregs, int *idom));
132 static inline int ephi_add_node
133 PARAMS ((rtx reg, rtx *nodes, int *n_nodes));
134 static int * ephi_forward
135 PARAMS ((int t, sbitmap visited, sbitmap *succ, int *tstack));
136 static void ephi_backward
137 PARAMS ((int t, sbitmap visited, sbitmap *pred, rtx *nodes));
138 static void ephi_create
139 PARAMS ((int t, sbitmap visited, sbitmap *pred, sbitmap *succ, rtx *nodes));
140 static void eliminate_phi
141 PARAMS ((edge e, partition reg_partition));
142 static int make_regs_equivalent_over_bad_edges
143 PARAMS ((int bb, partition reg_partition));
145 /* These are used only in the conservative register partitioning
147 static int make_equivalent_phi_alternatives_equivalent
148 PARAMS ((int bb, partition reg_partition));
149 static partition compute_conservative_reg_partition
151 static int rename_equivalent_regs_in_insn
152 PARAMS ((rtx *ptr, void *data));
154 /* These are used in the register coalescing algorithm. */
155 static int coalesce_if_unconflicting
156 PARAMS ((partition p, conflict_graph conflicts, int reg1, int reg2));
157 static int coalesce_regs_in_copies
158 PARAMS ((basic_block bb, partition p, conflict_graph conflicts));
159 static int coalesce_reg_in_phi
160 PARAMS ((rtx, int dest_regno, int src_regno, void *data));
161 static int coalesce_regs_in_successor_phi_nodes
162 PARAMS ((basic_block bb, partition p, conflict_graph conflicts));
163 static partition compute_coalesced_reg_partition
165 static int mark_reg_in_phi
166 PARAMS ((rtx *ptr, void *data));
167 static void mark_phi_and_copy_regs
168 PARAMS ((regset phi_set));
/* NOTE(review): rename_equivalent_regs_in_insn is declared a second
   time below (it already appears above, in the conservative-partition
   group).  Duplicate identical declarations are legal C but one of the
   two should be removed.  */
170 static int rename_equivalent_regs_in_insn
171 PARAMS ((rtx *ptr, void *data));
172 static void rename_equivalent_regs
173 PARAMS ((partition reg_partition));
176 /* Given the SET of a PHI node, return the address of the alternative
177 for predecessor block C. */
180 phi_alternative (set, c)
/* The PHI argument vector stores (value, predecessor-block-index)
   pairs, so it is walked two elements at a time.  */
184 rtvec phi_vec = XVEC (SET_SRC (set), 0);
187 for (v = GET_NUM_ELEM (phi_vec) - 2; v >= 0; v -= 2)
188 if (INTVAL (RTVEC_ELT (phi_vec, v + 1)) == c)
189 return &RTVEC_ELT (phi_vec, v);
194 /* Given the SET of a phi node, remove the alternative for predecessor
195 block C. Return non-zero on success, or zero if no alternative is
199 remove_phi_alternative (set, c)
203 rtvec phi_vec = XVEC (SET_SRC (set), 0);
204 int num_elem = GET_NUM_ELEM (phi_vec);
/* Scan the (value, block-index) pairs for the one matching C.  */
207 for (v = num_elem - 2; v >= 0; v -= 2)
208 if (INTVAL (RTVEC_ELT (phi_vec, v + 1)) == c)
210 if (v < num_elem - 2)
/* Overwrite the matched pair with the last pair, then shrink the
   vector by one pair; the order of alternatives does not matter.  */
212 RTVEC_ELT (phi_vec, v) = RTVEC_ELT (phi_vec, num_elem - 2);
213 RTVEC_ELT (phi_vec, v + 1) = RTVEC_ELT (phi_vec, num_elem - 1);
215 PUT_NUM_ELEM (phi_vec, num_elem - 2);
222 /* Computing the Immediate Dominators:
224 Throughout, we don't actually want the full dominators set as
225 calculated by flow, but rather the immediate dominators.
229 simplify_to_immediate_dominators (idom, dominators)
236 tmp = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
238 /* Begin with tmp(n) = dom(n) - { n }. */
239 for (b = n_basic_blocks; --b >= 0; )
241 sbitmap_copy (tmp[b], dominators[b]);
242 RESET_BIT (tmp[b], b);
245 /* Subtract out all of our dominator's dominators. */
246 for (b = n_basic_blocks; --b >= 0; )
248 sbitmap tmp_b = tmp[b];
251 for (s = n_basic_blocks; --s >= 0; )
252 if (TEST_BIT (tmp_b, s))
253 sbitmap_difference (tmp_b, tmp_b, tmp[s]);
/* After removing each strict dominator's own strict dominators,
   exactly one bit remains per block: its immediate dominator.  */
256 /* Find the one bit set in the bitmap and put it in the output array. */
257 for (b = n_basic_blocks; --b >= 0; )
260 EXECUTE_IF_SET_IN_SBITMAP (tmp[b], 0, t, { idom[b] = t; });
263 sbitmap_vector_free (tmp);
267 /* For all registers, find all blocks in which they are set.
269 This is the transform of what would be local kill information that
270 we ought to be getting from flow. */
/* Communication between find_evaluations and its note_stores
   callback: the per-register evaluation bitmaps and the index of the
   block currently being scanned.  */
272 static sbitmap *fe_evals;
273 static int fe_current_bb;
/* note_stores callback: record that pseudo DEST is set in block
   FE_CURRENT_BB.  Hard registers are ignored.  */
276 find_evaluations_1 (dest, set, data)
278 rtx set ATTRIBUTE_UNUSED;
279 void *data ATTRIBUTE_UNUSED;
281 if (GET_CODE (dest) == REG
282 && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
283 SET_BIT (fe_evals[REGNO (dest) - FIRST_PSEUDO_REGISTER], fe_current_bb);
/* For each of NREGS pseudos, set in EVALS the bit of every basic
   block containing a store to that pseudo, by walking every insn and
   letting note_stores report the SETs.  */
287 find_evaluations (evals, nregs)
293 sbitmap_vector_zero (evals, nregs);
296 for (bb = n_basic_blocks; --bb >= 0; )
302 last = BLOCK_END (bb);
/* Only 'i' (real insn) rtx can contain stores worth noting.  */
305 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
306 note_stores (PATTERN (p), find_evaluations_1, NULL);
316 /* Computing the Dominance Frontier:
318 As described in Morgan, section 3.5, this may be done simply by
319 walking the dominator tree bottom-up, computing the frontier for
320 the children before the parent. When considering a block B,
323 (1) A flow graph edge leaving B that does not lead to a child
324 of B in the dominator tree must be a block that is either equal
325 to B or not dominated by B. Such blocks belong in the frontier
328 (2) Consider a block X in the frontier of one of the children C
329 of B. If X is not equal to B and is not dominated by B, it
330 is in the frontier of B.
334 compute_dominance_frontiers_1 (frontiers, idom, bb, done)
340 basic_block b = BASIC_BLOCK (bb);
345 sbitmap_zero (frontiers[bb]);
347 /* Do the frontier of the children first. Not all children in the
348 dominator tree (blocks dominated by this one) are children in the
349 CFG, so check all blocks. */
350 for (c = 0; c < n_basic_blocks; ++c)
351 if (idom[c] == bb && ! TEST_BIT (done, c))
352 compute_dominance_frontiers_1 (frontiers, idom, c, done);
354 /* Find blocks conforming to rule (1) above. */
355 for (e = b->succ; e; e = e->succ_next)
357 if (e->dest == EXIT_BLOCK_PTR)
359 if (idom[e->dest->index] != bb)
360 SET_BIT (frontiers[bb], e->dest->index);
363 /* Find blocks conforming to rule (2). */
364 for (c = 0; c < n_basic_blocks; ++c)
/* For each dominator-tree child C, lift frontier members of C that
   B does not strictly dominate into B's frontier.  */
368 EXECUTE_IF_SET_IN_SBITMAP (frontiers[c], 0, x,
371 SET_BIT (frontiers[bb], x);
/* Driver: compute the dominance frontier of every block into
   FRONTIERS, given the immediate-dominator array IDOM.  */
377 compute_dominance_frontiers (frontiers, idom)
381 sbitmap done = sbitmap_alloc (n_basic_blocks);
/* Recurse over the dominator tree starting at the entry block.  */
384 compute_dominance_frontiers_1 (frontiers, idom, 0, done);
390 /* Computing the Iterated Dominance Frontier:
392 This is the set of merge points for a given register.
394 This is not particularly intuitive. See section 7.1 of Morgan, in
395 particular figures 7.3 and 7.4 and the immediately surrounding text.
399 compute_iterated_dominance_frontiers (idfs, frontiers, evals, nregs)
408 worklist = sbitmap_alloc (n_basic_blocks);
410 for (reg = 0; reg < nregs; ++reg)
412 sbitmap idf = idfs[reg];
415 /* Start the iterative process by considering those blocks that
416 evaluate REG. We'll add their dominance frontiers to the
417 IDF, and then consider the blocks we just added. */
418 sbitmap_copy (worklist, evals[reg]);
420 /* Morgan's algorithm is incorrect here. Blocks that evaluate
421 REG aren't necessarily in REG's IDF. Start with an empty IDF. */
424 /* Iterate until the worklist is empty. */
429 EXECUTE_IF_SET_IN_SBITMAP (worklist, 0, b,
431 RESET_BIT (worklist, b);
432 /* For each block on the worklist, add to the IDF all
433 blocks on its dominance frontier that aren't already
434 on the IDF. Every block that's added is also added
436 sbitmap_union_of_diff (worklist, worklist, frontiers[b], idf);
437 sbitmap_a_or_b (idf, idf, frontiers[b]);
444 sbitmap_free (worklist);
/* Debug-dump statistics when an RTL dump file is open.  */
448 fprintf(rtl_dump_file,
449 "Iterated dominance frontier: %d passes on %d regs.\n",
454 /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
455 note associated with the BLOCK. */
458 first_insn_after_basic_block_note (block)
463 /* Get the first instruction in the block. */
466 if (insn == NULL_RTX)
/* Skip over the block's CODE_LABEL, if any, to reach the
   NOTE_INSN_BASIC_BLOCK note that must follow it.  */
468 if (GET_CODE (insn) == CODE_LABEL)
469 insn = NEXT_INSN (insn);
470 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
473 return NEXT_INSN (insn);
477 /* Insert the phi nodes. */
/* Emit a (set REG (phi ...)) insn for pseudo REGNO at the head of
   basic block BB, with one placeholder alternative per non-entry
   predecessor edge.  */
480 insert_phi_node (regno, bb)
483 basic_block b = BASIC_BLOCK (bb);
491 /* Find out how many predecessors there are. */
492 for (e = b->pred, npred = 0; e; e = e->pred_next)
493 if (e->src != ENTRY_BLOCK_PTR)
496 /* If this block has no "interesting" preds, then there is nothing to
497 do. Consider a block that only has the entry block as a pred. */
501 /* This is the register to which the phi function will be assigned. */
502 reg = regno_reg_rtx[regno + FIRST_PSEUDO_REGISTER];
504 /* Construct the arguments to the PHI node. The use of pc_rtx is just
505 a placeholder; we'll insert the proper value in rename_registers. */
506 vec = rtvec_alloc (npred * 2);
507 for (e = b->pred, i = 0; e ; e = e->pred_next, i += 2)
508 if (e->src != ENTRY_BLOCK_PTR)
510 RTVEC_ELT (vec, i + 0) = pc_rtx;
511 RTVEC_ELT (vec, i + 1) = GEN_INT (e->src->index);
514 phi = gen_rtx_PHI (VOIDmode, vec);
515 phi = gen_rtx_SET (VOIDmode, reg, phi);
517 insn = first_insn_after_basic_block_note (b);
/* If the insertion point is just past the block's last insn, the
   block's end pointer must be moved to the newly emitted phi.  */
518 end_p = PREV_INSN (insn) == b->end;
519 emit_insn_before (phi, insn);
521 b->end = PREV_INSN (insn);
/* For every pseudo, place a phi node at each block of its iterated
   dominance frontier — but only where the register is live on entry
   to that block (pruned SSA).  */
526 insert_phi_nodes (idfs, evals, nregs)
528 sbitmap *evals ATTRIBUTE_UNUSED;
533 for (reg = 0; reg < nregs; ++reg)
536 EXECUTE_IF_SET_IN_SBITMAP (idfs[reg], 0, b,
/* Skip blocks where the register is dead on entry; a phi there
   would merge values nobody uses.  */
538 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start,
539 reg + FIRST_PSEUDO_REGISTER))
540 insert_phi_node (reg, b);
545 /* Rename the registers to conform to SSA.
547 This is essentially the algorithm presented in Figure 7.8 of Morgan,
548 with a few changes to reduce pattern search time in favour of a bit
549 more memory usage. */
552 /* One of these is created for each set. It will live in a list local
553 to its basic block for the duration of that block's processing. */
554 struct rename_set_data
556 struct rename_set_data *next;
557 /* This is the SET_DEST of the (first) SET that sets the REG. */
559 /* This is what used to be at *REG_LOC. */
561 /* This is the REG that will replace OLD_REG. It's set only
562 when the rename data is moved onto the DONE_RENAMES queue. */
564 /* This is what to restore ssa_rename_to[REGNO (old_reg)] to.
565 It is usually the previous contents of ssa_rename_to[REGNO (old_reg)]. */
567 /* This is the insn that contains all the SETs of the REG. */
571 /* This struct is used to pass information to callback functions while
572 renaming registers. */
573 struct rename_context
575 struct rename_set_data *new_renames;
576 struct rename_set_data *done_renames;
580 /* Queue the rename of *REG_LOC. */
/* Record (but do not yet apply) that the pseudo at *REG_LOC will get
   a new SSA name; the entry is pushed on C->new_renames and applied
   later by apply_delayed_renames.  Non-pseudo locations are skipped.  */
582 create_delayed_rename (c, reg_loc)
583 struct rename_context *c;
586 struct rename_set_data *r;
587 r = (struct rename_set_data *) xmalloc (sizeof(*r));
589 if (GET_CODE (*reg_loc) != REG
590 || REGNO (*reg_loc) < FIRST_PSEUDO_REGISTER)
593 r->reg_loc = reg_loc;
594 r->old_reg = *reg_loc;
/* Remember the current rename target so it can be restored when this
   block's processing pops its renames.  */
595 r->prev_reg = ssa_rename_to [REGNO (r->old_reg) - FIRST_PSEUDO_REGISTER];
596 r->set_insn = c->current_insn;
597 r->next = c->new_renames;
601 /* This is part of a rather ugly hack to allow the pre-ssa regno to be
602 reused. If, during processing, a register has not yet been touched,
603 ssa_rename_to[regno] will be NULL. Now, in the course of pushing
604 and popping values from ssa_rename_to, when we would ordinarily
605 pop NULL back in, we pop RENAME_NO_RTX. We treat this exactly the
606 same as NULL, except that it signals that the original regno has
607 already been reused. */
608 #define RENAME_NO_RTX pc_rtx
610 /* Move all the entries from NEW_RENAMES onto DONE_RENAMES by
611 applying all the renames on NEW_RENAMES. */
614 apply_delayed_renames (c)
615 struct rename_context *c;
617 struct rename_set_data *r;
618 struct rename_set_data *last_r = NULL;
620 for (r = c->new_renames; r != NULL; r = r->next)
622 int regno = REGNO (r->old_reg);
625 /* Failure here means that someone has a PARALLEL that sets
626 a register twice (bad!). */
627 if (ssa_rename_to [regno - FIRST_PSEUDO_REGISTER] != r->prev_reg)
629 /* Failure here means we have changed REG_LOC before applying
631 /* For the first set we come across, reuse the original regno. */
632 if (r->prev_reg == NULL_RTX)
634 r->new_reg = r->old_reg;
635 /* We want to restore RENAME_NO_RTX rather than NULL_RTX. */
636 r->prev_reg = RENAME_NO_RTX;
639 r->new_reg = gen_reg_rtx (GET_MODE (r->old_reg));
640 new_regno = REGNO (r->new_reg);
641 ssa_rename_to[regno - FIRST_PSEUDO_REGISTER] = r->new_reg;
/* Grow the SSA side tables (with 25% headroom) when a freshly
   generated regno exceeds their current capacity.  */
643 if (new_regno >= (int) ssa_definition->num_elements)
645 int new_limit = new_regno * 5 / 4;
646 ssa_definition = VARRAY_GROW (ssa_definition, new_limit);
647 ssa_uses = VARRAY_GROW (ssa_uses, new_limit);
648 ssa_rename_from = VARRAY_GROW (ssa_rename_from, new_limit);
651 VARRAY_RTX (ssa_definition, new_regno) = r->set_insn;
652 VARRAY_RTX (ssa_rename_from, new_regno) = r->old_reg;
/* Splice the (already-walked) NEW_RENAMES list onto the front of
   DONE_RENAMES; LAST_R is the list's tail.  */
658 last_r->next = c->done_renames;
659 c->done_renames = c->new_renames;
660 c->new_renames = NULL;
664 /* Part one of the first step of rename_block, called through for_each_rtx.
665 Mark pseudos that are set for later update. Transform uses of pseudos. */
668 rename_insn_1 (ptr, data)
673 struct rename_context *context = data;
678 switch (GET_CODE (x))
682 rtx *destp = &SET_DEST (x);
683 rtx dest = SET_DEST (x);
685 /* Some SETs also use the REG specified in their LHS.
686 These can be detected by the presence of
687 STRICT_LOW_PART, SUBREG, SIGN_EXTRACT, and ZERO_EXTRACT
688 in the LHS. Handle these by changing
689 (set (subreg (reg foo)) ...)
691 (sequence [(set (reg foo_1) (reg foo))
692 (set (subreg (reg foo_1)) ...)])
694 FIXME: Much of the time this is too much. For many libcalls,
695 paradoxical SUBREGs, etc., the input register is dead. We should
696 recognise this in rename_block or here and not make a false
699 if (GET_CODE (dest) == STRICT_LOW_PART
700 || GET_CODE (dest) == SUBREG
701 || GET_CODE (dest) == SIGN_EXTRACT
702 || GET_CODE (dest) == ZERO_EXTRACT)
/* Strip wrappers to find the underlying REG being partially set.  */
707 while (GET_CODE (reg) == STRICT_LOW_PART
708 || GET_CODE (reg) == SUBREG
709 || GET_CODE (reg) == SIGN_EXTRACT
710 || GET_CODE (reg) == ZERO_EXTRACT)
713 if (GET_CODE (reg) == REG
714 && REGNO (reg) >= FIRST_PSEUDO_REGISTER)
716 /* Generate (set reg reg), and do renaming on it so
717 that it becomes (set reg_1 reg_0), and we will
718 replace reg with reg_1 in the SUBREG. */
/* The copy insn is renamed with a temporarily empty new_renames
   list so its rename is applied immediately and independently of
   the renames pending for the current insn.  */
720 struct rename_set_data *saved_new_renames;
721 saved_new_renames = context->new_renames;
722 context->new_renames = NULL;
723 i = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
724 for_each_rtx (&i, rename_insn_1, data);
725 apply_delayed_renames (context);
726 context->new_renames = saved_new_renames;
729 else if (GET_CODE (dest) == REG
730 && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
732 /* We found a genuine set of an interesting register. Tag
733 it so that we can create a new name for it after we finish
734 processing this insn. */
736 create_delayed_rename (context, destp);
738 /* Since we do not wish to (directly) traverse the
739 SET_DEST, recurse through for_each_rtx for the SET_SRC
741 if (GET_CODE (x) == SET)
742 for_each_rtx (&SET_SRC (x), rename_insn_1, data);
746 /* Otherwise, this was not an interesting destination. Continue
747 on, marking uses as normal. */
/* REG case: rewrite a use of an original pseudo to its current SSA
   name, if one is active.  */
752 if (REGNO (x) >= FIRST_PSEUDO_REGISTER
753 && REGNO (x) < ssa_max_reg_num)
755 rtx new_reg = ssa_rename_to[REGNO(x) - FIRST_PSEUDO_REGISTER];
757 if (new_reg != NULL_RTX && new_reg != RENAME_NO_RTX)
759 if (GET_MODE (x) != GET_MODE (new_reg))
763 /* Else this is a use before a set. Warn? */
768 /* Never muck with the phi. We do that elsewhere, special-like. */
772 /* Anything else, continue traversing. */
/* Rename the pseudos of basic block BB (Morgan fig. 7.8): walk the
   insns creating new names for sets, fix up successor phi nodes,
   recurse over dominator-tree children (via IDOM), then pop this
   block's renames off ssa_rename_to.  */
778 rename_block (bb, idom)
782 basic_block b = BASIC_BLOCK (bb);
784 rtx insn, next, last;
785 struct rename_set_data *set_data = NULL;
788 /* Step One: Walk the basic block, adding new names for sets and
796 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
798 struct rename_context context;
799 context.done_renames = set_data;
800 context.new_renames = NULL;
801 context.current_insn = insn;
804 for_each_rtx (&PATTERN (insn), rename_insn_1, &context);
/* NOTE(review): the next line is mojibake in this listing — the
   "®" is a mis-encoding of "&RE"; it should read
   for_each_rtx (&REG_NOTES (insn), ...).  */
805 for_each_rtx (®_NOTES (insn), rename_insn_1, &context);
807 /* Sometimes, we end up with a sequence of insns that
808 SSA needs to treat as a single insn. Wrap these in a
809 SEQUENCE. (Any notes now get attached to the SEQUENCE,
810 not to the old version inner insn.) */
811 if (get_insns () != NULL_RTX)
816 emit (PATTERN (insn));
817 seq = gen_sequence ();
818 /* We really want a SEQUENCE of SETs, not a SEQUENCE
820 for (i = 0; i < XVECLEN (seq, 0); i++)
821 XVECEXP (seq, 0, i) = PATTERN (XVECEXP (seq, 0, i));
822 PATTERN (insn) = seq;
826 apply_delayed_renames (&context);
827 set_data = context.done_renames;
830 next = NEXT_INSN (insn);
832 while (insn != last);
834 /* Step Two: Update the phi nodes of this block's successors. */
836 for (e = b->succ; e; e = e->succ_next)
838 if (e->dest == EXIT_BLOCK_PTR)
841 insn = first_insn_after_basic_block_note (e->dest);
843 while (PHI_NODE_P (insn))
845 rtx phi = PATTERN (insn);
849 /* Find out which of our outgoing registers this node is
850 intended to replace. Note that if this is not the first PHI
851 node to have been created for this register, we have to
852 jump through rename links to figure out which register
853 we're talking about. This can easily be recognized by
854 noting that the regno is new to this pass. */
855 regno = REGNO (SET_DEST (phi));
856 if (regno >= ssa_max_reg_num)
857 regno = REGNO (VARRAY_RTX (ssa_rename_from, regno));
858 reg = ssa_rename_to[regno - FIRST_PSEUDO_REGISTER];
860 /* It is possible for the variable to be uninitialized on
861 edges in. Reduce the arity of the PHI so that we don't
862 consider those edges. */
863 if (reg == NULL || reg == RENAME_NO_RTX)
865 if (! remove_phi_alternative (phi, bb))
870 /* When we created the PHI nodes, we did not know what mode
871 the register should be. Now that we've found an original,
872 we can fill that in. */
873 if (GET_MODE (SET_DEST (phi)) == VOIDmode)
874 PUT_MODE (SET_DEST (phi), GET_MODE (reg));
875 else if (GET_MODE (SET_DEST (phi)) != GET_MODE (reg))
878 *phi_alternative (phi, bb) = reg;
879 /* ??? Mark for a new ssa_uses entry. */
882 insn = NEXT_INSN (insn);
886 /* Step Three: Do the same to the children of this block in
889 for (c = 0; c < n_basic_blocks; ++c)
891 rename_block (c, idom);
893 /* Step Four: Update the sets to refer to their new register,
894 and restore ssa_rename_to to its previous state. */
898 struct rename_set_data *next;
899 rtx old_reg = *set_data->reg_loc;
901 if (*set_data->reg_loc != set_data->old_reg)
903 *set_data->reg_loc = set_data->new_reg;
905 ssa_rename_to[REGNO (old_reg)-FIRST_PSEUDO_REGISTER]
906 = set_data->prev_reg;
908 next = set_data->next;
/* Top-level renaming driver: allocate the SSA side tables (with 3x
   headroom) and the rename stack, then rename starting from block 0
   of the dominator tree.  */
915 rename_registers (nregs, idom)
919 VARRAY_RTX_INIT (ssa_definition, nregs * 3, "ssa_definition");
920 VARRAY_RTX_INIT (ssa_uses, nregs * 3, "ssa_uses");
921 VARRAY_RTX_INIT (ssa_rename_from, nregs * 3, "ssa_rename_from");
923 ssa_rename_to = (rtx *) alloca (nregs * sizeof(rtx));
924 bzero ((char *) ssa_rename_to, nregs * sizeof(rtx));
926 rename_block (0, idom);
928 /* ??? Update basic_block_live_at_start, and other flow info
/* ssa_rename_to points at alloca'd stack memory; clear it so no one
   dereferences it after this function returns.  */
931 ssa_rename_to = NULL;
935 /* The main entry point for moving to SSA. */
/* NOTE(review): the function header line is elided from this listing;
   in the GCC sources this entry point is convert_to_ssa — confirm
   against the full file.  */
940 /* Element I is the set of blocks that set register I. */
943 /* Dominator bitmaps. */
948 /* Element I is the immediate dominator of block I. */
953 /* Don't do it twice. */
957 /* Need global_live_at_{start,end} up to date. */
958 life_analysis (get_insns (), NULL, PROP_KILL_DEAD_CODE | PROP_SCAN_DEAD_CODE);
960 /* Compute dominators. */
961 dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
962 compute_flow_dominators (dominators, NULL);
964 idom = (int *) alloca (n_basic_blocks * sizeof (int));
965 memset ((void *)idom, -1, (size_t)n_basic_blocks * sizeof (int));
966 simplify_to_immediate_dominators (idom, dominators);
968 sbitmap_vector_free (dominators);
973 fputs (";; Immediate Dominators:\n", rtl_dump_file);
974 for (i = 0; i < n_basic_blocks; ++i)
975 fprintf (rtl_dump_file, ";\t%3d = %3d\n", i, idom[i]);
976 fflush (rtl_dump_file);
979 /* Compute dominance frontiers. */
981 dfs = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
982 compute_dominance_frontiers (dfs, idom);
986 dump_sbitmap_vector (rtl_dump_file, ";; Dominance Frontiers:",
987 "; Basic Block", dfs, n_basic_blocks);
988 fflush (rtl_dump_file);
991 /* Compute register evaluations. */
993 ssa_max_reg_num = max_reg_num();
994 nregs = ssa_max_reg_num - FIRST_PSEUDO_REGISTER;
995 evals = sbitmap_vector_alloc (nregs, n_basic_blocks);
996 find_evaluations (evals, nregs);
998 /* Compute the iterated dominance frontier for each register. */
1000 idfs = sbitmap_vector_alloc (nregs, n_basic_blocks);
1001 compute_iterated_dominance_frontiers (idfs, dfs, evals, nregs);
1005 dump_sbitmap_vector (rtl_dump_file, ";; Iterated Dominance Frontiers:",
1006 "; Register-FIRST_PSEUDO_REGISTER", idfs, nregs);
1007 fflush (rtl_dump_file);
1010 /* Insert the phi nodes. */
1012 insert_phi_nodes (idfs, evals, nregs);
1014 /* Rename the registers to satisfy SSA. */
1016 rename_registers (nregs, idom);
1018 /* All done! Clean up and go home. */
1020 sbitmap_vector_free (dfs);
1021 sbitmap_vector_free (evals);
1022 sbitmap_vector_free (idfs);
/* Rebuild reg info since renaming created many new pseudos.  */
1025 reg_scan (get_insns (), max_reg_num (), 1);
1029 /* REG is the representative temporary of its partition. Add it to the
1030 set of nodes to be processed, if it hasn't been already. Return the
1031 index of this register in the node set. */
1034 ephi_add_node (reg, nodes, n_nodes)
/* Linear scan; node counts here are bounded by the number of phi
   alternatives on one edge, so this stays cheap.  */
1039 for (i = *n_nodes - 1; i >= 0; --i)
1040 if (REGNO (reg) == REGNO (nodes[i]))
/* Not present: append and return the new index.  */
1043 nodes[i = (*n_nodes)++] = reg;
1047 /* Part one of the topological sort. This is a forward (downward) search
1048 through the graph collecting a stack of nodes to process. Assuming no
1049 cycles, the nodes at top of the stack when we are finished will have
1050 no other dependencies. */
1053 ephi_forward (t, visited, succ, tstack)
1061 SET_BIT (visited, t);
1063 EXECUTE_IF_SET_IN_SBITMAP (succ[t], 0, s,
1065 if (! TEST_BIT (visited, s))
1066 tstack = ephi_forward (s, visited, succ, tstack);
1073 /* Part two of the topological sort. This is a backward search through
1074 a cycle in the graph, copying the data forward as we go. */
1077 ephi_backward (t, visited, pred, nodes)
1079 sbitmap visited, *pred;
1084 SET_BIT (visited, t);
1086 EXECUTE_IF_SET_IN_SBITMAP (pred[t], 0, p,
1088 if (! TEST_BIT (visited, p))
/* Recurse first so the predecessor's own value is consumed before
   we overwrite it with this node's value.  */
1090 ephi_backward (p, visited, pred, nodes);
1091 emit_move_insn (nodes[p], nodes[t]);
1096 /* Part two of the topological sort. Create the copy for a register
1097 and any cycle of which it is a member. */
1100 ephi_create (t, visited, pred, succ, nodes)
1102 sbitmap visited, *pred, *succ;
1105 rtx reg_u = NULL_RTX;
1106 int unvisited_predecessors = 0;
1109 /* Iterate through the predecessor list looking for unvisited nodes.
1110 If there are any, we have a cycle, and must deal with that. At
1111 the same time, look for a visited predecessor. If there is one,
1112 we won't need to create a temporary. */
1114 EXECUTE_IF_SET_IN_SBITMAP (pred[t], 0, p,
1116 if (! TEST_BIT (visited, p))
1117 unvisited_predecessors = 1;
1122 if (unvisited_predecessors)
1124 /* We found a cycle. Copy out one element of the ring (if necessary),
1125 then traverse the ring copying as we go. */
/* REG_U breaks the cycle: it preserves NODES[T]'s value so the ring
   can be rotated without losing it.  */
1129 reg_u = gen_reg_rtx (GET_MODE (nodes[t]));
1130 emit_move_insn (reg_u, nodes[t]);
1133 EXECUTE_IF_SET_IN_SBITMAP (pred[t], 0, p,
1135 if (! TEST_BIT (visited, p))
1137 ephi_backward (p, visited, pred, nodes);
1138 emit_move_insn (nodes[p], reg_u);
1144 /* No cycle. Just copy the value from a successor. */
1147 EXECUTE_IF_SET_IN_SBITMAP (succ[t], 0, s,
1149 SET_BIT (visited, t);
1150 emit_move_insn (nodes[t], nodes[s]);
1156 /* Convert the edge to normal form. */
/* Replace the phi alternatives reached over edge E by ordinary copy
   insns inserted on E, ordering the copies via a topological sort of
   the register-dependency graph (with cycle breaking in ephi_create).
   REG_PARTITION maps each pseudo to its partition representative.  */
1159 eliminate_phi (e, reg_partition)
1161 partition reg_partition;
1164 sbitmap *pred, *succ;
1167 int *stack, *tstack;
1171 /* Collect an upper bound on the number of registers needing processing. */
1173 insn = first_insn_after_basic_block_note (e->dest);
1176 while (PHI_NODE_P (insn))
1178 insn = next_nonnote_insn (insn);
1185 /* Build the auxiliary graph R(B).
1187 The nodes of the graph are the members of the register partition
1188 present in Phi(B). There is an edge from FIND(T0)->FIND(T1) for
1189 each T0 = PHI(...,T1,...), where T1 is for the edge from block C. */
1191 nodes = (rtx *) alloca (n_nodes * sizeof(rtx));
1192 pred = sbitmap_vector_alloc (n_nodes, n_nodes);
1193 succ = sbitmap_vector_alloc (n_nodes, n_nodes);
1194 sbitmap_vector_zero (pred, n_nodes);
1195 sbitmap_vector_zero (succ, n_nodes);
1197 insn = first_insn_after_basic_block_note (e->dest);
1200 for (; PHI_NODE_P (insn); insn = next_nonnote_insn (insn))
1202 rtx* preg = phi_alternative (PATTERN (insn), e->src->index);
1203 rtx tgt = SET_DEST (PATTERN (insn));
1206 /* There may be no phi alternative corresponding to this edge.
1207 This indicates that the phi variable is undefined along this
1213 if (GET_CODE (reg) != REG || GET_CODE (tgt) != REG)
/* Canonicalize both ends to their partition representatives.  */
1216 reg = regno_reg_rtx[partition_find (reg_partition, REGNO (reg))];
1217 tgt = regno_reg_rtx[partition_find (reg_partition, REGNO (tgt))];
1218 /* If the two registers are already in the same partition,
1219 nothing will need to be done. */
1224 ireg = ephi_add_node (reg, nodes, &n_nodes);
1225 itgt = ephi_add_node (tgt, nodes, &n_nodes);
1227 SET_BIT (pred[ireg], itgt);
1228 SET_BIT (succ[itgt], ireg);
1235 /* Begin a topological sort of the graph. */
1237 visited = sbitmap_alloc (n_nodes);
1238 sbitmap_zero (visited);
1240 tstack = stack = (int *) alloca (n_nodes * sizeof (int));
1242 for (i = 0; i < n_nodes; ++i)
1243 if (! TEST_BIT (visited, i))
1244 tstack = ephi_forward (i, visited, succ, tstack);
1246 sbitmap_zero (visited);
1248 /* As we find a solution to the tsort, collect the implementation
1249 insns in a sequence. */
1252 while (tstack != stack)
1255 if (! TEST_BIT (visited, i))
1256 ephi_create (i, visited, pred, succ, nodes);
1259 insn = gen_sequence ();
1261 insert_insn_on_edge (insn, e);
1263 fprintf (rtl_dump_file, "Emitting copy on edge (%d,%d)\n",
1264 e->src->index, e->dest->index);
1266 sbitmap_free (visited);
1268 sbitmap_vector_free (pred);
1269 sbitmap_vector_free (succ);
1273 /* For basic block B, consider all phi insns which provide an
1274 alternative corresponding to an incoming abnormal critical edge.
1275 Place the phi alternative corresponding to that abnormal critical
1276 edge in the same register class as the destination of the set.
1278 From Morgan, p. 178:
1280 For each abnormal critical edge (C, B),
1281 if T0 = phi (T1, ..., Ti, ..., Tm) is a phi node in B,
1282 and C is the ith predecessor of B,
1283 then T0 and Ti must be equivalent.
1285 Return non-zero iff any such cases were found for which the two
1286 regs were not already in the same class. */
1289 make_regs_equivalent_over_bad_edges (bb, reg_partition)
1291 partition reg_partition;
1294 basic_block b = BASIC_BLOCK (bb);
1297 /* Advance to the first phi node. */
1298 phi = first_insn_after_basic_block_note (b);
1300 /* Scan all the phi nodes. */
1303 phi = next_nonnote_insn (phi))
1307 rtx set = PATTERN (phi);
1308 rtx tgt = SET_DEST (set);
1310 /* The set target is expected to be a pseudo. */
1311 if (GET_CODE (tgt) != REG
1312 || REGNO (tgt) < FIRST_PSEUDO_REGISTER)
1314 tgt_regno = REGNO (tgt);
1316 /* Scan incoming abnormal critical edges. */
1317 for (e = b->pred; e; e = e->pred_next)
/* No copy insn can be inserted on an abnormal critical edge, so
   the phi there must be resolved by forcing both regs into the
   same partition instead.  */
1318 if ((e->flags & (EDGE_ABNORMAL | EDGE_CRITICAL))
1319 == (EDGE_ABNORMAL | EDGE_CRITICAL))
1321 rtx *alt = phi_alternative (set, e->src->index);
1324 /* If there is no alternative corresponding to this edge,
1325 the value is undefined along the edge, so just go on. */
1329 /* The phi alternative is expected to be a pseudo. */
1330 if (GET_CODE (*alt) != REG
1331 || REGNO (*alt) < FIRST_PSEUDO_REGISTER)
1333 alt_regno = REGNO (*alt);
1335 /* If the set destination and the phi alternative aren't
1336 already in the same class... */
1337 if (partition_find (reg_partition, tgt_regno)
1338 != partition_find (reg_partition, alt_regno))
1340 /* ... make them such. */
1341 partition_union (reg_partition,
1342 tgt_regno, alt_regno);
1352 /* Consider phi insns in basic block BB pairwise. If the set targets
1353 of both insns are equivalent pseudos, make the corresponding phi
1354 alternatives in each phi corresponding equivalent.
1356 Return nonzero if any new register classes were unioned. */
1359 make_equivalent_phi_alternatives_equivalent (bb, reg_partition)
1361 partition reg_partition;
1364 basic_block b = BASIC_BLOCK (bb);
1367 /* Advance to the first phi node. */
1368 phi = first_insn_after_basic_block_note (b);
1370 /* Scan all the phi nodes. */
1373 phi = next_nonnote_insn (phi))
1375 rtx set = PATTERN (phi);
1376 /* The regno of the destination of the set. */
1377 int tgt_regno = REGNO (SET_DEST (PATTERN (phi)));
1379 rtx phi2 = next_nonnote_insn (phi);
1381 /* Scan all phi nodes following this one. */
1384 phi2 = next_nonnote_insn (phi2))
1386 rtx set2 = PATTERN (phi2);
1387 /* The regno of the destination of the set. */
1388 int tgt2_regno = REGNO (SET_DEST (set2));
1390 /* Are the set destinations equivalent regs? */
1391 if (partition_find (reg_partition, tgt_regno) ==
1392 partition_find (reg_partition, tgt2_regno))
1395 /* Scan over edges. */
1396 for (e = b->pred; e; e = e->pred_next)
1398 int pred_block = e->src->index;
1399 /* Identify the phi alternatives from both phi
1400 nodes corresponding to this edge. */
1401 rtx *alt = phi_alternative (set, pred_block);
1402 rtx *alt2 = phi_alternative (set2, pred_block);
1404 /* If one of the phi nodes doesn't have a
1405 corresponding alternative, just skip it. */
1406 if (alt == 0 || alt2 == 0)
1409 /* Both alternatives should be pseudos. */
1410 if (GET_CODE (*alt) != REG
1411 || REGNO (*alt) < FIRST_PSEUDO_REGISTER)
1413 if (GET_CODE (*alt2) != REG
1414 || REGNO (*alt2) < FIRST_PSEUDO_REGISTER)
1417 /* If the alternatives aren't already in the same
1419 if (partition_find (reg_partition, REGNO (*alt))
1420 != partition_find (reg_partition, REGNO (*alt2)))
1422 /* ... make them so. */
1423 partition_union (reg_partition,
1424 REGNO (*alt), REGNO (*alt2));
1435 /* Compute a conservative partition of outstanding pseudo registers.
1436 See Morgan 7.3.1. */
1439 compute_conservative_reg_partition ()
1444 /* We don't actually work with hard registers, but it's easier to
1445 carry them around anyway rather than constantly doing register
1446 number arithmetic. */
1448 partition_new (ssa_definition->num_elements + FIRST_PSEUDO_REGISTER);
1450 /* The first priority is to make sure registers that might have to
1451 be copied on abnormal critical edges are placed in the same
1452 partition. This saves us from having to split abnormal critical
1454 for (bb = n_basic_blocks; --bb >= 0; )
1455 changed += make_regs_equivalent_over_bad_edges (bb, p);
1457 /* Now we have to ensure that corresponding arguments of phi nodes
1458 assigning to corresponding regs are equivalent. Iterate until
   no further unions occur: each union may expose new pairs of
   equivalent phi targets, so a single pass is not enough.  */
1463 for (bb = n_basic_blocks; --bb >= 0; )
1464 changed += make_equivalent_phi_alternatives_equivalent (bb, p);
1470 /* The following functions compute a register partition that attempts
1471 to eliminate as many reg copies and phi node copies as possible by
1472 coalescing registers. This is the strategy:
1474 1. As in the conservative case, the top priority is to coalesce
1475 registers that otherwise would cause copies to be placed on
1476 abnormal critical edges (which isn't possible).
1478 2. Figure out which regs are involved (in the LHS or RHS) of
1479 copies and phi nodes. Compute conflicts among these regs.
1481 3. Walk around the instruction stream, placing two regs in the
1482 same class of the partition if one appears on the LHS and the
1483 other on the RHS of a copy or phi node and the two regs don't
1484 conflict. The conflict information of course needs to be
1487 4. If anything has changed, there may be new opportunities to
1488 coalesce regs, so go back to 2.
1491 /* If REG1 and REG2 don't conflict in CONFLICTS, place them in the
1492 same class of partition P, if they aren't already. Update
1493 CONFLICTS appropriately.
1495 Returns one if REG1 and REG2 were placed in the same class but were
1496 not previously; zero otherwise.
1498 See Morgan figure 11.15. */
1501 coalesce_if_unconflicting (p, conflicts, reg1, reg2)
1503 conflict_graph conflicts;
1509 /* Don't mess with hard regs. */
1510 if (reg1 < FIRST_PSEUDO_REGISTER || reg2 < FIRST_PSEUDO_REGISTER)
1513 /* Find the canonical regs for the classes containing REG1 and
1515 reg1 = partition_find (p, reg1);
1516 reg2 = partition_find (p, reg2);
1518 /* If they're already in the same class, there's nothing to do. */
1522 /* If the regs conflict, our hands are tied. */
1523 if (conflict_graph_conflict_p (conflicts, reg1, reg2))
1526 /* We're good to go. Put the regs in the same partition. */
1527 partition_union (p, reg1, reg2);
1529 /* Find the new canonical reg for the merged class.  The union may
   have picked either representative as canonical.  */
1530 reg = partition_find (p, reg1);
1532 /* Merge conflicts from the two previous classes, so the conflict
   graph stays consistent with the coarsened partition.  */
1533 conflict_graph_merge_regs (conflicts, reg, reg1);
1534 conflict_graph_merge_regs (conflicts, reg, reg2);
1539 /* For each register copy insn in basic block BB, place the LHS and
1540 RHS regs in the same class in partition P if they do not conflict
1541 according to CONFLICTS.
1543 Returns the number of changes that were made to P.
1545 See Morgan figure 11.14. */
1548 coalesce_regs_in_copies (bb, p, conflicts)
1551 conflict_graph conflicts;
1557 /* Scan the instruction stream of the block. */
1558 for (insn = bb->head; insn != end; insn = NEXT_INSN (insn))
1564 /* If this isn't a set insn, go to the next insn. */
1565 if (GET_CODE (insn) != INSN)
1567 pattern = PATTERN (insn);
1568 if (GET_CODE (pattern) != SET)
1571 src = SET_SRC (pattern);
1572 dest = SET_DEST (pattern);
1574 /* We're only looking for copies: both source and destination
   must be bare REGs.  */
1575 if (GET_CODE (src) != REG || GET_CODE (dest) != REG)
1578 /* Coalesce only if the reg modes are the same. As long as
1579 each reg's rtx is unique, it can have only one mode, so two
1580 pseudos of different modes can't be coalesced into one.
1582 FIXME: We can probably get around this by inserting SUBREGs
1583 where appropriate, but for now we don't bother. */
1584 if (GET_MODE (src) != GET_MODE (dest))
1587 /* Found a copy; see if we can use the same reg for both the
1588 source and destination (and thus eliminate the copy,
1590 changed += coalesce_if_unconflicting (p, conflicts,
1591 REGNO (src), REGNO (dest));
/* State threaded through for_each_successor_phi by
   coalesce_regs_in_successor_phi_nodes: the conflict graph consulted
   before coalescing, plus (per the uses of context->p and
   context.changed below) the register partition being built and a
   counter of successful unions.  */
1598 struct phi_coalesce_context
1601 conflict_graph conflicts;
1605 /* Callback function for for_each_successor_phi. If the set
1606 destination and the phi alternative regs do not conflict, place
1607 them in the same partition class. DATA is a pointer to a
1608 phi_coalesce_context struct. */
1611 coalesce_reg_in_phi (insn, dest_regno, src_regno, data)
1612 rtx insn ATTRIBUTE_UNUSED;
1617 struct phi_coalesce_context *context =
1618 (struct phi_coalesce_context *) data;
1620 /* Attempt to use the same reg, if they don't conflict.  The
   running count of unions made is accumulated in the context.  */
1622 += coalesce_if_unconflicting (context->p, context->conflicts,
1623 dest_regno, src_regno);
1627 /* For each alternative in a phi function corresponding to basic block
1628 BB (in phi nodes in successor block to BB), place the reg in the
1629 phi alternative and the reg to which the phi value is set into the
1630 same class in partition P, if allowed by CONFLICTS.
1632 Return the number of changes that were made to P.
1634 See Morgan figure 11.14. */
1637 coalesce_regs_in_successor_phi_nodes (bb, p, conflicts)
1640 conflict_graph conflicts;
1642 struct phi_coalesce_context context;
1644 context.conflicts = conflicts;
1645 context.changed = 0;
1647 /* The real work happens in the coalesce_reg_in_phi callback, which
   is invoked once per matching phi alternative.  */
1647 for_each_successor_phi (bb, &coalesce_reg_in_phi, &context);
1649 return context.changed;
1652 /* Compute and return a partition of pseudos. Where possible,
1653 non-conflicting pseudos are placed in the same class.
1655 The caller is responsible for deallocating the returned partition. */
1658 compute_coalesced_reg_partition ()
1663 /* We don't actually work with hard registers, but it's easier to
1664 carry them around anyway rather than constantly doing register
1665 number arithmetic. */
1667 partition_new (ssa_definition->num_elements + FIRST_PSEUDO_REGISTER);
1669 /* The first priority is to make sure registers that might have to
1670 be copied on abnormal critical edges are placed in the same
1671 partition. This saves us from having to split abnormal critical
1672 edges (which can't be done). */
1673 for (bb = n_basic_blocks; --bb >= 0; )
1674 make_regs_equivalent_over_bad_edges (bb, p);
1678 regset_head phi_set;
1679 conflict_graph conflicts;
1683 /* Build the set of registers involved in phi nodes, either as
1684 arguments to the phi function or as the target of a set. */
1685 INITIALIZE_REG_SET (phi_set);
1686 mark_phi_and_copy_regs (&phi_set);
1688 /* Compute conflicts.  The conflict graph must be rebuilt on each
   iteration, since coalescing changes which classes conflict.  */
1689 conflicts = conflict_graph_compute (&phi_set, p);
1691 /* FIXME: Better would be to process most frequently executed
1692 blocks first, so that most frequently executed copies would
1693 be more likely to be removed by register coalescing. But any
1694 order will generate correct, if non-optimal, results. */
1695 for (bb = n_basic_blocks; --bb >= 0; )
1697 basic_block block = BASIC_BLOCK (bb);
1698 changed += coalesce_regs_in_copies (block, p, conflicts);
1700 coalesce_regs_in_successor_phi_nodes (block, p, conflicts);
1703 conflict_graph_delete (conflicts);
1705 while (changed > 0);
1710 /* Mark the regs in a phi node. PTR is a phi expression or one of its
1711 components (a REG or a CONST_INT). DATA is a reg set in which to
1712 set all regs. Called from for_each_rtx. */
1715 mark_reg_in_phi (ptr, data)
1720 regset set = (regset) data;
1722 /* NOTE(review): `expr' is presumably derived from *PTR on a line
   not visible in this excerpt -- confirm against the full source.  */
1722 switch (GET_CODE (expr))
1725 SET_REGNO_REG_SET (set, REGNO (expr));
1735 /* Mark in PHI_SET all pseudos that are used in a phi node -- either
1736 set from a phi expression, or used as an argument in one. Also
1737 mark regs that are the source or target of a reg copy. Uses
1741 mark_phi_and_copy_regs (phi_set)
1746 /* Scan the definitions of all regs.  In SSA form each pseudo has at
   most one defining insn, recorded in ssa_definition by regno.  */
1747 for (reg = VARRAY_SIZE (ssa_definition);
1748 --reg >= FIRST_PSEUDO_REGISTER;
1751 rtx insn = VARRAY_RTX (ssa_definition, reg);
1757 pattern = PATTERN (insn);
1758 /* Sometimes we get PARALLEL insns. These aren't phi nodes or
1760 if (GET_CODE (pattern) != SET)
1762 src = SET_SRC (pattern);
1764 if (GET_CODE (src) == REG)
1766 /* It's a reg copy.  Mark both sides.  */
1767 SET_REGNO_REG_SET (phi_set, reg);
1768 SET_REGNO_REG_SET (phi_set, REGNO (src));
1770 else if (GET_CODE (src) == PHI)
1772 /* It's a phi node. Mark the reg being set. */
1773 SET_REGNO_REG_SET (phi_set, reg);
1774 /* Mark the regs used in the phi function. */
1775 for_each_rtx (&src, mark_reg_in_phi, phi_set);
1777 /* ... else nothing to do. */
1781 /* Rename regs in insn PTR that are equivalent. DATA is the register
1782 partition which specifies equivalences.  Called from for_each_rtx;
   each pseudo REG is replaced by the canonical reg of its class. */
1785 rename_equivalent_regs_in_insn (ptr, data)
1790 partition reg_partition = (partition) data;
1795 switch (GET_CODE (x))
1798 if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
1800 int regno = REGNO (x);
1801 int new_regno = partition_find (reg_partition, regno);
1802 if (regno != new_regno)
1804 rtx new_reg = regno_reg_rtx[new_regno];
1805 /* A mode mismatch between the old and canonical reg would
   make the substitution invalid; the handling of that case
   falls on lines not visible in this excerpt.  */
1805 if (GET_MODE (x) != GET_MODE (new_reg))
1813 /* No need to rename the phi nodes. We'll check equivalence
1814 when inserting copies. */
1818 /* Anything else, continue traversing. */
1823 /* Rename regs that are equivalent in REG_PARTITION.
1824 Also collapse any SEQUENCE insns.  Renaming replaces every pseudo
   in each real insn (both its pattern and its REG_NOTES) with the
   canonical register of its partition class.  */
1827 rename_equivalent_regs (reg_partition)
1828 partition reg_partition;
1832 for (bb = n_basic_blocks; --bb >= 0; )
1834 basic_block b = BASIC_BLOCK (bb);
1842 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
1844 for_each_rtx (&PATTERN (insn),
1845 rename_equivalent_regs_in_insn,
/* Fixed mis-encoded "&reg;" HTML entity: the argument is the address
   of the insn's register notes, renamed like the pattern above.  */
1847 for_each_rtx (&REG_NOTES (insn),
1848 rename_equivalent_regs_in_insn,
1851 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
1853 rtx s = PATTERN (insn);
1854 int slen = XVECLEN (s, 0);
/* Flatten the SEQUENCE: keep its last element as this insn's
   pattern and emit the preceding elements as separate insns
   before it.  */
1860 PATTERN (insn) = XVECEXP (s, 0, slen-1);
1861 for (i = 0; i < slen - 1; i++)
1862 emit_block_insn_before (XVECEXP (s, 0, i), insn, b);
1866 next = NEXT_INSN (insn);
1868 while (insn != last);
1872 /* The main entry point for moving from SSA. */
/* NOTE(review): the function header line (presumably
   `void convert_from_ssa ()') is not visible in this excerpt.  */
1878 partition reg_partition;
1879 rtx insns = get_insns ();
1881 /* Need global_live_at_{start,end} up to date. */
1882 life_analysis (insns, NULL,
1883 PROP_KILL_DEAD_CODE | PROP_SCAN_DEAD_CODE | PROP_DEATH_NOTES);
1885 /* Figure out which regs in copies and phi nodes don't conflict and
1886 therefore can be coalesced. */
1887 if (conservative_reg_partition)
1888 reg_partition = compute_conservative_reg_partition ();
1890 reg_partition = compute_coalesced_reg_partition ();
1892 rename_equivalent_regs (reg_partition);
1894 /* Eliminate the PHI nodes.  Copies are inserted on the incoming
   edges (or in predecessors), one edge at a time.  */
1895 for (bb = n_basic_blocks; --bb >= 0; )
1897 basic_block b = BASIC_BLOCK (bb);
1900 for (e = b->pred; e; e = e->pred_next)
1901 if (e->src != ENTRY_BLOCK_PTR)
1902 eliminate_phi (e, reg_partition);
1905 partition_delete (reg_partition);
1907 /* Actually delete the PHI nodes. */
1908 for (bb = n_basic_blocks; --bb >= 0; )
1910 rtx insn = BLOCK_HEAD (bb);
1914 /* If this is a PHI node delete it. */
1915 if (PHI_NODE_P (insn))
1917 /* Keep the block's end pointer valid if we delete its last
   insn.  */
1917 if (insn == BLOCK_END (bb))
1918 BLOCK_END (bb) = PREV_INSN (insn);
1919 insn = delete_insn (insn);
1921 /* Since all the phi nodes come at the beginning of the
1922 block, if we find an ordinary insn, we can stop looking
1923 for more phi nodes. */
1924 else if (INSN_P (insn))
1926 /* If we've reached the end of the block, stop. */
1927 else if (insn == BLOCK_END (bb))
1930 insn = NEXT_INSN (insn);
1934 /* Commit all the copy nodes needed to convert out of SSA form. */
1935 commit_edge_insertions ();
1939 count_or_remove_death_notes (NULL, 1);
1942 /* Scan phi nodes in successors to BB. For each such phi node that
1943 has a phi alternative value corresponding to BB, invoke FN. FN
1944 is passed the entire phi node insn, the regno of the set
1945 destination, the regno of the phi argument corresponding to BB,
1948 If FN ever returns non-zero, stops immediately and returns this
1949 value. Otherwise, returns zero. */
1952 for_each_successor_phi (bb, fn, data)
1954 successor_phi_fn fn;
1959 if (bb == EXIT_BLOCK_PTR)
1962 /* Scan outgoing edges. */
1963 for (e = bb->succ; e != NULL; e = e->succ_next)
1967 basic_block successor = e->dest;
1968 if (successor == ENTRY_BLOCK_PTR
1969 || successor == EXIT_BLOCK_PTR)
1972 /* Advance to the first non-label insn of the successor block. */
1973 insn = first_insn_after_basic_block_note (successor);
1978 /* Scan phi nodes in the successor. */
1979 for ( ; PHI_NODE_P (insn); insn = NEXT_INSN (insn))
1982 rtx phi_set = PATTERN (insn);
1983 rtx *alternative = phi_alternative (phi_set, bb->index);
1986 /* This phi function may not have an alternative
1987 corresponding to the incoming edge, indicating the
1988 assigned variable is not defined along the edge. */
1989 if (alternative == NULL)
1991 phi_src = *alternative;
1993 /* Invoke the callback. */
1994 result = (*fn) (insn, REGNO (SET_DEST (phi_set)),
1995 REGNO (phi_src), data);
1997 /* Terminate if requested. */